]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.2.2-2.6.39.4-201108211939.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-2.6.39.4-201108211939.patch
1 diff -urNp linux-2.6.39.4/arch/alpha/include/asm/elf.h linux-2.6.39.4/arch/alpha/include/asm/elf.h
2 --- linux-2.6.39.4/arch/alpha/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
3 +++ linux-2.6.39.4/arch/alpha/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
4 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18 diff -urNp linux-2.6.39.4/arch/alpha/include/asm/pgtable.h linux-2.6.39.4/arch/alpha/include/asm/pgtable.h
19 --- linux-2.6.39.4/arch/alpha/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
20 +++ linux-2.6.39.4/arch/alpha/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-2.6.39.4/arch/alpha/kernel/module.c linux-2.6.39.4/arch/alpha/kernel/module.c
40 --- linux-2.6.39.4/arch/alpha/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
41 +++ linux-2.6.39.4/arch/alpha/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-2.6.39.4/arch/alpha/kernel/osf_sys.c linux-2.6.39.4/arch/alpha/kernel/osf_sys.c
52 --- linux-2.6.39.4/arch/alpha/kernel/osf_sys.c 2011-08-05 21:11:51.000000000 -0400
53 +++ linux-2.6.39.4/arch/alpha/kernel/osf_sys.c 2011-08-05 19:44:33.000000000 -0400
54 @@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58 - if (!vma || addr + len <= vma->vm_start)
59 + if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63 @@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67 +#ifdef CONFIG_PAX_RANDMMAP
68 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69 +#endif
70 +
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74 @@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79 - len, limit);
80 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81 +
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85 diff -urNp linux-2.6.39.4/arch/alpha/mm/fault.c linux-2.6.39.4/arch/alpha/mm/fault.c
86 --- linux-2.6.39.4/arch/alpha/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
87 +++ linux-2.6.39.4/arch/alpha/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
88 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92 +#ifdef CONFIG_PAX_PAGEEXEC
93 +/*
94 + * PaX: decide what to do with offenders (regs->pc = fault address)
95 + *
96 + * returns 1 when task should be killed
97 + * 2 when patched PLT trampoline was detected
98 + * 3 when unpatched PLT trampoline was detected
99 + */
100 +static int pax_handle_fetch_fault(struct pt_regs *regs)
101 +{
102 +
103 +#ifdef CONFIG_PAX_EMUPLT
104 + int err;
105 +
106 + do { /* PaX: patched PLT emulation #1 */
107 + unsigned int ldah, ldq, jmp;
108 +
109 + err = get_user(ldah, (unsigned int *)regs->pc);
110 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112 +
113 + if (err)
114 + break;
115 +
116 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118 + jmp == 0x6BFB0000U)
119 + {
120 + unsigned long r27, addr;
121 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123 +
124 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125 + err = get_user(r27, (unsigned long *)addr);
126 + if (err)
127 + break;
128 +
129 + regs->r27 = r27;
130 + regs->pc = r27;
131 + return 2;
132 + }
133 + } while (0);
134 +
135 + do { /* PaX: patched PLT emulation #2 */
136 + unsigned int ldah, lda, br;
137 +
138 + err = get_user(ldah, (unsigned int *)regs->pc);
139 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
140 + err |= get_user(br, (unsigned int *)(regs->pc+8));
141 +
142 + if (err)
143 + break;
144 +
145 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
147 + (br & 0xFFE00000U) == 0xC3E00000U)
148 + {
149 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152 +
153 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155 + return 2;
156 + }
157 + } while (0);
158 +
159 + do { /* PaX: unpatched PLT emulation */
160 + unsigned int br;
161 +
162 + err = get_user(br, (unsigned int *)regs->pc);
163 +
164 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165 + unsigned int br2, ldq, nop, jmp;
166 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167 +
168 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169 + err = get_user(br2, (unsigned int *)addr);
170 + err |= get_user(ldq, (unsigned int *)(addr+4));
171 + err |= get_user(nop, (unsigned int *)(addr+8));
172 + err |= get_user(jmp, (unsigned int *)(addr+12));
173 + err |= get_user(resolver, (unsigned long *)(addr+16));
174 +
175 + if (err)
176 + break;
177 +
178 + if (br2 == 0xC3600000U &&
179 + ldq == 0xA77B000CU &&
180 + nop == 0x47FF041FU &&
181 + jmp == 0x6B7B0000U)
182 + {
183 + regs->r28 = regs->pc+4;
184 + regs->r27 = addr+16;
185 + regs->pc = resolver;
186 + return 3;
187 + }
188 + }
189 + } while (0);
190 +#endif
191 +
192 + return 1;
193 +}
194 +
195 +void pax_report_insns(void *pc, void *sp)
196 +{
197 + unsigned long i;
198 +
199 + printk(KERN_ERR "PAX: bytes at PC: ");
200 + for (i = 0; i < 5; i++) {
201 + unsigned int c;
202 + if (get_user(c, (unsigned int *)pc+i))
203 + printk(KERN_CONT "???????? ");
204 + else
205 + printk(KERN_CONT "%08x ", c);
206 + }
207 + printk("\n");
208 +}
209 +#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217 - if (!(vma->vm_flags & VM_EXEC))
218 + if (!(vma->vm_flags & VM_EXEC)) {
219 +
220 +#ifdef CONFIG_PAX_PAGEEXEC
221 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222 + goto bad_area;
223 +
224 + up_read(&mm->mmap_sem);
225 + switch (pax_handle_fetch_fault(regs)) {
226 +
227 +#ifdef CONFIG_PAX_EMUPLT
228 + case 2:
229 + case 3:
230 + return;
231 +#endif
232 +
233 + }
234 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235 + do_group_exit(SIGKILL);
236 +#else
237 goto bad_area;
238 +#endif
239 +
240 + }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244 diff -urNp linux-2.6.39.4/arch/arm/include/asm/elf.h linux-2.6.39.4/arch/arm/include/asm/elf.h
245 --- linux-2.6.39.4/arch/arm/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
246 +++ linux-2.6.39.4/arch/arm/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
247 @@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253 +
254 +#ifdef CONFIG_PAX_ASLR
255 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256 +
257 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259 +#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263 @@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267 -struct mm_struct;
268 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269 -#define arch_randomize_brk arch_randomize_brk
270 -
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274 diff -urNp linux-2.6.39.4/arch/arm/include/asm/kmap_types.h linux-2.6.39.4/arch/arm/include/asm/kmap_types.h
275 --- linux-2.6.39.4/arch/arm/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
276 +++ linux-2.6.39.4/arch/arm/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
277 @@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281 + KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285 diff -urNp linux-2.6.39.4/arch/arm/include/asm/uaccess.h linux-2.6.39.4/arch/arm/include/asm/uaccess.h
286 --- linux-2.6.39.4/arch/arm/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
287 +++ linux-2.6.39.4/arch/arm/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
288 @@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
293 +
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297 @@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305 +
306 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307 +{
308 + if (!__builtin_constant_p(n))
309 + check_object_size(to, n, false);
310 + return ___copy_from_user(to, from, n);
311 +}
312 +
313 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314 +{
315 + if (!__builtin_constant_p(n))
316 + check_object_size(from, n, true);
317 + return ___copy_to_user(to, from, n);
318 +}
319 +
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327 + if ((long)n < 0)
328 + return n;
329 +
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333 @@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337 + if ((long)n < 0)
338 + return n;
339 +
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343 diff -urNp linux-2.6.39.4/arch/arm/kernel/armksyms.c linux-2.6.39.4/arch/arm/kernel/armksyms.c
344 --- linux-2.6.39.4/arch/arm/kernel/armksyms.c 2011-05-19 00:06:34.000000000 -0400
345 +++ linux-2.6.39.4/arch/arm/kernel/armksyms.c 2011-08-05 19:44:33.000000000 -0400
346 @@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350 -EXPORT_SYMBOL(__copy_from_user);
351 -EXPORT_SYMBOL(__copy_to_user);
352 +EXPORT_SYMBOL(___copy_from_user);
353 +EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357 diff -urNp linux-2.6.39.4/arch/arm/kernel/process.c linux-2.6.39.4/arch/arm/kernel/process.c
358 --- linux-2.6.39.4/arch/arm/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
359 +++ linux-2.6.39.4/arch/arm/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
360 @@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364 -#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366
367 #include <asm/cacheflush.h>
368 @@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372 -unsigned long arch_randomize_brk(struct mm_struct *mm)
373 -{
374 - unsigned long range_end = mm->brk + 0x02000000;
375 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376 -}
377 -
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381 diff -urNp linux-2.6.39.4/arch/arm/kernel/traps.c linux-2.6.39.4/arch/arm/kernel/traps.c
382 --- linux-2.6.39.4/arch/arm/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
383 +++ linux-2.6.39.4/arch/arm/kernel/traps.c 2011-08-05 19:44:33.000000000 -0400
384 @@ -258,6 +258,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388 +extern void gr_handle_kernel_exploit(void);
389 +
390 /*
391 * This function is protected against re-entrancy.
392 */
393 @@ -285,6 +287,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397 +
398 + gr_handle_kernel_exploit();
399 +
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403 diff -urNp linux-2.6.39.4/arch/arm/lib/copy_from_user.S linux-2.6.39.4/arch/arm/lib/copy_from_user.S
404 --- linux-2.6.39.4/arch/arm/lib/copy_from_user.S 2011-05-19 00:06:34.000000000 -0400
405 +++ linux-2.6.39.4/arch/arm/lib/copy_from_user.S 2011-08-05 19:44:33.000000000 -0400
406 @@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410 - * size_t __copy_from_user(void *to, const void *from, size_t n)
411 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415 @@ -84,11 +84,11 @@
416
417 .text
418
419 -ENTRY(__copy_from_user)
420 +ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424 -ENDPROC(__copy_from_user)
425 +ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429 diff -urNp linux-2.6.39.4/arch/arm/lib/copy_to_user.S linux-2.6.39.4/arch/arm/lib/copy_to_user.S
430 --- linux-2.6.39.4/arch/arm/lib/copy_to_user.S 2011-05-19 00:06:34.000000000 -0400
431 +++ linux-2.6.39.4/arch/arm/lib/copy_to_user.S 2011-08-05 19:44:33.000000000 -0400
432 @@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436 - * size_t __copy_to_user(void *to, const void *from, size_t n)
437 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441 @@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445 -WEAK(__copy_to_user)
446 +WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450 -ENDPROC(__copy_to_user)
451 +ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455 diff -urNp linux-2.6.39.4/arch/arm/lib/uaccess.S linux-2.6.39.4/arch/arm/lib/uaccess.S
456 --- linux-2.6.39.4/arch/arm/lib/uaccess.S 2011-05-19 00:06:34.000000000 -0400
457 +++ linux-2.6.39.4/arch/arm/lib/uaccess.S 2011-08-05 19:44:33.000000000 -0400
458 @@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471 -ENTRY(__copy_to_user)
472 +ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480 -ENDPROC(__copy_to_user)
481 +ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497 -ENTRY(__copy_from_user)
498 +ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506 -ENDPROC(__copy_from_user)
507 +ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511 diff -urNp linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c
512 --- linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c 2011-05-19 00:06:34.000000000 -0400
513 +++ linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c 2011-08-05 19:44:33.000000000 -0400
514 @@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518 -__copy_to_user(void __user *to, const void *from, unsigned long n)
519 +___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523 diff -urNp linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c
524 --- linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c 2011-05-19 00:06:34.000000000 -0400
525 +++ linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c 2011-08-05 19:44:33.000000000 -0400
526 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535 diff -urNp linux-2.6.39.4/arch/arm/mm/fault.c linux-2.6.39.4/arch/arm/mm/fault.c
536 --- linux-2.6.39.4/arch/arm/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
537 +++ linux-2.6.39.4/arch/arm/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
538 @@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542 +#ifdef CONFIG_PAX_PAGEEXEC
543 + if (fsr & FSR_LNX_PF) {
544 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545 + do_group_exit(SIGKILL);
546 + }
547 +#endif
548 +
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552 @@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556 +#ifdef CONFIG_PAX_PAGEEXEC
557 +void pax_report_insns(void *pc, void *sp)
558 +{
559 + long i;
560 +
561 + printk(KERN_ERR "PAX: bytes at PC: ");
562 + for (i = 0; i < 20; i++) {
563 + unsigned char c;
564 + if (get_user(c, (__force unsigned char __user *)pc+i))
565 + printk(KERN_CONT "?? ");
566 + else
567 + printk(KERN_CONT "%02x ", c);
568 + }
569 + printk("\n");
570 +
571 + printk(KERN_ERR "PAX: bytes at SP-4: ");
572 + for (i = -1; i < 20; i++) {
573 + unsigned long c;
574 + if (get_user(c, (__force unsigned long __user *)sp+i))
575 + printk(KERN_CONT "???????? ");
576 + else
577 + printk(KERN_CONT "%08lx ", c);
578 + }
579 + printk("\n");
580 +}
581 +#endif
582 +
583 /*
584 * First Level Translation Fault Handler
585 *
586 diff -urNp linux-2.6.39.4/arch/arm/mm/mmap.c linux-2.6.39.4/arch/arm/mm/mmap.c
587 --- linux-2.6.39.4/arch/arm/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
588 +++ linux-2.6.39.4/arch/arm/mm/mmap.c 2011-08-05 19:44:33.000000000 -0400
589 @@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593 +#ifdef CONFIG_PAX_RANDMMAP
594 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595 +#endif
596 +
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600 @@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604 - if (TASK_SIZE - len >= addr &&
605 - (!vma || addr + len <= vma->vm_start))
606 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610 - start_addr = addr = mm->free_area_cache;
611 + start_addr = addr = mm->free_area_cache;
612 } else {
613 - start_addr = addr = TASK_UNMAPPED_BASE;
614 - mm->cached_hole_size = 0;
615 + start_addr = addr = mm->mmap_base;
616 + mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620 @@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624 - if (start_addr != TASK_UNMAPPED_BASE) {
625 - start_addr = addr = TASK_UNMAPPED_BASE;
626 + if (start_addr != mm->mmap_base) {
627 + start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633 - if (!vma || addr + len <= vma->vm_start) {
634 + if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638 diff -urNp linux-2.6.39.4/arch/avr32/include/asm/elf.h linux-2.6.39.4/arch/avr32/include/asm/elf.h
639 --- linux-2.6.39.4/arch/avr32/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
640 +++ linux-2.6.39.4/arch/avr32/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
641 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648 +#ifdef CONFIG_PAX_ASLR
649 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650 +
651 +#define PAX_DELTA_MMAP_LEN 15
652 +#define PAX_DELTA_STACK_LEN 15
653 +#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657 diff -urNp linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h
658 --- linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
659 +++ linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
660 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664 -D(14) KM_TYPE_NR
665 +D(14) KM_CLEARPAGE,
666 +D(15) KM_TYPE_NR
667 };
668
669 #undef D
670 diff -urNp linux-2.6.39.4/arch/avr32/mm/fault.c linux-2.6.39.4/arch/avr32/mm/fault.c
671 --- linux-2.6.39.4/arch/avr32/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
672 +++ linux-2.6.39.4/arch/avr32/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
673 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677 +#ifdef CONFIG_PAX_PAGEEXEC
678 +void pax_report_insns(void *pc, void *sp)
679 +{
680 + unsigned long i;
681 +
682 + printk(KERN_ERR "PAX: bytes at PC: ");
683 + for (i = 0; i < 20; i++) {
684 + unsigned char c;
685 + if (get_user(c, (unsigned char *)pc+i))
686 + printk(KERN_CONT "???????? ");
687 + else
688 + printk(KERN_CONT "%02x ", c);
689 + }
690 + printk("\n");
691 +}
692 +#endif
693 +
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697 @@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701 +
702 +#ifdef CONFIG_PAX_PAGEEXEC
703 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706 + do_group_exit(SIGKILL);
707 + }
708 + }
709 +#endif
710 +
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714 diff -urNp linux-2.6.39.4/arch/frv/include/asm/kmap_types.h linux-2.6.39.4/arch/frv/include/asm/kmap_types.h
715 --- linux-2.6.39.4/arch/frv/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
716 +++ linux-2.6.39.4/arch/frv/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
717 @@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721 + KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725 diff -urNp linux-2.6.39.4/arch/frv/mm/elf-fdpic.c linux-2.6.39.4/arch/frv/mm/elf-fdpic.c
726 --- linux-2.6.39.4/arch/frv/mm/elf-fdpic.c 2011-05-19 00:06:34.000000000 -0400
727 +++ linux-2.6.39.4/arch/frv/mm/elf-fdpic.c 2011-08-05 19:44:33.000000000 -0400
728 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732 - if (TASK_SIZE - len >= addr &&
733 - (!vma || addr + len <= vma->vm_start))
734 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742 - if (addr + len <= vma->vm_start)
743 + if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751 - if (addr + len <= vma->vm_start)
752 + if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756 diff -urNp linux-2.6.39.4/arch/ia64/include/asm/elf.h linux-2.6.39.4/arch/ia64/include/asm/elf.h
757 --- linux-2.6.39.4/arch/ia64/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
758 +++ linux-2.6.39.4/arch/ia64/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
759 @@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763 +#ifdef CONFIG_PAX_ASLR
764 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765 +
766 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768 +#endif
769 +
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773 diff -urNp linux-2.6.39.4/arch/ia64/include/asm/pgtable.h linux-2.6.39.4/arch/ia64/include/asm/pgtable.h
774 --- linux-2.6.39.4/arch/ia64/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
775 +++ linux-2.6.39.4/arch/ia64/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
776 @@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780 -
781 +#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785 @@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789 +
790 +#ifdef CONFIG_PAX_PAGEEXEC
791 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794 +#else
795 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
796 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
797 +# define PAGE_COPY_NOEXEC PAGE_COPY
798 +#endif
799 +
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803 diff -urNp linux-2.6.39.4/arch/ia64/include/asm/spinlock.h linux-2.6.39.4/arch/ia64/include/asm/spinlock.h
804 --- linux-2.6.39.4/arch/ia64/include/asm/spinlock.h 2011-05-19 00:06:34.000000000 -0400
805 +++ linux-2.6.39.4/arch/ia64/include/asm/spinlock.h 2011-08-05 19:44:33.000000000 -0400
806 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815 diff -urNp linux-2.6.39.4/arch/ia64/include/asm/uaccess.h linux-2.6.39.4/arch/ia64/include/asm/uaccess.h
816 --- linux-2.6.39.4/arch/ia64/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
817 +++ linux-2.6.39.4/arch/ia64/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
818 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
823 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
832 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836 diff -urNp linux-2.6.39.4/arch/ia64/kernel/module.c linux-2.6.39.4/arch/ia64/kernel/module.c
837 --- linux-2.6.39.4/arch/ia64/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
838 +++ linux-2.6.39.4/arch/ia64/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
839 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843 - if (mod && mod->arch.init_unw_table &&
844 - module_region == mod->module_init) {
845 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853 +in_init_rx (const struct module *mod, uint64_t addr)
854 +{
855 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856 +}
857 +
858 +static inline int
859 +in_init_rw (const struct module *mod, uint64_t addr)
860 +{
861 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862 +}
863 +
864 +static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867 - return addr - (uint64_t) mod->module_init < mod->init_size;
868 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869 +}
870 +
871 +static inline int
872 +in_core_rx (const struct module *mod, uint64_t addr)
873 +{
874 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875 +}
876 +
877 +static inline int
878 +in_core_rw (const struct module *mod, uint64_t addr)
879 +{
880 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886 - return addr - (uint64_t) mod->module_core < mod->core_size;
887 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896 + if (in_init_rx(mod, val))
897 + val -= (uint64_t) mod->module_init_rx;
898 + else if (in_init_rw(mod, val))
899 + val -= (uint64_t) mod->module_init_rw;
900 + else if (in_core_rx(mod, val))
901 + val -= (uint64_t) mod->module_core_rx;
902 + else if (in_core_rw(mod, val))
903 + val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911 - if (mod->core_size > MAX_LTOFF)
912 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917 - gp = mod->core_size - MAX_LTOFF / 2;
918 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920 - gp = mod->core_size / 2;
921 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927 diff -urNp linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c
928 --- linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c 2011-05-19 00:06:34.000000000 -0400
929 +++ linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c 2011-08-05 19:44:33.000000000 -0400
930 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934 +
935 +#ifdef CONFIG_PAX_RANDMMAP
936 + if (mm->pax_flags & MF_PAX_RANDMMAP)
937 + addr = mm->free_area_cache;
938 + else
939 +#endif
940 +
941 if (!addr)
942 addr = mm->free_area_cache;
943
944 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948 - if (start_addr != TASK_UNMAPPED_BASE) {
949 + if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951 - addr = TASK_UNMAPPED_BASE;
952 + addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957 - if (!vma || addr + len <= vma->vm_start) {
958 + if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962 diff -urNp linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S
963 --- linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S 2011-05-19 00:06:34.000000000 -0400
964 +++ linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S 2011-08-05 19:44:33.000000000 -0400
965 @@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969 - __phys_per_cpu_start = __per_cpu_load;
970 + __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974 diff -urNp linux-2.6.39.4/arch/ia64/mm/fault.c linux-2.6.39.4/arch/ia64/mm/fault.c
975 --- linux-2.6.39.4/arch/ia64/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
976 +++ linux-2.6.39.4/arch/ia64/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
977 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981 +#ifdef CONFIG_PAX_PAGEEXEC
982 +void pax_report_insns(void *pc, void *sp)
983 +{
984 + unsigned long i;
985 +
986 + printk(KERN_ERR "PAX: bytes at PC: ");
987 + for (i = 0; i < 8; i++) {
988 + unsigned int c;
989 + if (get_user(c, (unsigned int *)pc+i))
990 + printk(KERN_CONT "???????? ");
991 + else
992 + printk(KERN_CONT "%08x ", c);
993 + }
994 + printk("\n");
995 +}
996 +#endif
997 +
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005 - if ((vma->vm_flags & mask) != mask)
1006 + if ((vma->vm_flags & mask) != mask) {
1007 +
1008 +#ifdef CONFIG_PAX_PAGEEXEC
1009 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011 + goto bad_area;
1012 +
1013 + up_read(&mm->mmap_sem);
1014 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015 + do_group_exit(SIGKILL);
1016 + }
1017 +#endif
1018 +
1019 goto bad_area;
1020
1021 + }
1022 +
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026 diff -urNp linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c
1027 --- linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400
1028 +++ linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c 2011-08-05 19:44:33.000000000 -0400
1029 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033 - if (!vmm || (addr + len) <= vmm->vm_start)
1034 + if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038 diff -urNp linux-2.6.39.4/arch/ia64/mm/init.c linux-2.6.39.4/arch/ia64/mm/init.c
1039 --- linux-2.6.39.4/arch/ia64/mm/init.c 2011-05-19 00:06:34.000000000 -0400
1040 +++ linux-2.6.39.4/arch/ia64/mm/init.c 2011-08-05 19:44:33.000000000 -0400
1041 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045 +
1046 +#ifdef CONFIG_PAX_PAGEEXEC
1047 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048 + vma->vm_flags &= ~VM_EXEC;
1049 +
1050 +#ifdef CONFIG_PAX_MPROTECT
1051 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052 + vma->vm_flags &= ~VM_MAYEXEC;
1053 +#endif
1054 +
1055 + }
1056 +#endif
1057 +
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061 diff -urNp linux-2.6.39.4/arch/m32r/lib/usercopy.c linux-2.6.39.4/arch/m32r/lib/usercopy.c
1062 --- linux-2.6.39.4/arch/m32r/lib/usercopy.c 2011-05-19 00:06:34.000000000 -0400
1063 +++ linux-2.6.39.4/arch/m32r/lib/usercopy.c 2011-08-05 19:44:33.000000000 -0400
1064 @@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068 + if ((long)n < 0)
1069 + return n;
1070 +
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078 + if ((long)n < 0)
1079 + return n;
1080 +
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084 diff -urNp linux-2.6.39.4/arch/mips/include/asm/elf.h linux-2.6.39.4/arch/mips/include/asm/elf.h
1085 --- linux-2.6.39.4/arch/mips/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
1086 +++ linux-2.6.39.4/arch/mips/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
1087 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091 +#ifdef CONFIG_PAX_ASLR
1092 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093 +
1094 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096 +#endif
1097 +
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103 -struct mm_struct;
1104 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105 -#define arch_randomize_brk arch_randomize_brk
1106 -
1107 #endif /* _ASM_ELF_H */
1108 diff -urNp linux-2.6.39.4/arch/mips/include/asm/page.h linux-2.6.39.4/arch/mips/include/asm/page.h
1109 --- linux-2.6.39.4/arch/mips/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
1110 +++ linux-2.6.39.4/arch/mips/include/asm/page.h 2011-08-05 19:44:33.000000000 -0400
1111 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120 diff -urNp linux-2.6.39.4/arch/mips/include/asm/system.h linux-2.6.39.4/arch/mips/include/asm/system.h
1121 --- linux-2.6.39.4/arch/mips/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
1122 +++ linux-2.6.39.4/arch/mips/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
1123 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127 -extern unsigned long arch_align_stack(unsigned long sp);
1128 +#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131 diff -urNp linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c
1132 --- linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c 2011-05-19 00:06:34.000000000 -0400
1133 +++ linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c 2011-08-05 19:44:33.000000000 -0400
1134 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138 +#ifdef CONFIG_PAX_ASLR
1139 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140 +
1141 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143 +#endif
1144 +
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148 diff -urNp linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c
1149 --- linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c 2011-05-19 00:06:34.000000000 -0400
1150 +++ linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c 2011-08-05 19:44:33.000000000 -0400
1151 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155 +#ifdef CONFIG_PAX_ASLR
1156 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157 +
1158 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160 +#endif
1161 +
1162 #include <asm/processor.h>
1163
1164 /*
1165 diff -urNp linux-2.6.39.4/arch/mips/kernel/process.c linux-2.6.39.4/arch/mips/kernel/process.c
1166 --- linux-2.6.39.4/arch/mips/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
1167 +++ linux-2.6.39.4/arch/mips/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
1168 @@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172 -
1173 -/*
1174 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1175 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176 - */
1177 -unsigned long arch_align_stack(unsigned long sp)
1178 -{
1179 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180 - sp -= get_random_int() & ~PAGE_MASK;
1181 -
1182 - return sp & ALMASK;
1183 -}
1184 diff -urNp linux-2.6.39.4/arch/mips/kernel/syscall.c linux-2.6.39.4/arch/mips/kernel/syscall.c
1185 --- linux-2.6.39.4/arch/mips/kernel/syscall.c 2011-05-19 00:06:34.000000000 -0400
1186 +++ linux-2.6.39.4/arch/mips/kernel/syscall.c 2011-08-05 19:44:33.000000000 -0400
1187 @@ -108,14 +108,18 @@ unsigned long arch_get_unmapped_area(str
1188 do_color_align = 0;
1189 if (filp || (flags & MAP_SHARED))
1190 do_color_align = 1;
1191 +
1192 +#ifdef CONFIG_PAX_RANDMMAP
1193 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1194 +#endif
1195 +
1196 if (addr) {
1197 if (do_color_align)
1198 addr = COLOUR_ALIGN(addr, pgoff);
1199 else
1200 addr = PAGE_ALIGN(addr);
1201 vmm = find_vma(current->mm, addr);
1202 - if (task_size - len >= addr &&
1203 - (!vmm || addr + len <= vmm->vm_start))
1204 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1205 return addr;
1206 }
1207 addr = current->mm->mmap_base;
1208 @@ -128,7 +132,7 @@ unsigned long arch_get_unmapped_area(str
1209 /* At this point: (!vmm || addr < vmm->vm_end). */
1210 if (task_size - len < addr)
1211 return -ENOMEM;
1212 - if (!vmm || addr + len <= vmm->vm_start)
1213 + if (check_heap_stack_gap(vmm, addr, len))
1214 return addr;
1215 addr = vmm->vm_end;
1216 if (do_color_align)
1217 @@ -154,33 +158,6 @@ void arch_pick_mmap_layout(struct mm_str
1218 mm->unmap_area = arch_unmap_area;
1219 }
1220
1221 -static inline unsigned long brk_rnd(void)
1222 -{
1223 - unsigned long rnd = get_random_int();
1224 -
1225 - rnd = rnd << PAGE_SHIFT;
1226 - /* 8MB for 32bit, 256MB for 64bit */
1227 - if (TASK_IS_32BIT_ADDR)
1228 - rnd = rnd & 0x7ffffful;
1229 - else
1230 - rnd = rnd & 0xffffffful;
1231 -
1232 - return rnd;
1233 -}
1234 -
1235 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1236 -{
1237 - unsigned long base = mm->brk;
1238 - unsigned long ret;
1239 -
1240 - ret = PAGE_ALIGN(base + brk_rnd());
1241 -
1242 - if (ret < mm->brk)
1243 - return mm->brk;
1244 -
1245 - return ret;
1246 -}
1247 -
1248 SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
1249 unsigned long, prot, unsigned long, flags, unsigned long,
1250 fd, off_t, offset)
1251 diff -urNp linux-2.6.39.4/arch/mips/mm/fault.c linux-2.6.39.4/arch/mips/mm/fault.c
1252 --- linux-2.6.39.4/arch/mips/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
1253 +++ linux-2.6.39.4/arch/mips/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
1254 @@ -28,6 +28,23 @@
1255 #include <asm/highmem.h> /* For VMALLOC_END */
1256 #include <linux/kdebug.h>
1257
1258 +#ifdef CONFIG_PAX_PAGEEXEC
1259 +void pax_report_insns(void *pc, void *sp)
1260 +{
1261 + unsigned long i;
1262 +
1263 + printk(KERN_ERR "PAX: bytes at PC: ");
1264 + for (i = 0; i < 5; i++) {
1265 + unsigned int c;
1266 + if (get_user(c, (unsigned int *)pc+i))
1267 + printk(KERN_CONT "???????? ");
1268 + else
1269 + printk(KERN_CONT "%08x ", c);
1270 + }
1271 + printk("\n");
1272 +}
1273 +#endif
1274 +
1275 /*
1276 * This routine handles page faults. It determines the address,
1277 * and the problem, and then passes it off to one of the appropriate
1278 diff -urNp linux-2.6.39.4/arch/parisc/include/asm/elf.h linux-2.6.39.4/arch/parisc/include/asm/elf.h
1279 --- linux-2.6.39.4/arch/parisc/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
1280 +++ linux-2.6.39.4/arch/parisc/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
1281 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1282
1283 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1284
1285 +#ifdef CONFIG_PAX_ASLR
1286 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1287 +
1288 +#define PAX_DELTA_MMAP_LEN 16
1289 +#define PAX_DELTA_STACK_LEN 16
1290 +#endif
1291 +
1292 /* This yields a mask that user programs can use to figure out what
1293 instruction set this CPU supports. This could be done in user space,
1294 but it's not easy, and we've already done it here. */
1295 diff -urNp linux-2.6.39.4/arch/parisc/include/asm/pgtable.h linux-2.6.39.4/arch/parisc/include/asm/pgtable.h
1296 --- linux-2.6.39.4/arch/parisc/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
1297 +++ linux-2.6.39.4/arch/parisc/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
1298 @@ -207,6 +207,17 @@ struct vm_area_struct;
1299 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1300 #define PAGE_COPY PAGE_EXECREAD
1301 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1302 +
1303 +#ifdef CONFIG_PAX_PAGEEXEC
1304 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1305 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1306 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1307 +#else
1308 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1309 +# define PAGE_COPY_NOEXEC PAGE_COPY
1310 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1311 +#endif
1312 +
1313 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1314 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1315 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1316 diff -urNp linux-2.6.39.4/arch/parisc/kernel/module.c linux-2.6.39.4/arch/parisc/kernel/module.c
1317 --- linux-2.6.39.4/arch/parisc/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
1318 +++ linux-2.6.39.4/arch/parisc/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
1319 @@ -96,16 +96,38 @@
1320
1321 /* three functions to determine where in the module core
1322 * or init pieces the location is */
1323 +static inline int in_init_rx(struct module *me, void *loc)
1324 +{
1325 + return (loc >= me->module_init_rx &&
1326 + loc < (me->module_init_rx + me->init_size_rx));
1327 +}
1328 +
1329 +static inline int in_init_rw(struct module *me, void *loc)
1330 +{
1331 + return (loc >= me->module_init_rw &&
1332 + loc < (me->module_init_rw + me->init_size_rw));
1333 +}
1334 +
1335 static inline int in_init(struct module *me, void *loc)
1336 {
1337 - return (loc >= me->module_init &&
1338 - loc <= (me->module_init + me->init_size));
1339 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1340 +}
1341 +
1342 +static inline int in_core_rx(struct module *me, void *loc)
1343 +{
1344 + return (loc >= me->module_core_rx &&
1345 + loc < (me->module_core_rx + me->core_size_rx));
1346 +}
1347 +
1348 +static inline int in_core_rw(struct module *me, void *loc)
1349 +{
1350 + return (loc >= me->module_core_rw &&
1351 + loc < (me->module_core_rw + me->core_size_rw));
1352 }
1353
1354 static inline int in_core(struct module *me, void *loc)
1355 {
1356 - return (loc >= me->module_core &&
1357 - loc <= (me->module_core + me->core_size));
1358 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1359 }
1360
1361 static inline int in_local(struct module *me, void *loc)
1362 @@ -365,13 +387,13 @@ int module_frob_arch_sections(CONST Elf_
1363 }
1364
1365 /* align things a bit */
1366 - me->core_size = ALIGN(me->core_size, 16);
1367 - me->arch.got_offset = me->core_size;
1368 - me->core_size += gots * sizeof(struct got_entry);
1369 -
1370 - me->core_size = ALIGN(me->core_size, 16);
1371 - me->arch.fdesc_offset = me->core_size;
1372 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1373 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1374 + me->arch.got_offset = me->core_size_rw;
1375 + me->core_size_rw += gots * sizeof(struct got_entry);
1376 +
1377 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1378 + me->arch.fdesc_offset = me->core_size_rw;
1379 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1380
1381 me->arch.got_max = gots;
1382 me->arch.fdesc_max = fdescs;
1383 @@ -389,7 +411,7 @@ static Elf64_Word get_got(struct module
1384
1385 BUG_ON(value == 0);
1386
1387 - got = me->module_core + me->arch.got_offset;
1388 + got = me->module_core_rw + me->arch.got_offset;
1389 for (i = 0; got[i].addr; i++)
1390 if (got[i].addr == value)
1391 goto out;
1392 @@ -407,7 +429,7 @@ static Elf64_Word get_got(struct module
1393 #ifdef CONFIG_64BIT
1394 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1395 {
1396 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1397 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1398
1399 if (!value) {
1400 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1401 @@ -425,7 +447,7 @@ static Elf_Addr get_fdesc(struct module
1402
1403 /* Create new one */
1404 fdesc->addr = value;
1405 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1406 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1407 return (Elf_Addr)fdesc;
1408 }
1409 #endif /* CONFIG_64BIT */
1410 @@ -849,7 +871,7 @@ register_unwind_table(struct module *me,
1411
1412 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1413 end = table + sechdrs[me->arch.unwind_section].sh_size;
1414 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1415 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1416
1417 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1418 me->arch.unwind_section, table, end, gp);
1419 diff -urNp linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c
1420 --- linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c 2011-05-19 00:06:34.000000000 -0400
1421 +++ linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c 2011-08-05 19:44:33.000000000 -0400
1422 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1423 /* At this point: (!vma || addr < vma->vm_end). */
1424 if (TASK_SIZE - len < addr)
1425 return -ENOMEM;
1426 - if (!vma || addr + len <= vma->vm_start)
1427 + if (check_heap_stack_gap(vma, addr, len))
1428 return addr;
1429 addr = vma->vm_end;
1430 }
1431 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1432 /* At this point: (!vma || addr < vma->vm_end). */
1433 if (TASK_SIZE - len < addr)
1434 return -ENOMEM;
1435 - if (!vma || addr + len <= vma->vm_start)
1436 + if (check_heap_stack_gap(vma, addr, len))
1437 return addr;
1438 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1439 if (addr < vma->vm_end) /* handle wraparound */
1440 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1441 if (flags & MAP_FIXED)
1442 return addr;
1443 if (!addr)
1444 - addr = TASK_UNMAPPED_BASE;
1445 + addr = current->mm->mmap_base;
1446
1447 if (filp) {
1448 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1449 diff -urNp linux-2.6.39.4/arch/parisc/kernel/traps.c linux-2.6.39.4/arch/parisc/kernel/traps.c
1450 --- linux-2.6.39.4/arch/parisc/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
1451 +++ linux-2.6.39.4/arch/parisc/kernel/traps.c 2011-08-05 19:44:33.000000000 -0400
1452 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1453
1454 down_read(&current->mm->mmap_sem);
1455 vma = find_vma(current->mm,regs->iaoq[0]);
1456 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1457 - && (vma->vm_flags & VM_EXEC)) {
1458 -
1459 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1460 fault_address = regs->iaoq[0];
1461 fault_space = regs->iasq[0];
1462
1463 diff -urNp linux-2.6.39.4/arch/parisc/mm/fault.c linux-2.6.39.4/arch/parisc/mm/fault.c
1464 --- linux-2.6.39.4/arch/parisc/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
1465 +++ linux-2.6.39.4/arch/parisc/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
1466 @@ -15,6 +15,7 @@
1467 #include <linux/sched.h>
1468 #include <linux/interrupt.h>
1469 #include <linux/module.h>
1470 +#include <linux/unistd.h>
1471
1472 #include <asm/uaccess.h>
1473 #include <asm/traps.h>
1474 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1475 static unsigned long
1476 parisc_acctyp(unsigned long code, unsigned int inst)
1477 {
1478 - if (code == 6 || code == 16)
1479 + if (code == 6 || code == 7 || code == 16)
1480 return VM_EXEC;
1481
1482 switch (inst & 0xf0000000) {
1483 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1484 }
1485 #endif
1486
1487 +#ifdef CONFIG_PAX_PAGEEXEC
1488 +/*
1489 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1490 + *
1491 + * returns 1 when task should be killed
1492 + * 2 when rt_sigreturn trampoline was detected
1493 + * 3 when unpatched PLT trampoline was detected
1494 + */
1495 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1496 +{
1497 +
1498 +#ifdef CONFIG_PAX_EMUPLT
1499 + int err;
1500 +
1501 + do { /* PaX: unpatched PLT emulation */
1502 + unsigned int bl, depwi;
1503 +
1504 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1505 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1506 +
1507 + if (err)
1508 + break;
1509 +
1510 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1511 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1512 +
1513 + err = get_user(ldw, (unsigned int *)addr);
1514 + err |= get_user(bv, (unsigned int *)(addr+4));
1515 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1516 +
1517 + if (err)
1518 + break;
1519 +
1520 + if (ldw == 0x0E801096U &&
1521 + bv == 0xEAC0C000U &&
1522 + ldw2 == 0x0E881095U)
1523 + {
1524 + unsigned int resolver, map;
1525 +
1526 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1527 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1528 + if (err)
1529 + break;
1530 +
1531 + regs->gr[20] = instruction_pointer(regs)+8;
1532 + regs->gr[21] = map;
1533 + regs->gr[22] = resolver;
1534 + regs->iaoq[0] = resolver | 3UL;
1535 + regs->iaoq[1] = regs->iaoq[0] + 4;
1536 + return 3;
1537 + }
1538 + }
1539 + } while (0);
1540 +#endif
1541 +
1542 +#ifdef CONFIG_PAX_EMUTRAMP
1543 +
1544 +#ifndef CONFIG_PAX_EMUSIGRT
1545 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1546 + return 1;
1547 +#endif
1548 +
1549 + do { /* PaX: rt_sigreturn emulation */
1550 + unsigned int ldi1, ldi2, bel, nop;
1551 +
1552 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1553 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1554 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1555 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1556 +
1557 + if (err)
1558 + break;
1559 +
1560 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1561 + ldi2 == 0x3414015AU &&
1562 + bel == 0xE4008200U &&
1563 + nop == 0x08000240U)
1564 + {
1565 + regs->gr[25] = (ldi1 & 2) >> 1;
1566 + regs->gr[20] = __NR_rt_sigreturn;
1567 + regs->gr[31] = regs->iaoq[1] + 16;
1568 + regs->sr[0] = regs->iasq[1];
1569 + regs->iaoq[0] = 0x100UL;
1570 + regs->iaoq[1] = regs->iaoq[0] + 4;
1571 + regs->iasq[0] = regs->sr[2];
1572 + regs->iasq[1] = regs->sr[2];
1573 + return 2;
1574 + }
1575 + } while (0);
1576 +#endif
1577 +
1578 + return 1;
1579 +}
1580 +
1581 +void pax_report_insns(void *pc, void *sp)
1582 +{
1583 + unsigned long i;
1584 +
1585 + printk(KERN_ERR "PAX: bytes at PC: ");
1586 + for (i = 0; i < 5; i++) {
1587 + unsigned int c;
1588 + if (get_user(c, (unsigned int *)pc+i))
1589 + printk(KERN_CONT "???????? ");
1590 + else
1591 + printk(KERN_CONT "%08x ", c);
1592 + }
1593 + printk("\n");
1594 +}
1595 +#endif
1596 +
1597 int fixup_exception(struct pt_regs *regs)
1598 {
1599 const struct exception_table_entry *fix;
1600 @@ -192,8 +303,33 @@ good_area:
1601
1602 acc_type = parisc_acctyp(code,regs->iir);
1603
1604 - if ((vma->vm_flags & acc_type) != acc_type)
1605 + if ((vma->vm_flags & acc_type) != acc_type) {
1606 +
1607 +#ifdef CONFIG_PAX_PAGEEXEC
1608 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1609 + (address & ~3UL) == instruction_pointer(regs))
1610 + {
1611 + up_read(&mm->mmap_sem);
1612 + switch (pax_handle_fetch_fault(regs)) {
1613 +
1614 +#ifdef CONFIG_PAX_EMUPLT
1615 + case 3:
1616 + return;
1617 +#endif
1618 +
1619 +#ifdef CONFIG_PAX_EMUTRAMP
1620 + case 2:
1621 + return;
1622 +#endif
1623 +
1624 + }
1625 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1626 + do_group_exit(SIGKILL);
1627 + }
1628 +#endif
1629 +
1630 goto bad_area;
1631 + }
1632
1633 /*
1634 * If for any reason at all we couldn't handle the fault, make
1635 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/elf.h linux-2.6.39.4/arch/powerpc/include/asm/elf.h
1636 --- linux-2.6.39.4/arch/powerpc/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
1637 +++ linux-2.6.39.4/arch/powerpc/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
1638 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1639 the loader. We need to make sure that it is out of the way of the program
1640 that it will "exec", and that there is sufficient room for the brk. */
1641
1642 -extern unsigned long randomize_et_dyn(unsigned long base);
1643 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1644 +#define ELF_ET_DYN_BASE (0x20000000)
1645 +
1646 +#ifdef CONFIG_PAX_ASLR
1647 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1648 +
1649 +#ifdef __powerpc64__
1650 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1651 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1652 +#else
1653 +#define PAX_DELTA_MMAP_LEN 15
1654 +#define PAX_DELTA_STACK_LEN 15
1655 +#endif
1656 +#endif
1657
1658 /*
1659 * Our registers are always unsigned longs, whether we're a 32 bit
1660 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1661 (0x7ff >> (PAGE_SHIFT - 12)) : \
1662 (0x3ffff >> (PAGE_SHIFT - 12)))
1663
1664 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1665 -#define arch_randomize_brk arch_randomize_brk
1666 -
1667 #endif /* __KERNEL__ */
1668
1669 /*
1670 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h
1671 --- linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
1672 +++ linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
1673 @@ -27,6 +27,7 @@ enum km_type {
1674 KM_PPC_SYNC_PAGE,
1675 KM_PPC_SYNC_ICACHE,
1676 KM_KDB,
1677 + KM_CLEARPAGE,
1678 KM_TYPE_NR
1679 };
1680
1681 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/page_64.h linux-2.6.39.4/arch/powerpc/include/asm/page_64.h
1682 --- linux-2.6.39.4/arch/powerpc/include/asm/page_64.h 2011-05-19 00:06:34.000000000 -0400
1683 +++ linux-2.6.39.4/arch/powerpc/include/asm/page_64.h 2011-08-05 19:44:33.000000000 -0400
1684 @@ -172,15 +172,18 @@ do { \
1685 * stack by default, so in the absence of a PT_GNU_STACK program header
1686 * we turn execute permission off.
1687 */
1688 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1689 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1690 +#define VM_STACK_DEFAULT_FLAGS32 \
1691 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1692 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1693
1694 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1695 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1696
1697 +#ifndef CONFIG_PAX_PAGEEXEC
1698 #define VM_STACK_DEFAULT_FLAGS \
1699 (is_32bit_task() ? \
1700 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1701 +#endif
1702
1703 #include <asm-generic/getorder.h>
1704
1705 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/page.h linux-2.6.39.4/arch/powerpc/include/asm/page.h
1706 --- linux-2.6.39.4/arch/powerpc/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
1707 +++ linux-2.6.39.4/arch/powerpc/include/asm/page.h 2011-08-05 19:44:33.000000000 -0400
1708 @@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1709 * and needs to be executable. This means the whole heap ends
1710 * up being executable.
1711 */
1712 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1713 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1714 +#define VM_DATA_DEFAULT_FLAGS32 \
1715 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1716 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1717
1718 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1719 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1720 @@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1721 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1722 #endif
1723
1724 +#define ktla_ktva(addr) (addr)
1725 +#define ktva_ktla(addr) (addr)
1726 +
1727 #ifndef __ASSEMBLY__
1728
1729 #undef STRICT_MM_TYPECHECKS
1730 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h
1731 --- linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
1732 +++ linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
1733 @@ -2,6 +2,7 @@
1734 #define _ASM_POWERPC_PGTABLE_H
1735 #ifdef __KERNEL__
1736
1737 +#include <linux/const.h>
1738 #ifndef __ASSEMBLY__
1739 #include <asm/processor.h> /* For TASK_SIZE */
1740 #include <asm/mmu.h>
1741 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h
1742 --- linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h 2011-05-19 00:06:34.000000000 -0400
1743 +++ linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h 2011-08-05 19:44:33.000000000 -0400
1744 @@ -21,6 +21,7 @@
1745 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1746 #define _PAGE_USER 0x004 /* usermode access allowed */
1747 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1748 +#define _PAGE_EXEC _PAGE_GUARDED
1749 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1750 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1751 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1752 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/reg.h linux-2.6.39.4/arch/powerpc/include/asm/reg.h
1753 --- linux-2.6.39.4/arch/powerpc/include/asm/reg.h 2011-05-19 00:06:34.000000000 -0400
1754 +++ linux-2.6.39.4/arch/powerpc/include/asm/reg.h 2011-08-05 19:44:33.000000000 -0400
1755 @@ -201,6 +201,7 @@
1756 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1757 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1758 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1759 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1760 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1761 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1762 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1763 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/system.h linux-2.6.39.4/arch/powerpc/include/asm/system.h
1764 --- linux-2.6.39.4/arch/powerpc/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
1765 +++ linux-2.6.39.4/arch/powerpc/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
1766 @@ -533,7 +533,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1767 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1768 #endif
1769
1770 -extern unsigned long arch_align_stack(unsigned long sp);
1771 +#define arch_align_stack(x) ((x) & ~0xfUL)
1772
1773 /* Used in very early kernel initialization. */
1774 extern unsigned long reloc_offset(void);
1775 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h
1776 --- linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
1777 +++ linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
1778 @@ -13,6 +13,8 @@
1779 #define VERIFY_READ 0
1780 #define VERIFY_WRITE 1
1781
1782 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1783 +
1784 /*
1785 * The fs value determines whether argument validity checking should be
1786 * performed or not. If get_fs() == USER_DS, checking is performed, with
1787 @@ -327,52 +329,6 @@ do { \
1788 extern unsigned long __copy_tofrom_user(void __user *to,
1789 const void __user *from, unsigned long size);
1790
1791 -#ifndef __powerpc64__
1792 -
1793 -static inline unsigned long copy_from_user(void *to,
1794 - const void __user *from, unsigned long n)
1795 -{
1796 - unsigned long over;
1797 -
1798 - if (access_ok(VERIFY_READ, from, n))
1799 - return __copy_tofrom_user((__force void __user *)to, from, n);
1800 - if ((unsigned long)from < TASK_SIZE) {
1801 - over = (unsigned long)from + n - TASK_SIZE;
1802 - return __copy_tofrom_user((__force void __user *)to, from,
1803 - n - over) + over;
1804 - }
1805 - return n;
1806 -}
1807 -
1808 -static inline unsigned long copy_to_user(void __user *to,
1809 - const void *from, unsigned long n)
1810 -{
1811 - unsigned long over;
1812 -
1813 - if (access_ok(VERIFY_WRITE, to, n))
1814 - return __copy_tofrom_user(to, (__force void __user *)from, n);
1815 - if ((unsigned long)to < TASK_SIZE) {
1816 - over = (unsigned long)to + n - TASK_SIZE;
1817 - return __copy_tofrom_user(to, (__force void __user *)from,
1818 - n - over) + over;
1819 - }
1820 - return n;
1821 -}
1822 -
1823 -#else /* __powerpc64__ */
1824 -
1825 -#define __copy_in_user(to, from, size) \
1826 - __copy_tofrom_user((to), (from), (size))
1827 -
1828 -extern unsigned long copy_from_user(void *to, const void __user *from,
1829 - unsigned long n);
1830 -extern unsigned long copy_to_user(void __user *to, const void *from,
1831 - unsigned long n);
1832 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
1833 - unsigned long n);
1834 -
1835 -#endif /* __powerpc64__ */
1836 -
1837 static inline unsigned long __copy_from_user_inatomic(void *to,
1838 const void __user *from, unsigned long n)
1839 {
1840 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1841 if (ret == 0)
1842 return 0;
1843 }
1844 +
1845 + if (!__builtin_constant_p(n))
1846 + check_object_size(to, n, false);
1847 +
1848 return __copy_tofrom_user((__force void __user *)to, from, n);
1849 }
1850
1851 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1852 if (ret == 0)
1853 return 0;
1854 }
1855 +
1856 + if (!__builtin_constant_p(n))
1857 + check_object_size(from, n, true);
1858 +
1859 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1860 }
1861
1862 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1863 return __copy_to_user_inatomic(to, from, size);
1864 }
1865
1866 +#ifndef __powerpc64__
1867 +
1868 +static inline unsigned long __must_check copy_from_user(void *to,
1869 + const void __user *from, unsigned long n)
1870 +{
1871 + unsigned long over;
1872 +
1873 + if ((long)n < 0)
1874 + return n;
1875 +
1876 + if (access_ok(VERIFY_READ, from, n)) {
1877 + if (!__builtin_constant_p(n))
1878 + check_object_size(to, n, false);
1879 + return __copy_tofrom_user((__force void __user *)to, from, n);
1880 + }
1881 + if ((unsigned long)from < TASK_SIZE) {
1882 + over = (unsigned long)from + n - TASK_SIZE;
1883 + if (!__builtin_constant_p(n - over))
1884 + check_object_size(to, n - over, false);
1885 + return __copy_tofrom_user((__force void __user *)to, from,
1886 + n - over) + over;
1887 + }
1888 + return n;
1889 +}
1890 +
1891 +static inline unsigned long __must_check copy_to_user(void __user *to,
1892 + const void *from, unsigned long n)
1893 +{
1894 + unsigned long over;
1895 +
1896 + if ((long)n < 0)
1897 + return n;
1898 +
1899 + if (access_ok(VERIFY_WRITE, to, n)) {
1900 + if (!__builtin_constant_p(n))
1901 + check_object_size(from, n, true);
1902 + return __copy_tofrom_user(to, (__force void __user *)from, n);
1903 + }
1904 + if ((unsigned long)to < TASK_SIZE) {
1905 + over = (unsigned long)to + n - TASK_SIZE;
1906 + if (!__builtin_constant_p(n))
1907 + check_object_size(from, n - over, true);
1908 + return __copy_tofrom_user(to, (__force void __user *)from,
1909 + n - over) + over;
1910 + }
1911 + return n;
1912 +}
1913 +
1914 +#else /* __powerpc64__ */
1915 +
1916 +#define __copy_in_user(to, from, size) \
1917 + __copy_tofrom_user((to), (from), (size))
1918 +
1919 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1920 +{
1921 + if ((long)n < 0 || n > INT_MAX)
1922 + return n;
1923 +
1924 + if (!__builtin_constant_p(n))
1925 + check_object_size(to, n, false);
1926 +
1927 + if (likely(access_ok(VERIFY_READ, from, n)))
1928 + n = __copy_from_user(to, from, n);
1929 + else
1930 + memset(to, 0, n);
1931 + return n;
1932 +}
1933 +
1934 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1935 +{
1936 + if ((long)n < 0 || n > INT_MAX)
1937 + return n;
1938 +
1939 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
1940 + if (!__builtin_constant_p(n))
1941 + check_object_size(from, n, true);
1942 + n = __copy_to_user(to, from, n);
1943 + }
1944 + return n;
1945 +}
1946 +
1947 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
1948 + unsigned long n);
1949 +
1950 +#endif /* __powerpc64__ */
1951 +
1952 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1953
1954 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1955 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S
1956 --- linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S 2011-05-19 00:06:34.000000000 -0400
1957 +++ linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S 2011-08-05 19:44:33.000000000 -0400
1958 @@ -495,6 +495,7 @@ storage_fault_common:
1959 std r14,_DAR(r1)
1960 std r15,_DSISR(r1)
1961 addi r3,r1,STACK_FRAME_OVERHEAD
1962 + bl .save_nvgprs
1963 mr r4,r14
1964 mr r5,r15
1965 ld r14,PACA_EXGEN+EX_R14(r13)
1966 @@ -504,8 +505,7 @@ storage_fault_common:
1967 cmpdi r3,0
1968 bne- 1f
1969 b .ret_from_except_lite
1970 -1: bl .save_nvgprs
1971 - mr r5,r3
1972 +1: mr r5,r3
1973 addi r3,r1,STACK_FRAME_OVERHEAD
1974 ld r4,_DAR(r1)
1975 bl .bad_page_fault
1976 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S
1977 --- linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S 2011-05-19 00:06:34.000000000 -0400
1978 +++ linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S 2011-08-05 19:44:33.000000000 -0400
1979 @@ -848,10 +848,10 @@ handle_page_fault:
1980 11: ld r4,_DAR(r1)
1981 ld r5,_DSISR(r1)
1982 addi r3,r1,STACK_FRAME_OVERHEAD
1983 + bl .save_nvgprs
1984 bl .do_page_fault
1985 cmpdi r3,0
1986 beq+ 13f
1987 - bl .save_nvgprs
1988 mr r5,r3
1989 addi r3,r1,STACK_FRAME_OVERHEAD
1990 lwz r4,_DAR(r1)
1991 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/module_32.c linux-2.6.39.4/arch/powerpc/kernel/module_32.c
1992 --- linux-2.6.39.4/arch/powerpc/kernel/module_32.c 2011-05-19 00:06:34.000000000 -0400
1993 +++ linux-2.6.39.4/arch/powerpc/kernel/module_32.c 2011-08-05 19:44:33.000000000 -0400
1994 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
1995 me->arch.core_plt_section = i;
1996 }
1997 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
1998 - printk("Module doesn't contain .plt or .init.plt sections.\n");
1999 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2000 return -ENOEXEC;
2001 }
2002
2003 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2004
2005 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2006 /* Init, or core PLT? */
2007 - if (location >= mod->module_core
2008 - && location < mod->module_core + mod->core_size)
2009 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2010 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2011 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2012 - else
2013 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2014 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2015 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2016 + else {
2017 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2018 + return ~0UL;
2019 + }
2020
2021 /* Find this entry, or if that fails, the next avail. entry */
2022 while (entry->jump[0]) {
2023 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/module.c linux-2.6.39.4/arch/powerpc/kernel/module.c
2024 --- linux-2.6.39.4/arch/powerpc/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
2025 +++ linux-2.6.39.4/arch/powerpc/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
2026 @@ -31,11 +31,24 @@
2027
2028 LIST_HEAD(module_bug_list);
2029
2030 +#ifdef CONFIG_PAX_KERNEXEC
2031 void *module_alloc(unsigned long size)
2032 {
2033 if (size == 0)
2034 return NULL;
2035
2036 + return vmalloc(size);
2037 +}
2038 +
2039 +void *module_alloc_exec(unsigned long size)
2040 +#else
2041 +void *module_alloc(unsigned long size)
2042 +#endif
2043 +
2044 +{
2045 + if (size == 0)
2046 + return NULL;
2047 +
2048 return vmalloc_exec(size);
2049 }
2050
2051 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2052 vfree(module_region);
2053 }
2054
2055 +#ifdef CONFIG_PAX_KERNEXEC
2056 +void module_free_exec(struct module *mod, void *module_region)
2057 +{
2058 + module_free(mod, module_region);
2059 +}
2060 +#endif
2061 +
2062 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2063 const Elf_Shdr *sechdrs,
2064 const char *name)
2065 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/process.c linux-2.6.39.4/arch/powerpc/kernel/process.c
2066 --- linux-2.6.39.4/arch/powerpc/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
2067 +++ linux-2.6.39.4/arch/powerpc/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
2068 @@ -655,8 +655,8 @@ void show_regs(struct pt_regs * regs)
2069 * Lookup NIP late so we have the best change of getting the
2070 * above info out without failing
2071 */
2072 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2073 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2074 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2075 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2076 #endif
2077 show_stack(current, (unsigned long *) regs->gpr[1]);
2078 if (!user_mode(regs))
2079 @@ -1146,10 +1146,10 @@ void show_stack(struct task_struct *tsk,
2080 newsp = stack[0];
2081 ip = stack[STACK_FRAME_LR_SAVE];
2082 if (!firstframe || ip != lr) {
2083 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2084 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2085 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2086 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2087 - printk(" (%pS)",
2088 + printk(" (%pA)",
2089 (void *)current->ret_stack[curr_frame].ret);
2090 curr_frame--;
2091 }
2092 @@ -1169,7 +1169,7 @@ void show_stack(struct task_struct *tsk,
2093 struct pt_regs *regs = (struct pt_regs *)
2094 (sp + STACK_FRAME_OVERHEAD);
2095 lr = regs->link;
2096 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2097 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2098 regs->trap, (void *)regs->nip, (void *)lr);
2099 firstframe = 1;
2100 }
2101 @@ -1244,58 +1244,3 @@ void thread_info_cache_init(void)
2102 }
2103
2104 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2105 -
2106 -unsigned long arch_align_stack(unsigned long sp)
2107 -{
2108 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2109 - sp -= get_random_int() & ~PAGE_MASK;
2110 - return sp & ~0xf;
2111 -}
2112 -
2113 -static inline unsigned long brk_rnd(void)
2114 -{
2115 - unsigned long rnd = 0;
2116 -
2117 - /* 8MB for 32bit, 1GB for 64bit */
2118 - if (is_32bit_task())
2119 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2120 - else
2121 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2122 -
2123 - return rnd << PAGE_SHIFT;
2124 -}
2125 -
2126 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2127 -{
2128 - unsigned long base = mm->brk;
2129 - unsigned long ret;
2130 -
2131 -#ifdef CONFIG_PPC_STD_MMU_64
2132 - /*
2133 - * If we are using 1TB segments and we are allowed to randomise
2134 - * the heap, we can put it above 1TB so it is backed by a 1TB
2135 - * segment. Otherwise the heap will be in the bottom 1TB
2136 - * which always uses 256MB segments and this may result in a
2137 - * performance penalty.
2138 - */
2139 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2140 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2141 -#endif
2142 -
2143 - ret = PAGE_ALIGN(base + brk_rnd());
2144 -
2145 - if (ret < mm->brk)
2146 - return mm->brk;
2147 -
2148 - return ret;
2149 -}
2150 -
2151 -unsigned long randomize_et_dyn(unsigned long base)
2152 -{
2153 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2154 -
2155 - if (ret < base)
2156 - return base;
2157 -
2158 - return ret;
2159 -}
2160 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/signal_32.c linux-2.6.39.4/arch/powerpc/kernel/signal_32.c
2161 --- linux-2.6.39.4/arch/powerpc/kernel/signal_32.c 2011-05-19 00:06:34.000000000 -0400
2162 +++ linux-2.6.39.4/arch/powerpc/kernel/signal_32.c 2011-08-05 19:44:33.000000000 -0400
2163 @@ -858,7 +858,7 @@ int handle_rt_signal32(unsigned long sig
2164 /* Save user registers on the stack */
2165 frame = &rt_sf->uc.uc_mcontext;
2166 addr = frame;
2167 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2168 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2169 if (save_user_regs(regs, frame, 0, 1))
2170 goto badframe;
2171 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2172 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/signal_64.c linux-2.6.39.4/arch/powerpc/kernel/signal_64.c
2173 --- linux-2.6.39.4/arch/powerpc/kernel/signal_64.c 2011-05-19 00:06:34.000000000 -0400
2174 +++ linux-2.6.39.4/arch/powerpc/kernel/signal_64.c 2011-08-05 19:44:33.000000000 -0400
2175 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2176 current->thread.fpscr.val = 0;
2177
2178 /* Set up to return from userspace. */
2179 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2180 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2181 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2182 } else {
2183 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2184 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/traps.c linux-2.6.39.4/arch/powerpc/kernel/traps.c
2185 --- linux-2.6.39.4/arch/powerpc/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
2186 +++ linux-2.6.39.4/arch/powerpc/kernel/traps.c 2011-08-05 19:44:33.000000000 -0400
2187 @@ -96,6 +96,8 @@ static void pmac_backlight_unblank(void)
2188 static inline void pmac_backlight_unblank(void) { }
2189 #endif
2190
2191 +extern void gr_handle_kernel_exploit(void);
2192 +
2193 int die(const char *str, struct pt_regs *regs, long err)
2194 {
2195 static struct {
2196 @@ -170,6 +172,8 @@ int die(const char *str, struct pt_regs
2197 if (panic_on_oops)
2198 panic("Fatal exception");
2199
2200 + gr_handle_kernel_exploit();
2201 +
2202 oops_exit();
2203 do_exit(err);
2204
2205 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/vdso.c linux-2.6.39.4/arch/powerpc/kernel/vdso.c
2206 --- linux-2.6.39.4/arch/powerpc/kernel/vdso.c 2011-05-19 00:06:34.000000000 -0400
2207 +++ linux-2.6.39.4/arch/powerpc/kernel/vdso.c 2011-08-05 19:44:33.000000000 -0400
2208 @@ -36,6 +36,7 @@
2209 #include <asm/firmware.h>
2210 #include <asm/vdso.h>
2211 #include <asm/vdso_datapage.h>
2212 +#include <asm/mman.h>
2213
2214 #include "setup.h"
2215
2216 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2217 vdso_base = VDSO32_MBASE;
2218 #endif
2219
2220 - current->mm->context.vdso_base = 0;
2221 + current->mm->context.vdso_base = ~0UL;
2222
2223 /* vDSO has a problem and was disabled, just don't "enable" it for the
2224 * process
2225 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2226 vdso_base = get_unmapped_area(NULL, vdso_base,
2227 (vdso_pages << PAGE_SHIFT) +
2228 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2229 - 0, 0);
2230 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2231 if (IS_ERR_VALUE(vdso_base)) {
2232 rc = vdso_base;
2233 goto fail_mmapsem;
2234 diff -urNp linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c
2235 --- linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c 2011-05-19 00:06:34.000000000 -0400
2236 +++ linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c 2011-08-05 19:44:33.000000000 -0400
2237 @@ -9,22 +9,6 @@
2238 #include <linux/module.h>
2239 #include <asm/uaccess.h>
2240
2241 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2242 -{
2243 - if (likely(access_ok(VERIFY_READ, from, n)))
2244 - n = __copy_from_user(to, from, n);
2245 - else
2246 - memset(to, 0, n);
2247 - return n;
2248 -}
2249 -
2250 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2251 -{
2252 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2253 - n = __copy_to_user(to, from, n);
2254 - return n;
2255 -}
2256 -
2257 unsigned long copy_in_user(void __user *to, const void __user *from,
2258 unsigned long n)
2259 {
2260 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2261 return n;
2262 }
2263
2264 -EXPORT_SYMBOL(copy_from_user);
2265 -EXPORT_SYMBOL(copy_to_user);
2266 EXPORT_SYMBOL(copy_in_user);
2267
2268 diff -urNp linux-2.6.39.4/arch/powerpc/mm/fault.c linux-2.6.39.4/arch/powerpc/mm/fault.c
2269 --- linux-2.6.39.4/arch/powerpc/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
2270 +++ linux-2.6.39.4/arch/powerpc/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
2271 @@ -31,6 +31,10 @@
2272 #include <linux/kdebug.h>
2273 #include <linux/perf_event.h>
2274 #include <linux/magic.h>
2275 +#include <linux/slab.h>
2276 +#include <linux/pagemap.h>
2277 +#include <linux/compiler.h>
2278 +#include <linux/unistd.h>
2279
2280 #include <asm/firmware.h>
2281 #include <asm/page.h>
2282 @@ -42,6 +46,7 @@
2283 #include <asm/tlbflush.h>
2284 #include <asm/siginfo.h>
2285 #include <mm/mmu_decl.h>
2286 +#include <asm/ptrace.h>
2287
2288 #ifdef CONFIG_KPROBES
2289 static inline int notify_page_fault(struct pt_regs *regs)
2290 @@ -65,6 +70,33 @@ static inline int notify_page_fault(stru
2291 }
2292 #endif
2293
2294 +#ifdef CONFIG_PAX_PAGEEXEC
2295 +/*
2296 + * PaX: decide what to do with offenders (regs->nip = fault address)
2297 + *
2298 + * returns 1 when task should be killed
2299 + */
2300 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2301 +{
2302 + return 1;
2303 +}
2304 +
2305 +void pax_report_insns(void *pc, void *sp)
2306 +{
2307 + unsigned long i;
2308 +
2309 + printk(KERN_ERR "PAX: bytes at PC: ");
2310 + for (i = 0; i < 5; i++) {
2311 + unsigned int c;
2312 + if (get_user(c, (unsigned int __user *)pc+i))
2313 + printk(KERN_CONT "???????? ");
2314 + else
2315 + printk(KERN_CONT "%08x ", c);
2316 + }
2317 + printk("\n");
2318 +}
2319 +#endif
2320 +
2321 /*
2322 * Check whether the instruction at regs->nip is a store using
2323 * an update addressing form which will update r1.
2324 @@ -135,7 +167,7 @@ int __kprobes do_page_fault(struct pt_re
2325 * indicate errors in DSISR but can validly be set in SRR1.
2326 */
2327 if (trap == 0x400)
2328 - error_code &= 0x48200000;
2329 + error_code &= 0x58200000;
2330 else
2331 is_write = error_code & DSISR_ISSTORE;
2332 #else
2333 @@ -258,7 +290,7 @@ good_area:
2334 * "undefined". Of those that can be set, this is the only
2335 * one which seems bad.
2336 */
2337 - if (error_code & 0x10000000)
2338 + if (error_code & DSISR_GUARDED)
2339 /* Guarded storage error. */
2340 goto bad_area;
2341 #endif /* CONFIG_8xx */
2342 @@ -273,7 +305,7 @@ good_area:
2343 * processors use the same I/D cache coherency mechanism
2344 * as embedded.
2345 */
2346 - if (error_code & DSISR_PROTFAULT)
2347 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2348 goto bad_area;
2349 #endif /* CONFIG_PPC_STD_MMU */
2350
2351 @@ -342,6 +374,23 @@ bad_area:
2352 bad_area_nosemaphore:
2353 /* User mode accesses cause a SIGSEGV */
2354 if (user_mode(regs)) {
2355 +
2356 +#ifdef CONFIG_PAX_PAGEEXEC
2357 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2358 +#ifdef CONFIG_PPC_STD_MMU
2359 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2360 +#else
2361 + if (is_exec && regs->nip == address) {
2362 +#endif
2363 + switch (pax_handle_fetch_fault(regs)) {
2364 + }
2365 +
2366 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2367 + do_group_exit(SIGKILL);
2368 + }
2369 + }
2370 +#endif
2371 +
2372 _exception(SIGSEGV, regs, code, address);
2373 return 0;
2374 }
2375 diff -urNp linux-2.6.39.4/arch/powerpc/mm/mmap_64.c linux-2.6.39.4/arch/powerpc/mm/mmap_64.c
2376 --- linux-2.6.39.4/arch/powerpc/mm/mmap_64.c 2011-05-19 00:06:34.000000000 -0400
2377 +++ linux-2.6.39.4/arch/powerpc/mm/mmap_64.c 2011-08-05 19:44:33.000000000 -0400
2378 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2379 */
2380 if (mmap_is_legacy()) {
2381 mm->mmap_base = TASK_UNMAPPED_BASE;
2382 +
2383 +#ifdef CONFIG_PAX_RANDMMAP
2384 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2385 + mm->mmap_base += mm->delta_mmap;
2386 +#endif
2387 +
2388 mm->get_unmapped_area = arch_get_unmapped_area;
2389 mm->unmap_area = arch_unmap_area;
2390 } else {
2391 mm->mmap_base = mmap_base();
2392 +
2393 +#ifdef CONFIG_PAX_RANDMMAP
2394 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2395 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2396 +#endif
2397 +
2398 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2399 mm->unmap_area = arch_unmap_area_topdown;
2400 }
2401 diff -urNp linux-2.6.39.4/arch/powerpc/mm/slice.c linux-2.6.39.4/arch/powerpc/mm/slice.c
2402 --- linux-2.6.39.4/arch/powerpc/mm/slice.c 2011-05-19 00:06:34.000000000 -0400
2403 +++ linux-2.6.39.4/arch/powerpc/mm/slice.c 2011-08-05 19:44:33.000000000 -0400
2404 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2405 if ((mm->task_size - len) < addr)
2406 return 0;
2407 vma = find_vma(mm, addr);
2408 - return (!vma || (addr + len) <= vma->vm_start);
2409 + return check_heap_stack_gap(vma, addr, len);
2410 }
2411
2412 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2413 @@ -256,7 +256,7 @@ full_search:
2414 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2415 continue;
2416 }
2417 - if (!vma || addr + len <= vma->vm_start) {
2418 + if (check_heap_stack_gap(vma, addr, len)) {
2419 /*
2420 * Remember the place where we stopped the search:
2421 */
2422 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2423 }
2424 }
2425
2426 - addr = mm->mmap_base;
2427 - while (addr > len) {
2428 + if (mm->mmap_base < len)
2429 + addr = -ENOMEM;
2430 + else
2431 + addr = mm->mmap_base - len;
2432 +
2433 + while (!IS_ERR_VALUE(addr)) {
2434 /* Go down by chunk size */
2435 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2436 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
2437
2438 /* Check for hit with different page size */
2439 mask = slice_range_to_mask(addr, len);
2440 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2441 * return with success:
2442 */
2443 vma = find_vma(mm, addr);
2444 - if (!vma || (addr + len) <= vma->vm_start) {
2445 + if (check_heap_stack_gap(vma, addr, len)) {
2446 /* remember the address as a hint for next time */
2447 if (use_cache)
2448 mm->free_area_cache = addr;
2449 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2450 mm->cached_hole_size = vma->vm_start - addr;
2451
2452 /* try just below the current vma->vm_start */
2453 - addr = vma->vm_start;
2454 + addr = skip_heap_stack_gap(vma, len);
2455 }
2456
2457 /*
2458 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2459 if (fixed && addr > (mm->task_size - len))
2460 return -EINVAL;
2461
2462 +#ifdef CONFIG_PAX_RANDMMAP
2463 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2464 + addr = 0;
2465 +#endif
2466 +
2467 /* If hint, make sure it matches our alignment restrictions */
2468 if (!fixed && addr) {
2469 addr = _ALIGN_UP(addr, 1ul << pshift);
2470 diff -urNp linux-2.6.39.4/arch/s390/include/asm/elf.h linux-2.6.39.4/arch/s390/include/asm/elf.h
2471 --- linux-2.6.39.4/arch/s390/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
2472 +++ linux-2.6.39.4/arch/s390/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
2473 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2474 the loader. We need to make sure that it is out of the way of the program
2475 that it will "exec", and that there is sufficient room for the brk. */
2476
2477 -extern unsigned long randomize_et_dyn(unsigned long base);
2478 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2479 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2480 +
2481 +#ifdef CONFIG_PAX_ASLR
2482 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2483 +
2484 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2485 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2486 +#endif
2487
2488 /* This yields a mask that user programs can use to figure out what
2489 instruction set this CPU supports. */
2490 @@ -222,7 +228,4 @@ struct linux_binprm;
2491 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2492 int arch_setup_additional_pages(struct linux_binprm *, int);
2493
2494 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2495 -#define arch_randomize_brk arch_randomize_brk
2496 -
2497 #endif
2498 diff -urNp linux-2.6.39.4/arch/s390/include/asm/system.h linux-2.6.39.4/arch/s390/include/asm/system.h
2499 --- linux-2.6.39.4/arch/s390/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
2500 +++ linux-2.6.39.4/arch/s390/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
2501 @@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2502 extern void (*_machine_halt)(void);
2503 extern void (*_machine_power_off)(void);
2504
2505 -extern unsigned long arch_align_stack(unsigned long sp);
2506 +#define arch_align_stack(x) ((x) & ~0xfUL)
2507
2508 static inline int tprot(unsigned long addr)
2509 {
2510 diff -urNp linux-2.6.39.4/arch/s390/include/asm/uaccess.h linux-2.6.39.4/arch/s390/include/asm/uaccess.h
2511 --- linux-2.6.39.4/arch/s390/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
2512 +++ linux-2.6.39.4/arch/s390/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
2513 @@ -234,6 +234,10 @@ static inline unsigned long __must_check
2514 copy_to_user(void __user *to, const void *from, unsigned long n)
2515 {
2516 might_fault();
2517 +
2518 + if ((long)n < 0)
2519 + return n;
2520 +
2521 if (access_ok(VERIFY_WRITE, to, n))
2522 n = __copy_to_user(to, from, n);
2523 return n;
2524 @@ -259,6 +263,9 @@ copy_to_user(void __user *to, const void
2525 static inline unsigned long __must_check
2526 __copy_from_user(void *to, const void __user *from, unsigned long n)
2527 {
2528 + if ((long)n < 0)
2529 + return n;
2530 +
2531 if (__builtin_constant_p(n) && (n <= 256))
2532 return uaccess.copy_from_user_small(n, from, to);
2533 else
2534 @@ -293,6 +300,10 @@ copy_from_user(void *to, const void __us
2535 unsigned int sz = __compiletime_object_size(to);
2536
2537 might_fault();
2538 +
2539 + if ((long)n < 0)
2540 + return n;
2541 +
2542 if (unlikely(sz != -1 && sz < n)) {
2543 copy_from_user_overflow();
2544 return n;
2545 diff -urNp linux-2.6.39.4/arch/s390/Kconfig linux-2.6.39.4/arch/s390/Kconfig
2546 --- linux-2.6.39.4/arch/s390/Kconfig 2011-05-19 00:06:34.000000000 -0400
2547 +++ linux-2.6.39.4/arch/s390/Kconfig 2011-08-05 19:44:33.000000000 -0400
2548 @@ -234,11 +234,9 @@ config S390_EXEC_PROTECT
2549 prompt "Data execute protection"
2550 help
2551 This option allows to enable a buffer overflow protection for user
2552 - space programs and it also selects the addressing mode option above.
2553 - The kernel parameter noexec=on will enable this feature and also
2554 - switch the addressing modes, default is disabled. Enabling this (via
2555 - kernel parameter) on machines earlier than IBM System z9 this will
2556 - reduce system performance.
2557 + space programs.
2558 + Enabling this (via kernel parameter) on machines earlier than IBM
2559 + System z9 this will reduce system performance.
2560
2561 comment "Code generation options"
2562
2563 diff -urNp linux-2.6.39.4/arch/s390/kernel/module.c linux-2.6.39.4/arch/s390/kernel/module.c
2564 --- linux-2.6.39.4/arch/s390/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
2565 +++ linux-2.6.39.4/arch/s390/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
2566 @@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2567
2568 /* Increase core size by size of got & plt and set start
2569 offsets for got and plt. */
2570 - me->core_size = ALIGN(me->core_size, 4);
2571 - me->arch.got_offset = me->core_size;
2572 - me->core_size += me->arch.got_size;
2573 - me->arch.plt_offset = me->core_size;
2574 - me->core_size += me->arch.plt_size;
2575 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
2576 + me->arch.got_offset = me->core_size_rw;
2577 + me->core_size_rw += me->arch.got_size;
2578 + me->arch.plt_offset = me->core_size_rx;
2579 + me->core_size_rx += me->arch.plt_size;
2580 return 0;
2581 }
2582
2583 @@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584 if (info->got_initialized == 0) {
2585 Elf_Addr *gotent;
2586
2587 - gotent = me->module_core + me->arch.got_offset +
2588 + gotent = me->module_core_rw + me->arch.got_offset +
2589 info->got_offset;
2590 *gotent = val;
2591 info->got_initialized = 1;
2592 @@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593 else if (r_type == R_390_GOTENT ||
2594 r_type == R_390_GOTPLTENT)
2595 *(unsigned int *) loc =
2596 - (val + (Elf_Addr) me->module_core - loc) >> 1;
2597 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2598 else if (r_type == R_390_GOT64 ||
2599 r_type == R_390_GOTPLT64)
2600 *(unsigned long *) loc = val;
2601 @@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2603 if (info->plt_initialized == 0) {
2604 unsigned int *ip;
2605 - ip = me->module_core + me->arch.plt_offset +
2606 + ip = me->module_core_rx + me->arch.plt_offset +
2607 info->plt_offset;
2608 #ifndef CONFIG_64BIT
2609 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2610 @@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611 val - loc + 0xffffUL < 0x1ffffeUL) ||
2612 (r_type == R_390_PLT32DBL &&
2613 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2614 - val = (Elf_Addr) me->module_core +
2615 + val = (Elf_Addr) me->module_core_rx +
2616 me->arch.plt_offset +
2617 info->plt_offset;
2618 val += rela->r_addend - loc;
2619 @@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2621 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2622 val = val + rela->r_addend -
2623 - ((Elf_Addr) me->module_core + me->arch.got_offset);
2624 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2625 if (r_type == R_390_GOTOFF16)
2626 *(unsigned short *) loc = val;
2627 else if (r_type == R_390_GOTOFF32)
2628 @@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2629 break;
2630 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2631 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2632 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
2633 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2634 rela->r_addend - loc;
2635 if (r_type == R_390_GOTPC)
2636 *(unsigned int *) loc = val;
2637 diff -urNp linux-2.6.39.4/arch/s390/kernel/process.c linux-2.6.39.4/arch/s390/kernel/process.c
2638 --- linux-2.6.39.4/arch/s390/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
2639 +++ linux-2.6.39.4/arch/s390/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
2640 @@ -334,39 +334,3 @@ unsigned long get_wchan(struct task_stru
2641 }
2642 return 0;
2643 }
2644 -
2645 -unsigned long arch_align_stack(unsigned long sp)
2646 -{
2647 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2648 - sp -= get_random_int() & ~PAGE_MASK;
2649 - return sp & ~0xf;
2650 -}
2651 -
2652 -static inline unsigned long brk_rnd(void)
2653 -{
2654 - /* 8MB for 32bit, 1GB for 64bit */
2655 - if (is_32bit_task())
2656 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2657 - else
2658 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2659 -}
2660 -
2661 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2662 -{
2663 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2664 -
2665 - if (ret < mm->brk)
2666 - return mm->brk;
2667 - return ret;
2668 -}
2669 -
2670 -unsigned long randomize_et_dyn(unsigned long base)
2671 -{
2672 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2673 -
2674 - if (!(current->flags & PF_RANDOMIZE))
2675 - return base;
2676 - if (ret < base)
2677 - return base;
2678 - return ret;
2679 -}
2680 diff -urNp linux-2.6.39.4/arch/s390/kernel/setup.c linux-2.6.39.4/arch/s390/kernel/setup.c
2681 --- linux-2.6.39.4/arch/s390/kernel/setup.c 2011-05-19 00:06:34.000000000 -0400
2682 +++ linux-2.6.39.4/arch/s390/kernel/setup.c 2011-08-05 19:44:33.000000000 -0400
2683 @@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2684 }
2685 early_param("mem", early_parse_mem);
2686
2687 -unsigned int user_mode = HOME_SPACE_MODE;
2688 +unsigned int user_mode = SECONDARY_SPACE_MODE;
2689 EXPORT_SYMBOL_GPL(user_mode);
2690
2691 static int set_amode_and_uaccess(unsigned long user_amode,
2692 @@ -300,17 +300,6 @@ static int set_amode_and_uaccess(unsigne
2693 }
2694 }
2695
2696 -/*
2697 - * Switch kernel/user addressing modes?
2698 - */
2699 -static int __init early_parse_switch_amode(char *p)
2700 -{
2701 - if (user_mode != SECONDARY_SPACE_MODE)
2702 - user_mode = PRIMARY_SPACE_MODE;
2703 - return 0;
2704 -}
2705 -early_param("switch_amode", early_parse_switch_amode);
2706 -
2707 static int __init early_parse_user_mode(char *p)
2708 {
2709 if (p && strcmp(p, "primary") == 0)
2710 @@ -327,20 +316,6 @@ static int __init early_parse_user_mode(
2711 }
2712 early_param("user_mode", early_parse_user_mode);
2713
2714 -#ifdef CONFIG_S390_EXEC_PROTECT
2715 -/*
2716 - * Enable execute protection?
2717 - */
2718 -static int __init early_parse_noexec(char *p)
2719 -{
2720 - if (!strncmp(p, "off", 3))
2721 - return 0;
2722 - user_mode = SECONDARY_SPACE_MODE;
2723 - return 0;
2724 -}
2725 -early_param("noexec", early_parse_noexec);
2726 -#endif /* CONFIG_S390_EXEC_PROTECT */
2727 -
2728 static void setup_addressing_mode(void)
2729 {
2730 if (user_mode == SECONDARY_SPACE_MODE) {
2731 diff -urNp linux-2.6.39.4/arch/s390/mm/mmap.c linux-2.6.39.4/arch/s390/mm/mmap.c
2732 --- linux-2.6.39.4/arch/s390/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
2733 +++ linux-2.6.39.4/arch/s390/mm/mmap.c 2011-08-05 19:44:33.000000000 -0400
2734 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2735 */
2736 if (mmap_is_legacy()) {
2737 mm->mmap_base = TASK_UNMAPPED_BASE;
2738 +
2739 +#ifdef CONFIG_PAX_RANDMMAP
2740 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2741 + mm->mmap_base += mm->delta_mmap;
2742 +#endif
2743 +
2744 mm->get_unmapped_area = arch_get_unmapped_area;
2745 mm->unmap_area = arch_unmap_area;
2746 } else {
2747 mm->mmap_base = mmap_base();
2748 +
2749 +#ifdef CONFIG_PAX_RANDMMAP
2750 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2751 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2752 +#endif
2753 +
2754 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2755 mm->unmap_area = arch_unmap_area_topdown;
2756 }
2757 @@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2758 */
2759 if (mmap_is_legacy()) {
2760 mm->mmap_base = TASK_UNMAPPED_BASE;
2761 +
2762 +#ifdef CONFIG_PAX_RANDMMAP
2763 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2764 + mm->mmap_base += mm->delta_mmap;
2765 +#endif
2766 +
2767 mm->get_unmapped_area = s390_get_unmapped_area;
2768 mm->unmap_area = arch_unmap_area;
2769 } else {
2770 mm->mmap_base = mmap_base();
2771 +
2772 +#ifdef CONFIG_PAX_RANDMMAP
2773 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2774 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2775 +#endif
2776 +
2777 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2778 mm->unmap_area = arch_unmap_area_topdown;
2779 }
2780 diff -urNp linux-2.6.39.4/arch/score/include/asm/system.h linux-2.6.39.4/arch/score/include/asm/system.h
2781 --- linux-2.6.39.4/arch/score/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
2782 +++ linux-2.6.39.4/arch/score/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
2783 @@ -17,7 +17,7 @@ do { \
2784 #define finish_arch_switch(prev) do {} while (0)
2785
2786 typedef void (*vi_handler_t)(void);
2787 -extern unsigned long arch_align_stack(unsigned long sp);
2788 +#define arch_align_stack(x) (x)
2789
2790 #define mb() barrier()
2791 #define rmb() barrier()
2792 diff -urNp linux-2.6.39.4/arch/score/kernel/process.c linux-2.6.39.4/arch/score/kernel/process.c
2793 --- linux-2.6.39.4/arch/score/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
2794 +++ linux-2.6.39.4/arch/score/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
2795 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2796
2797 return task_pt_regs(task)->cp0_epc;
2798 }
2799 -
2800 -unsigned long arch_align_stack(unsigned long sp)
2801 -{
2802 - return sp;
2803 -}
2804 diff -urNp linux-2.6.39.4/arch/sh/mm/mmap.c linux-2.6.39.4/arch/sh/mm/mmap.c
2805 --- linux-2.6.39.4/arch/sh/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
2806 +++ linux-2.6.39.4/arch/sh/mm/mmap.c 2011-08-05 19:44:33.000000000 -0400
2807 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2808 addr = PAGE_ALIGN(addr);
2809
2810 vma = find_vma(mm, addr);
2811 - if (TASK_SIZE - len >= addr &&
2812 - (!vma || addr + len <= vma->vm_start))
2813 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2814 return addr;
2815 }
2816
2817 @@ -106,7 +105,7 @@ full_search:
2818 }
2819 return -ENOMEM;
2820 }
2821 - if (likely(!vma || addr + len <= vma->vm_start)) {
2822 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2823 /*
2824 * Remember the place where we stopped the search:
2825 */
2826 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2827 addr = PAGE_ALIGN(addr);
2828
2829 vma = find_vma(mm, addr);
2830 - if (TASK_SIZE - len >= addr &&
2831 - (!vma || addr + len <= vma->vm_start))
2832 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2833 return addr;
2834 }
2835
2836 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2837 /* make sure it can fit in the remaining address space */
2838 if (likely(addr > len)) {
2839 vma = find_vma(mm, addr-len);
2840 - if (!vma || addr <= vma->vm_start) {
2841 + if (check_heap_stack_gap(vma, addr - len, len)) {
2842 /* remember the address as a hint for next time */
2843 return (mm->free_area_cache = addr-len);
2844 }
2845 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2846 if (unlikely(mm->mmap_base < len))
2847 goto bottomup;
2848
2849 - addr = mm->mmap_base-len;
2850 - if (do_colour_align)
2851 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2852 + addr = mm->mmap_base - len;
2853
2854 do {
2855 + if (do_colour_align)
2856 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2857 /*
2858 * Lookup failure means no vma is above this address,
2859 * else if new region fits below vma->vm_start,
2860 * return with success:
2861 */
2862 vma = find_vma(mm, addr);
2863 - if (likely(!vma || addr+len <= vma->vm_start)) {
2864 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2865 /* remember the address as a hint for next time */
2866 return (mm->free_area_cache = addr);
2867 }
2868 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2869 mm->cached_hole_size = vma->vm_start - addr;
2870
2871 /* try just below the current vma->vm_start */
2872 - addr = vma->vm_start-len;
2873 - if (do_colour_align)
2874 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2875 - } while (likely(len < vma->vm_start));
2876 + addr = skip_heap_stack_gap(vma, len);
2877 + } while (!IS_ERR_VALUE(addr));
2878
2879 bottomup:
2880 /*
2881 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h
2882 --- linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h 2011-05-19 00:06:34.000000000 -0400
2883 +++ linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h 2011-08-18 23:17:16.000000000 -0400
2884 @@ -14,18 +14,40 @@
2885 #define ATOMIC64_INIT(i) { (i) }
2886
2887 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2888 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2889 +{
2890 + return v->counter;
2891 +}
2892 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2893 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2894 +{
2895 + return v->counter;
2896 +}
2897
2898 #define atomic_set(v, i) (((v)->counter) = i)
2899 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2900 +{
2901 + v->counter = i;
2902 +}
2903 #define atomic64_set(v, i) (((v)->counter) = i)
2904 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2905 +{
2906 + v->counter = i;
2907 +}
2908
2909 extern void atomic_add(int, atomic_t *);
2910 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2911 extern void atomic64_add(long, atomic64_t *);
2912 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2913 extern void atomic_sub(int, atomic_t *);
2914 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2915 extern void atomic64_sub(long, atomic64_t *);
2916 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2917
2918 extern int atomic_add_ret(int, atomic_t *);
2919 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2920 extern long atomic64_add_ret(long, atomic64_t *);
2921 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2922 extern int atomic_sub_ret(int, atomic_t *);
2923 extern long atomic64_sub_ret(long, atomic64_t *);
2924
2925 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2926 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2927
2928 #define atomic_inc_return(v) atomic_add_ret(1, v)
2929 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2930 +{
2931 + return atomic_add_ret_unchecked(1, v);
2932 +}
2933 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2934 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2935 +{
2936 + return atomic64_add_ret_unchecked(1, v);
2937 +}
2938
2939 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2940 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2941
2942 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2943 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2944 +{
2945 + return atomic_add_ret_unchecked(i, v);
2946 +}
2947 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2948 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2949 +{
2950 + return atomic64_add_ret_unchecked(i, v);
2951 +}
2952
2953 /*
2954 * atomic_inc_and_test - increment and test
2955 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2956 * other cases.
2957 */
2958 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2959 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2960 +{
2961 + return atomic_inc_return_unchecked(v) == 0;
2962 +}
2963 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2964
2965 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2966 @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
2967 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2968
2969 #define atomic_inc(v) atomic_add(1, v)
2970 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2971 +{
2972 + atomic_add_unchecked(1, v);
2973 +}
2974 #define atomic64_inc(v) atomic64_add(1, v)
2975 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2976 +{
2977 + atomic64_add_unchecked(1, v);
2978 +}
2979
2980 #define atomic_dec(v) atomic_sub(1, v)
2981 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2982 +{
2983 + atomic_sub_unchecked(1, v);
2984 +}
2985 #define atomic64_dec(v) atomic64_sub(1, v)
2986 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2987 +{
2988 + atomic64_sub_unchecked(1, v);
2989 +}
2990
2991 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2992 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2993
2994 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2995 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2996 +{
2997 + return cmpxchg(&v->counter, old, new);
2998 +}
2999 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3000 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3001 +{
3002 + return xchg(&v->counter, new);
3003 +}
3004
3005 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3006 {
3007 - int c, old;
3008 + int c, old, new;
3009 c = atomic_read(v);
3010 for (;;) {
3011 - if (unlikely(c == (u)))
3012 + if (unlikely(c == u))
3013 break;
3014 - old = atomic_cmpxchg((v), c, c + (a));
3015 +
3016 + asm volatile("addcc %2, %0, %0\n"
3017 +
3018 +#ifdef CONFIG_PAX_REFCOUNT
3019 + "tvs %%icc, 6\n"
3020 +#endif
3021 +
3022 + : "=r" (new)
3023 + : "0" (c), "ir" (a)
3024 + : "cc");
3025 +
3026 + old = atomic_cmpxchg(v, c, new);
3027 if (likely(old == c))
3028 break;
3029 c = old;
3030 }
3031 - return c != (u);
3032 + return c != u;
3033 }
3034
3035 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3036 @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
3037 #define atomic64_cmpxchg(v, o, n) \
3038 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3039 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3040 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3041 +{
3042 + return xchg(&v->counter, new);
3043 +}
3044
3045 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3046 {
3047 - long c, old;
3048 + long c, old, new;
3049 c = atomic64_read(v);
3050 for (;;) {
3051 - if (unlikely(c == (u)))
3052 + if (unlikely(c == u))
3053 break;
3054 - old = atomic64_cmpxchg((v), c, c + (a));
3055 +
3056 + asm volatile("addcc %2, %0, %0\n"
3057 +
3058 +#ifdef CONFIG_PAX_REFCOUNT
3059 + "tvs %%xcc, 6\n"
3060 +#endif
3061 +
3062 + : "=r" (new)
3063 + : "0" (c), "ir" (a)
3064 + : "cc");
3065 +
3066 + old = atomic64_cmpxchg(v, c, new);
3067 if (likely(old == c))
3068 break;
3069 c = old;
3070 }
3071 - return c != (u);
3072 + return c != u;
3073 }
3074
3075 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3076 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/cache.h linux-2.6.39.4/arch/sparc/include/asm/cache.h
3077 --- linux-2.6.39.4/arch/sparc/include/asm/cache.h 2011-05-19 00:06:34.000000000 -0400
3078 +++ linux-2.6.39.4/arch/sparc/include/asm/cache.h 2011-08-05 19:44:33.000000000 -0400
3079 @@ -10,7 +10,7 @@
3080 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3081
3082 #define L1_CACHE_SHIFT 5
3083 -#define L1_CACHE_BYTES 32
3084 +#define L1_CACHE_BYTES 32UL
3085
3086 #ifdef CONFIG_SPARC32
3087 #define SMP_CACHE_BYTES_SHIFT 5
3088 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/elf_32.h linux-2.6.39.4/arch/sparc/include/asm/elf_32.h
3089 --- linux-2.6.39.4/arch/sparc/include/asm/elf_32.h 2011-05-19 00:06:34.000000000 -0400
3090 +++ linux-2.6.39.4/arch/sparc/include/asm/elf_32.h 2011-08-05 19:44:33.000000000 -0400
3091 @@ -114,6 +114,13 @@ typedef struct {
3092
3093 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3094
3095 +#ifdef CONFIG_PAX_ASLR
3096 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3097 +
3098 +#define PAX_DELTA_MMAP_LEN 16
3099 +#define PAX_DELTA_STACK_LEN 16
3100 +#endif
3101 +
3102 /* This yields a mask that user programs can use to figure out what
3103 instruction set this cpu supports. This can NOT be done in userspace
3104 on Sparc. */
3105 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/elf_64.h linux-2.6.39.4/arch/sparc/include/asm/elf_64.h
3106 --- linux-2.6.39.4/arch/sparc/include/asm/elf_64.h 2011-05-19 00:06:34.000000000 -0400
3107 +++ linux-2.6.39.4/arch/sparc/include/asm/elf_64.h 2011-08-05 19:44:33.000000000 -0400
3108 @@ -162,6 +162,12 @@ typedef struct {
3109 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3110 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3111
3112 +#ifdef CONFIG_PAX_ASLR
3113 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3114 +
3115 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3116 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3117 +#endif
3118
3119 /* This yields a mask that user programs can use to figure out what
3120 instruction set this cpu supports. */
3121 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h
3122 --- linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h 2011-05-19 00:06:34.000000000 -0400
3123 +++ linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h 2011-08-05 19:44:33.000000000 -0400
3124 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3125 BTFIXUPDEF_INT(page_none)
3126 BTFIXUPDEF_INT(page_copy)
3127 BTFIXUPDEF_INT(page_readonly)
3128 +
3129 +#ifdef CONFIG_PAX_PAGEEXEC
3130 +BTFIXUPDEF_INT(page_shared_noexec)
3131 +BTFIXUPDEF_INT(page_copy_noexec)
3132 +BTFIXUPDEF_INT(page_readonly_noexec)
3133 +#endif
3134 +
3135 BTFIXUPDEF_INT(page_kernel)
3136
3137 #define PMD_SHIFT SUN4C_PMD_SHIFT
3138 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3139 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3140 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3141
3142 +#ifdef CONFIG_PAX_PAGEEXEC
3143 +extern pgprot_t PAGE_SHARED_NOEXEC;
3144 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3145 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3146 +#else
3147 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3148 +# define PAGE_COPY_NOEXEC PAGE_COPY
3149 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3150 +#endif
3151 +
3152 extern unsigned long page_kernel;
3153
3154 #ifdef MODULE
3155 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h
3156 --- linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h 2011-05-19 00:06:34.000000000 -0400
3157 +++ linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h 2011-08-05 19:44:33.000000000 -0400
3158 @@ -115,6 +115,13 @@
3159 SRMMU_EXEC | SRMMU_REF)
3160 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3161 SRMMU_EXEC | SRMMU_REF)
3162 +
3163 +#ifdef CONFIG_PAX_PAGEEXEC
3164 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3165 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3166 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3167 +#endif
3168 +
3169 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3170 SRMMU_DIRTY | SRMMU_REF)
3171
3172 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h
3173 --- linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h 2011-05-19 00:06:34.000000000 -0400
3174 +++ linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h 2011-08-18 23:17:16.000000000 -0400
3175 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3176
3177 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3178
3179 -static void inline arch_read_lock(arch_rwlock_t *lock)
3180 +static inline void arch_read_lock(arch_rwlock_t *lock)
3181 {
3182 unsigned long tmp1, tmp2;
3183
3184 __asm__ __volatile__ (
3185 "1: ldsw [%2], %0\n"
3186 " brlz,pn %0, 2f\n"
3187 -"4: add %0, 1, %1\n"
3188 +"4: addcc %0, 1, %1\n"
3189 +
3190 +#ifdef CONFIG_PAX_REFCOUNT
3191 +" tvs %%icc, 6\n"
3192 +#endif
3193 +
3194 " cas [%2], %0, %1\n"
3195 " cmp %0, %1\n"
3196 " bne,pn %%icc, 1b\n"
3197 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3198 " .previous"
3199 : "=&r" (tmp1), "=&r" (tmp2)
3200 : "r" (lock)
3201 - : "memory");
3202 + : "memory", "cc");
3203 }
3204
3205 -static int inline arch_read_trylock(arch_rwlock_t *lock)
3206 +static inline int arch_read_trylock(arch_rwlock_t *lock)
3207 {
3208 int tmp1, tmp2;
3209
3210 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3211 "1: ldsw [%2], %0\n"
3212 " brlz,a,pn %0, 2f\n"
3213 " mov 0, %0\n"
3214 -" add %0, 1, %1\n"
3215 +" addcc %0, 1, %1\n"
3216 +
3217 +#ifdef CONFIG_PAX_REFCOUNT
3218 +" tvs %%icc, 6\n"
3219 +#endif
3220 +
3221 " cas [%2], %0, %1\n"
3222 " cmp %0, %1\n"
3223 " bne,pn %%icc, 1b\n"
3224 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3225 return tmp1;
3226 }
3227
3228 -static void inline arch_read_unlock(arch_rwlock_t *lock)
3229 +static inline void arch_read_unlock(arch_rwlock_t *lock)
3230 {
3231 unsigned long tmp1, tmp2;
3232
3233 __asm__ __volatile__(
3234 "1: lduw [%2], %0\n"
3235 -" sub %0, 1, %1\n"
3236 +" subcc %0, 1, %1\n"
3237 +
3238 +#ifdef CONFIG_PAX_REFCOUNT
3239 +" tvs %%icc, 6\n"
3240 +#endif
3241 +
3242 " cas [%2], %0, %1\n"
3243 " cmp %0, %1\n"
3244 " bne,pn %%xcc, 1b\n"
3245 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3246 : "memory");
3247 }
3248
3249 -static void inline arch_write_lock(arch_rwlock_t *lock)
3250 +static inline void arch_write_lock(arch_rwlock_t *lock)
3251 {
3252 unsigned long mask, tmp1, tmp2;
3253
3254 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3255 : "memory");
3256 }
3257
3258 -static void inline arch_write_unlock(arch_rwlock_t *lock)
3259 +static inline void arch_write_unlock(arch_rwlock_t *lock)
3260 {
3261 __asm__ __volatile__(
3262 " stw %%g0, [%0]"
3263 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3264 : "memory");
3265 }
3266
3267 -static int inline arch_write_trylock(arch_rwlock_t *lock)
3268 +static inline int arch_write_trylock(arch_rwlock_t *lock)
3269 {
3270 unsigned long mask, tmp1, tmp2, result;
3271
3272 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h
3273 --- linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h 2011-05-19 00:06:34.000000000 -0400
3274 +++ linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h 2011-08-05 19:44:33.000000000 -0400
3275 @@ -50,6 +50,8 @@ struct thread_info {
3276 unsigned long w_saved;
3277
3278 struct restart_block restart_block;
3279 +
3280 + unsigned long lowest_stack;
3281 };
3282
3283 /*
3284 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h
3285 --- linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h 2011-05-19 00:06:34.000000000 -0400
3286 +++ linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h 2011-08-05 19:44:33.000000000 -0400
3287 @@ -63,6 +63,8 @@ struct thread_info {
3288 struct pt_regs *kern_una_regs;
3289 unsigned int kern_una_insn;
3290
3291 + unsigned long lowest_stack;
3292 +
3293 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3294 };
3295
3296 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h
3297 --- linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h 2011-05-19 00:06:34.000000000 -0400
3298 +++ linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h 2011-08-05 19:44:33.000000000 -0400
3299 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3300
3301 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3302 {
3303 - if (n && __access_ok((unsigned long) to, n))
3304 + if ((long)n < 0)
3305 + return n;
3306 +
3307 + if (n && __access_ok((unsigned long) to, n)) {
3308 + if (!__builtin_constant_p(n))
3309 + check_object_size(from, n, true);
3310 return __copy_user(to, (__force void __user *) from, n);
3311 - else
3312 + } else
3313 return n;
3314 }
3315
3316 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3317 {
3318 + if ((long)n < 0)
3319 + return n;
3320 +
3321 + if (!__builtin_constant_p(n))
3322 + check_object_size(from, n, true);
3323 +
3324 return __copy_user(to, (__force void __user *) from, n);
3325 }
3326
3327 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3328 {
3329 - if (n && __access_ok((unsigned long) from, n))
3330 + if ((long)n < 0)
3331 + return n;
3332 +
3333 + if (n && __access_ok((unsigned long) from, n)) {
3334 + if (!__builtin_constant_p(n))
3335 + check_object_size(to, n, false);
3336 return __copy_user((__force void __user *) to, from, n);
3337 - else
3338 + } else
3339 return n;
3340 }
3341
3342 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3343 {
3344 + if ((long)n < 0)
3345 + return n;
3346 +
3347 return __copy_user((__force void __user *) to, from, n);
3348 }
3349
3350 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h
3351 --- linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h 2011-05-19 00:06:34.000000000 -0400
3352 +++ linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h 2011-08-05 19:44:33.000000000 -0400
3353 @@ -10,6 +10,7 @@
3354 #include <linux/compiler.h>
3355 #include <linux/string.h>
3356 #include <linux/thread_info.h>
3357 +#include <linux/kernel.h>
3358 #include <asm/asi.h>
3359 #include <asm/system.h>
3360 #include <asm/spitfire.h>
3361 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3362 static inline unsigned long __must_check
3363 copy_from_user(void *to, const void __user *from, unsigned long size)
3364 {
3365 - unsigned long ret = ___copy_from_user(to, from, size);
3366 + unsigned long ret;
3367
3368 + if ((long)size < 0 || size > INT_MAX)
3369 + return size;
3370 +
3371 + if (!__builtin_constant_p(size))
3372 + check_object_size(to, size, false);
3373 +
3374 + ret = ___copy_from_user(to, from, size);
3375 if (unlikely(ret))
3376 ret = copy_from_user_fixup(to, from, size);
3377
3378 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3379 static inline unsigned long __must_check
3380 copy_to_user(void __user *to, const void *from, unsigned long size)
3381 {
3382 - unsigned long ret = ___copy_to_user(to, from, size);
3383 + unsigned long ret;
3384 +
3385 + if ((long)size < 0 || size > INT_MAX)
3386 + return size;
3387 +
3388 + if (!__builtin_constant_p(size))
3389 + check_object_size(from, size, true);
3390
3391 + ret = ___copy_to_user(to, from, size);
3392 if (unlikely(ret))
3393 ret = copy_to_user_fixup(to, from, size);
3394 return ret;
3395 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/uaccess.h linux-2.6.39.4/arch/sparc/include/asm/uaccess.h
3396 --- linux-2.6.39.4/arch/sparc/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
3397 +++ linux-2.6.39.4/arch/sparc/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
3398 @@ -1,5 +1,13 @@
3399 #ifndef ___ASM_SPARC_UACCESS_H
3400 #define ___ASM_SPARC_UACCESS_H
3401 +
3402 +#ifdef __KERNEL__
3403 +#ifndef __ASSEMBLY__
3404 +#include <linux/types.h>
3405 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3406 +#endif
3407 +#endif
3408 +
3409 #if defined(__sparc__) && defined(__arch64__)
3410 #include <asm/uaccess_64.h>
3411 #else
3412 diff -urNp linux-2.6.39.4/arch/sparc/kernel/Makefile linux-2.6.39.4/arch/sparc/kernel/Makefile
3413 --- linux-2.6.39.4/arch/sparc/kernel/Makefile 2011-05-19 00:06:34.000000000 -0400
3414 +++ linux-2.6.39.4/arch/sparc/kernel/Makefile 2011-08-05 19:44:33.000000000 -0400
3415 @@ -3,7 +3,7 @@
3416 #
3417
3418 asflags-y := -ansi
3419 -ccflags-y := -Werror
3420 +#ccflags-y := -Werror
3421
3422 extra-y := head_$(BITS).o
3423 extra-y += init_task.o
3424 diff -urNp linux-2.6.39.4/arch/sparc/kernel/process_32.c linux-2.6.39.4/arch/sparc/kernel/process_32.c
3425 --- linux-2.6.39.4/arch/sparc/kernel/process_32.c 2011-05-19 00:06:34.000000000 -0400
3426 +++ linux-2.6.39.4/arch/sparc/kernel/process_32.c 2011-08-05 19:44:33.000000000 -0400
3427 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
3428 rw->ins[4], rw->ins[5],
3429 rw->ins[6],
3430 rw->ins[7]);
3431 - printk("%pS\n", (void *) rw->ins[7]);
3432 + printk("%pA\n", (void *) rw->ins[7]);
3433 rw = (struct reg_window32 *) rw->ins[6];
3434 }
3435 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3436 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
3437
3438 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3439 r->psr, r->pc, r->npc, r->y, print_tainted());
3440 - printk("PC: <%pS>\n", (void *) r->pc);
3441 + printk("PC: <%pA>\n", (void *) r->pc);
3442 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3443 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3444 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3445 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3446 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3447 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3448 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3449 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3450
3451 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3452 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3453 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
3454 rw = (struct reg_window32 *) fp;
3455 pc = rw->ins[7];
3456 printk("[%08lx : ", pc);
3457 - printk("%pS ] ", (void *) pc);
3458 + printk("%pA ] ", (void *) pc);
3459 fp = rw->ins[6];
3460 } while (++count < 16);
3461 printk("\n");
3462 diff -urNp linux-2.6.39.4/arch/sparc/kernel/process_64.c linux-2.6.39.4/arch/sparc/kernel/process_64.c
3463 --- linux-2.6.39.4/arch/sparc/kernel/process_64.c 2011-05-19 00:06:34.000000000 -0400
3464 +++ linux-2.6.39.4/arch/sparc/kernel/process_64.c 2011-08-05 19:44:33.000000000 -0400
3465 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3466 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3467 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3468 if (regs->tstate & TSTATE_PRIV)
3469 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3470 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3471 }
3472
3473 void show_regs(struct pt_regs *regs)
3474 {
3475 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3476 regs->tpc, regs->tnpc, regs->y, print_tainted());
3477 - printk("TPC: <%pS>\n", (void *) regs->tpc);
3478 + printk("TPC: <%pA>\n", (void *) regs->tpc);
3479 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3480 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3481 regs->u_regs[3]);
3482 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3483 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3484 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3485 regs->u_regs[15]);
3486 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3487 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3488 show_regwindow(regs);
3489 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3490 }
3491 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3492 ((tp && tp->task) ? tp->task->pid : -1));
3493
3494 if (gp->tstate & TSTATE_PRIV) {
3495 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3496 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3497 (void *) gp->tpc,
3498 (void *) gp->o7,
3499 (void *) gp->i7,
3500 diff -urNp linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c
3501 --- linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c 2011-05-19 00:06:34.000000000 -0400
3502 +++ linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c 2011-08-05 19:44:33.000000000 -0400
3503 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3504 if (ARCH_SUN4C && len > 0x20000000)
3505 return -ENOMEM;
3506 if (!addr)
3507 - addr = TASK_UNMAPPED_BASE;
3508 + addr = current->mm->mmap_base;
3509
3510 if (flags & MAP_SHARED)
3511 addr = COLOUR_ALIGN(addr);
3512 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3513 }
3514 if (TASK_SIZE - PAGE_SIZE - len < addr)
3515 return -ENOMEM;
3516 - if (!vmm || addr + len <= vmm->vm_start)
3517 + if (check_heap_stack_gap(vmm, addr, len))
3518 return addr;
3519 addr = vmm->vm_end;
3520 if (flags & MAP_SHARED)
3521 diff -urNp linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c
3522 --- linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c 2011-05-19 00:06:34.000000000 -0400
3523 +++ linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c 2011-08-05 19:44:33.000000000 -0400
3524 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3525 /* We do not accept a shared mapping if it would violate
3526 * cache aliasing constraints.
3527 */
3528 - if ((flags & MAP_SHARED) &&
3529 + if ((filp || (flags & MAP_SHARED)) &&
3530 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3531 return -EINVAL;
3532 return addr;
3533 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3534 if (filp || (flags & MAP_SHARED))
3535 do_color_align = 1;
3536
3537 +#ifdef CONFIG_PAX_RANDMMAP
3538 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3539 +#endif
3540 +
3541 if (addr) {
3542 if (do_color_align)
3543 addr = COLOUR_ALIGN(addr, pgoff);
3544 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3545 addr = PAGE_ALIGN(addr);
3546
3547 vma = find_vma(mm, addr);
3548 - if (task_size - len >= addr &&
3549 - (!vma || addr + len <= vma->vm_start))
3550 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3551 return addr;
3552 }
3553
3554 if (len > mm->cached_hole_size) {
3555 - start_addr = addr = mm->free_area_cache;
3556 + start_addr = addr = mm->free_area_cache;
3557 } else {
3558 - start_addr = addr = TASK_UNMAPPED_BASE;
3559 + start_addr = addr = mm->mmap_base;
3560 mm->cached_hole_size = 0;
3561 }
3562
3563 @@ -174,14 +177,14 @@ full_search:
3564 vma = find_vma(mm, VA_EXCLUDE_END);
3565 }
3566 if (unlikely(task_size < addr)) {
3567 - if (start_addr != TASK_UNMAPPED_BASE) {
3568 - start_addr = addr = TASK_UNMAPPED_BASE;
3569 + if (start_addr != mm->mmap_base) {
3570 + start_addr = addr = mm->mmap_base;
3571 mm->cached_hole_size = 0;
3572 goto full_search;
3573 }
3574 return -ENOMEM;
3575 }
3576 - if (likely(!vma || addr + len <= vma->vm_start)) {
3577 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3578 /*
3579 * Remember the place where we stopped the search:
3580 */
3581 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3582 /* We do not accept a shared mapping if it would violate
3583 * cache aliasing constraints.
3584 */
3585 - if ((flags & MAP_SHARED) &&
3586 + if ((filp || (flags & MAP_SHARED)) &&
3587 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3588 return -EINVAL;
3589 return addr;
3590 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3591 addr = PAGE_ALIGN(addr);
3592
3593 vma = find_vma(mm, addr);
3594 - if (task_size - len >= addr &&
3595 - (!vma || addr + len <= vma->vm_start))
3596 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3597 return addr;
3598 }
3599
3600 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3601 /* make sure it can fit in the remaining address space */
3602 if (likely(addr > len)) {
3603 vma = find_vma(mm, addr-len);
3604 - if (!vma || addr <= vma->vm_start) {
3605 + if (check_heap_stack_gap(vma, addr - len, len)) {
3606 /* remember the address as a hint for next time */
3607 return (mm->free_area_cache = addr-len);
3608 }
3609 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3610 if (unlikely(mm->mmap_base < len))
3611 goto bottomup;
3612
3613 - addr = mm->mmap_base-len;
3614 - if (do_color_align)
3615 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3616 + addr = mm->mmap_base - len;
3617
3618 do {
3619 + if (do_color_align)
3620 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3621 /*
3622 * Lookup failure means no vma is above this address,
3623 * else if new region fits below vma->vm_start,
3624 * return with success:
3625 */
3626 vma = find_vma(mm, addr);
3627 - if (likely(!vma || addr+len <= vma->vm_start)) {
3628 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3629 /* remember the address as a hint for next time */
3630 return (mm->free_area_cache = addr);
3631 }
3632 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3633 mm->cached_hole_size = vma->vm_start - addr;
3634
3635 /* try just below the current vma->vm_start */
3636 - addr = vma->vm_start-len;
3637 - if (do_color_align)
3638 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3639 - } while (likely(len < vma->vm_start));
3640 + addr = skip_heap_stack_gap(vma, len);
3641 + } while (!IS_ERR_VALUE(addr));
3642
3643 bottomup:
3644 /*
3645 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3646 gap == RLIM_INFINITY ||
3647 sysctl_legacy_va_layout) {
3648 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3649 +
3650 +#ifdef CONFIG_PAX_RANDMMAP
3651 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3652 + mm->mmap_base += mm->delta_mmap;
3653 +#endif
3654 +
3655 mm->get_unmapped_area = arch_get_unmapped_area;
3656 mm->unmap_area = arch_unmap_area;
3657 } else {
3658 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3659 gap = (task_size / 6 * 5);
3660
3661 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3662 +
3663 +#ifdef CONFIG_PAX_RANDMMAP
3664 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3665 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3666 +#endif
3667 +
3668 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3669 mm->unmap_area = arch_unmap_area_topdown;
3670 }
3671 diff -urNp linux-2.6.39.4/arch/sparc/kernel/traps_32.c linux-2.6.39.4/arch/sparc/kernel/traps_32.c
3672 --- linux-2.6.39.4/arch/sparc/kernel/traps_32.c 2011-05-19 00:06:34.000000000 -0400
3673 +++ linux-2.6.39.4/arch/sparc/kernel/traps_32.c 2011-08-05 19:44:33.000000000 -0400
3674 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3675 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3676 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3677
3678 +extern void gr_handle_kernel_exploit(void);
3679 +
3680 void die_if_kernel(char *str, struct pt_regs *regs)
3681 {
3682 static int die_counter;
3683 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3684 count++ < 30 &&
3685 (((unsigned long) rw) >= PAGE_OFFSET) &&
3686 !(((unsigned long) rw) & 0x7)) {
3687 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
3688 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
3689 (void *) rw->ins[7]);
3690 rw = (struct reg_window32 *)rw->ins[6];
3691 }
3692 }
3693 printk("Instruction DUMP:");
3694 instruction_dump ((unsigned long *) regs->pc);
3695 - if(regs->psr & PSR_PS)
3696 + if(regs->psr & PSR_PS) {
3697 + gr_handle_kernel_exploit();
3698 do_exit(SIGKILL);
3699 + }
3700 do_exit(SIGSEGV);
3701 }
3702
3703 diff -urNp linux-2.6.39.4/arch/sparc/kernel/traps_64.c linux-2.6.39.4/arch/sparc/kernel/traps_64.c
3704 --- linux-2.6.39.4/arch/sparc/kernel/traps_64.c 2011-05-19 00:06:34.000000000 -0400
3705 +++ linux-2.6.39.4/arch/sparc/kernel/traps_64.c 2011-08-05 19:44:33.000000000 -0400
3706 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3707 i + 1,
3708 p->trapstack[i].tstate, p->trapstack[i].tpc,
3709 p->trapstack[i].tnpc, p->trapstack[i].tt);
3710 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3711 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3712 }
3713 }
3714
3715 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3716
3717 lvl -= 0x100;
3718 if (regs->tstate & TSTATE_PRIV) {
3719 +
3720 +#ifdef CONFIG_PAX_REFCOUNT
3721 + if (lvl == 6)
3722 + pax_report_refcount_overflow(regs);
3723 +#endif
3724 +
3725 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3726 die_if_kernel(buffer, regs);
3727 }
3728 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3729 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3730 {
3731 char buffer[32];
3732 -
3733 +
3734 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3735 0, lvl, SIGTRAP) == NOTIFY_STOP)
3736 return;
3737
3738 +#ifdef CONFIG_PAX_REFCOUNT
3739 + if (lvl == 6)
3740 + pax_report_refcount_overflow(regs);
3741 +#endif
3742 +
3743 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3744
3745 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3746 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3747 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3748 printk("%s" "ERROR(%d): ",
3749 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3750 - printk("TPC<%pS>\n", (void *) regs->tpc);
3751 + printk("TPC<%pA>\n", (void *) regs->tpc);
3752 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3753 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3754 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3755 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3756 smp_processor_id(),
3757 (type & 0x1) ? 'I' : 'D',
3758 regs->tpc);
3759 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3760 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3761 panic("Irrecoverable Cheetah+ parity error.");
3762 }
3763
3764 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3765 smp_processor_id(),
3766 (type & 0x1) ? 'I' : 'D',
3767 regs->tpc);
3768 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3769 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3770 }
3771
3772 struct sun4v_error_entry {
3773 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3774
3775 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3776 regs->tpc, tl);
3777 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3778 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3779 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3780 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3781 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3782 (void *) regs->u_regs[UREG_I7]);
3783 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3784 "pte[%lx] error[%lx]\n",
3785 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3786
3787 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3788 regs->tpc, tl);
3789 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3790 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3791 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3792 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3793 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3794 (void *) regs->u_regs[UREG_I7]);
3795 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3796 "pte[%lx] error[%lx]\n",
3797 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3798 fp = (unsigned long)sf->fp + STACK_BIAS;
3799 }
3800
3801 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3802 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3803 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3804 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3805 int index = tsk->curr_ret_stack;
3806 if (tsk->ret_stack && index >= graph) {
3807 pc = tsk->ret_stack[index - graph].ret;
3808 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3809 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3810 graph++;
3811 }
3812 }
3813 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3814 return (struct reg_window *) (fp + STACK_BIAS);
3815 }
3816
3817 +extern void gr_handle_kernel_exploit(void);
3818 +
3819 void die_if_kernel(char *str, struct pt_regs *regs)
3820 {
3821 static int die_counter;
3822 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3823 while (rw &&
3824 count++ < 30 &&
3825 kstack_valid(tp, (unsigned long) rw)) {
3826 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
3827 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
3828 (void *) rw->ins[7]);
3829
3830 rw = kernel_stack_up(rw);
3831 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3832 }
3833 user_instruction_dump ((unsigned int __user *) regs->tpc);
3834 }
3835 - if (regs->tstate & TSTATE_PRIV)
3836 + if (regs->tstate & TSTATE_PRIV) {
3837 + gr_handle_kernel_exploit();
3838 do_exit(SIGKILL);
3839 + }
3840 do_exit(SIGSEGV);
3841 }
3842 EXPORT_SYMBOL(die_if_kernel);
3843 diff -urNp linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c
3844 --- linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c 2011-05-19 00:06:34.000000000 -0400
3845 +++ linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c 2011-08-05 19:44:33.000000000 -0400
3846 @@ -278,7 +278,7 @@ static void log_unaligned(struct pt_regs
3847 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3848
3849 if (__ratelimit(&ratelimit)) {
3850 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
3851 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
3852 regs->tpc, (void *) regs->tpc);
3853 }
3854 }
3855 diff -urNp linux-2.6.39.4/arch/sparc/lib/atomic_64.S linux-2.6.39.4/arch/sparc/lib/atomic_64.S
3856 --- linux-2.6.39.4/arch/sparc/lib/atomic_64.S 2011-05-19 00:06:34.000000000 -0400
3857 +++ linux-2.6.39.4/arch/sparc/lib/atomic_64.S 2011-08-05 19:44:33.000000000 -0400
3858 @@ -18,7 +18,12 @@
3859 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3860 BACKOFF_SETUP(%o2)
3861 1: lduw [%o1], %g1
3862 - add %g1, %o0, %g7
3863 + addcc %g1, %o0, %g7
3864 +
3865 +#ifdef CONFIG_PAX_REFCOUNT
3866 + tvs %icc, 6
3867 +#endif
3868 +
3869 cas [%o1], %g1, %g7
3870 cmp %g1, %g7
3871 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3872 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3873 2: BACKOFF_SPIN(%o2, %o3, 1b)
3874 .size atomic_add, .-atomic_add
3875
3876 + .globl atomic_add_unchecked
3877 + .type atomic_add_unchecked,#function
3878 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3879 + BACKOFF_SETUP(%o2)
3880 +1: lduw [%o1], %g1
3881 + add %g1, %o0, %g7
3882 + cas [%o1], %g1, %g7
3883 + cmp %g1, %g7
3884 + bne,pn %icc, 2f
3885 + nop
3886 + retl
3887 + nop
3888 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3889 + .size atomic_add_unchecked, .-atomic_add_unchecked
3890 +
3891 .globl atomic_sub
3892 .type atomic_sub,#function
3893 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3894 BACKOFF_SETUP(%o2)
3895 1: lduw [%o1], %g1
3896 - sub %g1, %o0, %g7
3897 + subcc %g1, %o0, %g7
3898 +
3899 +#ifdef CONFIG_PAX_REFCOUNT
3900 + tvs %icc, 6
3901 +#endif
3902 +
3903 cas [%o1], %g1, %g7
3904 cmp %g1, %g7
3905 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3906 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3907 2: BACKOFF_SPIN(%o2, %o3, 1b)
3908 .size atomic_sub, .-atomic_sub
3909
3910 + .globl atomic_sub_unchecked
3911 + .type atomic_sub_unchecked,#function
3912 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3913 + BACKOFF_SETUP(%o2)
3914 +1: lduw [%o1], %g1
3915 + sub %g1, %o0, %g7
3916 + cas [%o1], %g1, %g7
3917 + cmp %g1, %g7
3918 + bne,pn %icc, 2f
3919 + nop
3920 + retl
3921 + nop
3922 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3923 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
3924 +
3925 .globl atomic_add_ret
3926 .type atomic_add_ret,#function
3927 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3928 BACKOFF_SETUP(%o2)
3929 1: lduw [%o1], %g1
3930 - add %g1, %o0, %g7
3931 + addcc %g1, %o0, %g7
3932 +
3933 +#ifdef CONFIG_PAX_REFCOUNT
3934 + tvs %icc, 6
3935 +#endif
3936 +
3937 cas [%o1], %g1, %g7
3938 cmp %g1, %g7
3939 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3940 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3941 2: BACKOFF_SPIN(%o2, %o3, 1b)
3942 .size atomic_add_ret, .-atomic_add_ret
3943
3944 + .globl atomic_add_ret_unchecked
3945 + .type atomic_add_ret_unchecked,#function
3946 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3947 + BACKOFF_SETUP(%o2)
3948 +1: lduw [%o1], %g1
3949 + addcc %g1, %o0, %g7
3950 + cas [%o1], %g1, %g7
3951 + cmp %g1, %g7
3952 + bne,pn %icc, 2f
3953 + add %g7, %o0, %g7
3954 + sra %g7, 0, %o0
3955 + retl
3956 + nop
3957 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3958 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3959 +
3960 .globl atomic_sub_ret
3961 .type atomic_sub_ret,#function
3962 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3963 BACKOFF_SETUP(%o2)
3964 1: lduw [%o1], %g1
3965 - sub %g1, %o0, %g7
3966 + subcc %g1, %o0, %g7
3967 +
3968 +#ifdef CONFIG_PAX_REFCOUNT
3969 + tvs %icc, 6
3970 +#endif
3971 +
3972 cas [%o1], %g1, %g7
3973 cmp %g1, %g7
3974 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3975 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3976 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3977 BACKOFF_SETUP(%o2)
3978 1: ldx [%o1], %g1
3979 - add %g1, %o0, %g7
3980 + addcc %g1, %o0, %g7
3981 +
3982 +#ifdef CONFIG_PAX_REFCOUNT
3983 + tvs %xcc, 6
3984 +#endif
3985 +
3986 casx [%o1], %g1, %g7
3987 cmp %g1, %g7
3988 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3989 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3990 2: BACKOFF_SPIN(%o2, %o3, 1b)
3991 .size atomic64_add, .-atomic64_add
3992
3993 + .globl atomic64_add_unchecked
3994 + .type atomic64_add_unchecked,#function
3995 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3996 + BACKOFF_SETUP(%o2)
3997 +1: ldx [%o1], %g1
3998 + addcc %g1, %o0, %g7
3999 + casx [%o1], %g1, %g7
4000 + cmp %g1, %g7
4001 + bne,pn %xcc, 2f
4002 + nop
4003 + retl
4004 + nop
4005 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4006 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4007 +
4008 .globl atomic64_sub
4009 .type atomic64_sub,#function
4010 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4011 BACKOFF_SETUP(%o2)
4012 1: ldx [%o1], %g1
4013 - sub %g1, %o0, %g7
4014 + subcc %g1, %o0, %g7
4015 +
4016 +#ifdef CONFIG_PAX_REFCOUNT
4017 + tvs %xcc, 6
4018 +#endif
4019 +
4020 casx [%o1], %g1, %g7
4021 cmp %g1, %g7
4022 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4023 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4024 2: BACKOFF_SPIN(%o2, %o3, 1b)
4025 .size atomic64_sub, .-atomic64_sub
4026
4027 + .globl atomic64_sub_unchecked
4028 + .type atomic64_sub_unchecked,#function
4029 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4030 + BACKOFF_SETUP(%o2)
4031 +1: ldx [%o1], %g1
4032 + subcc %g1, %o0, %g7
4033 + casx [%o1], %g1, %g7
4034 + cmp %g1, %g7
4035 + bne,pn %xcc, 2f
4036 + nop
4037 + retl
4038 + nop
4039 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4040 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4041 +
4042 .globl atomic64_add_ret
4043 .type atomic64_add_ret,#function
4044 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4045 BACKOFF_SETUP(%o2)
4046 1: ldx [%o1], %g1
4047 - add %g1, %o0, %g7
4048 + addcc %g1, %o0, %g7
4049 +
4050 +#ifdef CONFIG_PAX_REFCOUNT
4051 + tvs %xcc, 6
4052 +#endif
4053 +
4054 casx [%o1], %g1, %g7
4055 cmp %g1, %g7
4056 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4057 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4058 2: BACKOFF_SPIN(%o2, %o3, 1b)
4059 .size atomic64_add_ret, .-atomic64_add_ret
4060
4061 + .globl atomic64_add_ret_unchecked
4062 + .type atomic64_add_ret_unchecked,#function
4063 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4064 + BACKOFF_SETUP(%o2)
4065 +1: ldx [%o1], %g1
4066 + addcc %g1, %o0, %g7
4067 + casx [%o1], %g1, %g7
4068 + cmp %g1, %g7
4069 + bne,pn %xcc, 2f
4070 + add %g7, %o0, %g7
4071 + mov %g7, %o0
4072 + retl
4073 + nop
4074 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4075 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4076 +
4077 .globl atomic64_sub_ret
4078 .type atomic64_sub_ret,#function
4079 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4080 BACKOFF_SETUP(%o2)
4081 1: ldx [%o1], %g1
4082 - sub %g1, %o0, %g7
4083 + subcc %g1, %o0, %g7
4084 +
4085 +#ifdef CONFIG_PAX_REFCOUNT
4086 + tvs %xcc, 6
4087 +#endif
4088 +
4089 casx [%o1], %g1, %g7
4090 cmp %g1, %g7
4091 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4092 diff -urNp linux-2.6.39.4/arch/sparc/lib/ksyms.c linux-2.6.39.4/arch/sparc/lib/ksyms.c
4093 --- linux-2.6.39.4/arch/sparc/lib/ksyms.c 2011-05-19 00:06:34.000000000 -0400
4094 +++ linux-2.6.39.4/arch/sparc/lib/ksyms.c 2011-08-19 23:06:32.000000000 -0400
4095 @@ -142,12 +142,18 @@ EXPORT_SYMBOL(__downgrade_write);
4096
4097 /* Atomic counter implementation. */
4098 EXPORT_SYMBOL(atomic_add);
4099 +EXPORT_SYMBOL(atomic_add_unchecked);
4100 EXPORT_SYMBOL(atomic_add_ret);
4101 +EXPORT_SYMBOL(atomic_add_ret_unchecked);
4102 EXPORT_SYMBOL(atomic_sub);
4103 +EXPORT_SYMBOL(atomic_sub_unchecked);
4104 EXPORT_SYMBOL(atomic_sub_ret);
4105 EXPORT_SYMBOL(atomic64_add);
4106 +EXPORT_SYMBOL(atomic64_add_unchecked);
4107 EXPORT_SYMBOL(atomic64_add_ret);
4108 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4109 EXPORT_SYMBOL(atomic64_sub);
4110 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4111 EXPORT_SYMBOL(atomic64_sub_ret);
4112
4113 /* Atomic bit operations. */
4114 diff -urNp linux-2.6.39.4/arch/sparc/lib/Makefile linux-2.6.39.4/arch/sparc/lib/Makefile
4115 --- linux-2.6.39.4/arch/sparc/lib/Makefile 2011-05-19 00:06:34.000000000 -0400
4116 +++ linux-2.6.39.4/arch/sparc/lib/Makefile 2011-08-05 19:44:33.000000000 -0400
4117 @@ -2,7 +2,7 @@
4118 #
4119
4120 asflags-y := -ansi -DST_DIV0=0x02
4121 -ccflags-y := -Werror
4122 +#ccflags-y := -Werror
4123
4124 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4125 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4126 diff -urNp linux-2.6.39.4/arch/sparc/Makefile linux-2.6.39.4/arch/sparc/Makefile
4127 --- linux-2.6.39.4/arch/sparc/Makefile 2011-05-19 00:06:34.000000000 -0400
4128 +++ linux-2.6.39.4/arch/sparc/Makefile 2011-08-05 19:44:33.000000000 -0400
4129 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4130 # Export what is needed by arch/sparc/boot/Makefile
4131 export VMLINUX_INIT VMLINUX_MAIN
4132 VMLINUX_INIT := $(head-y) $(init-y)
4133 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4134 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4135 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4136 VMLINUX_MAIN += $(drivers-y) $(net-y)
4137
4138 diff -urNp linux-2.6.39.4/arch/sparc/mm/fault_32.c linux-2.6.39.4/arch/sparc/mm/fault_32.c
4139 --- linux-2.6.39.4/arch/sparc/mm/fault_32.c 2011-05-19 00:06:34.000000000 -0400
4140 +++ linux-2.6.39.4/arch/sparc/mm/fault_32.c 2011-08-05 19:44:33.000000000 -0400
4141 @@ -22,6 +22,9 @@
4142 #include <linux/interrupt.h>
4143 #include <linux/module.h>
4144 #include <linux/kdebug.h>
4145 +#include <linux/slab.h>
4146 +#include <linux/pagemap.h>
4147 +#include <linux/compiler.h>
4148
4149 #include <asm/system.h>
4150 #include <asm/page.h>
4151 @@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4152 return safe_compute_effective_address(regs, insn);
4153 }
4154
4155 +#ifdef CONFIG_PAX_PAGEEXEC
4156 +#ifdef CONFIG_PAX_DLRESOLVE
4157 +static void pax_emuplt_close(struct vm_area_struct *vma)
4158 +{
4159 + vma->vm_mm->call_dl_resolve = 0UL;
4160 +}
4161 +
4162 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4163 +{
4164 + unsigned int *kaddr;
4165 +
4166 + vmf->page = alloc_page(GFP_HIGHUSER);
4167 + if (!vmf->page)
4168 + return VM_FAULT_OOM;
4169 +
4170 + kaddr = kmap(vmf->page);
4171 + memset(kaddr, 0, PAGE_SIZE);
4172 + kaddr[0] = 0x9DE3BFA8U; /* save */
4173 + flush_dcache_page(vmf->page);
4174 + kunmap(vmf->page);
4175 + return VM_FAULT_MAJOR;
4176 +}
4177 +
4178 +static const struct vm_operations_struct pax_vm_ops = {
4179 + .close = pax_emuplt_close,
4180 + .fault = pax_emuplt_fault
4181 +};
4182 +
4183 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4184 +{
4185 + int ret;
4186 +
4187 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4188 + vma->vm_mm = current->mm;
4189 + vma->vm_start = addr;
4190 + vma->vm_end = addr + PAGE_SIZE;
4191 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4192 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4193 + vma->vm_ops = &pax_vm_ops;
4194 +
4195 + ret = insert_vm_struct(current->mm, vma);
4196 + if (ret)
4197 + return ret;
4198 +
4199 + ++current->mm->total_vm;
4200 + return 0;
4201 +}
4202 +#endif
4203 +
4204 +/*
4205 + * PaX: decide what to do with offenders (regs->pc = fault address)
4206 + *
4207 + * returns 1 when task should be killed
4208 + * 2 when patched PLT trampoline was detected
4209 + * 3 when unpatched PLT trampoline was detected
4210 + */
4211 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4212 +{
4213 +
4214 +#ifdef CONFIG_PAX_EMUPLT
4215 + int err;
4216 +
4217 + do { /* PaX: patched PLT emulation #1 */
4218 + unsigned int sethi1, sethi2, jmpl;
4219 +
4220 + err = get_user(sethi1, (unsigned int *)regs->pc);
4221 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4222 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4223 +
4224 + if (err)
4225 + break;
4226 +
4227 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4228 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4229 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4230 + {
4231 + unsigned int addr;
4232 +
4233 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4234 + addr = regs->u_regs[UREG_G1];
4235 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4236 + regs->pc = addr;
4237 + regs->npc = addr+4;
4238 + return 2;
4239 + }
4240 + } while (0);
4241 +
4242 + { /* PaX: patched PLT emulation #2 */
4243 + unsigned int ba;
4244 +
4245 + err = get_user(ba, (unsigned int *)regs->pc);
4246 +
4247 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4248 + unsigned int addr;
4249 +
4250 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4251 + regs->pc = addr;
4252 + regs->npc = addr+4;
4253 + return 2;
4254 + }
4255 + }
4256 +
4257 + do { /* PaX: patched PLT emulation #3 */
4258 + unsigned int sethi, jmpl, nop;
4259 +
4260 + err = get_user(sethi, (unsigned int *)regs->pc);
4261 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4262 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4263 +
4264 + if (err)
4265 + break;
4266 +
4267 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4268 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4269 + nop == 0x01000000U)
4270 + {
4271 + unsigned int addr;
4272 +
4273 + addr = (sethi & 0x003FFFFFU) << 10;
4274 + regs->u_regs[UREG_G1] = addr;
4275 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4276 + regs->pc = addr;
4277 + regs->npc = addr+4;
4278 + return 2;
4279 + }
4280 + } while (0);
4281 +
4282 + do { /* PaX: unpatched PLT emulation step 1 */
4283 + unsigned int sethi, ba, nop;
4284 +
4285 + err = get_user(sethi, (unsigned int *)regs->pc);
4286 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4287 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4288 +
4289 + if (err)
4290 + break;
4291 +
4292 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4293 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4294 + nop == 0x01000000U)
4295 + {
4296 + unsigned int addr, save, call;
4297 +
4298 + if ((ba & 0xFFC00000U) == 0x30800000U)
4299 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4300 + else
4301 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4302 +
4303 + err = get_user(save, (unsigned int *)addr);
4304 + err |= get_user(call, (unsigned int *)(addr+4));
4305 + err |= get_user(nop, (unsigned int *)(addr+8));
4306 + if (err)
4307 + break;
4308 +
4309 +#ifdef CONFIG_PAX_DLRESOLVE
4310 + if (save == 0x9DE3BFA8U &&
4311 + (call & 0xC0000000U) == 0x40000000U &&
4312 + nop == 0x01000000U)
4313 + {
4314 + struct vm_area_struct *vma;
4315 + unsigned long call_dl_resolve;
4316 +
4317 + down_read(&current->mm->mmap_sem);
4318 + call_dl_resolve = current->mm->call_dl_resolve;
4319 + up_read(&current->mm->mmap_sem);
4320 + if (likely(call_dl_resolve))
4321 + goto emulate;
4322 +
4323 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4324 +
4325 + down_write(&current->mm->mmap_sem);
4326 + if (current->mm->call_dl_resolve) {
4327 + call_dl_resolve = current->mm->call_dl_resolve;
4328 + up_write(&current->mm->mmap_sem);
4329 + if (vma)
4330 + kmem_cache_free(vm_area_cachep, vma);
4331 + goto emulate;
4332 + }
4333 +
4334 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4335 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4336 + up_write(&current->mm->mmap_sem);
4337 + if (vma)
4338 + kmem_cache_free(vm_area_cachep, vma);
4339 + return 1;
4340 + }
4341 +
4342 + if (pax_insert_vma(vma, call_dl_resolve)) {
4343 + up_write(&current->mm->mmap_sem);
4344 + kmem_cache_free(vm_area_cachep, vma);
4345 + return 1;
4346 + }
4347 +
4348 + current->mm->call_dl_resolve = call_dl_resolve;
4349 + up_write(&current->mm->mmap_sem);
4350 +
4351 +emulate:
4352 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4353 + regs->pc = call_dl_resolve;
4354 + regs->npc = addr+4;
4355 + return 3;
4356 + }
4357 +#endif
4358 +
4359 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4360 + if ((save & 0xFFC00000U) == 0x05000000U &&
4361 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4362 + nop == 0x01000000U)
4363 + {
4364 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4365 + regs->u_regs[UREG_G2] = addr + 4;
4366 + addr = (save & 0x003FFFFFU) << 10;
4367 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4368 + regs->pc = addr;
4369 + regs->npc = addr+4;
4370 + return 3;
4371 + }
4372 + }
4373 + } while (0);
4374 +
4375 + do { /* PaX: unpatched PLT emulation step 2 */
4376 + unsigned int save, call, nop;
4377 +
4378 + err = get_user(save, (unsigned int *)(regs->pc-4));
4379 + err |= get_user(call, (unsigned int *)regs->pc);
4380 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
4381 + if (err)
4382 + break;
4383 +
4384 + if (save == 0x9DE3BFA8U &&
4385 + (call & 0xC0000000U) == 0x40000000U &&
4386 + nop == 0x01000000U)
4387 + {
4388 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4389 +
4390 + regs->u_regs[UREG_RETPC] = regs->pc;
4391 + regs->pc = dl_resolve;
4392 + regs->npc = dl_resolve+4;
4393 + return 3;
4394 + }
4395 + } while (0);
4396 +#endif
4397 +
4398 + return 1;
4399 +}
4400 +
4401 +void pax_report_insns(void *pc, void *sp)
4402 +{
4403 + unsigned long i;
4404 +
4405 + printk(KERN_ERR "PAX: bytes at PC: ");
4406 + for (i = 0; i < 8; i++) {
4407 + unsigned int c;
4408 + if (get_user(c, (unsigned int *)pc+i))
4409 + printk(KERN_CONT "???????? ");
4410 + else
4411 + printk(KERN_CONT "%08x ", c);
4412 + }
4413 + printk("\n");
4414 +}
4415 +#endif
4416 +
4417 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4418 int text_fault)
4419 {
4420 @@ -281,6 +546,24 @@ good_area:
4421 if(!(vma->vm_flags & VM_WRITE))
4422 goto bad_area;
4423 } else {
4424 +
4425 +#ifdef CONFIG_PAX_PAGEEXEC
4426 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4427 + up_read(&mm->mmap_sem);
4428 + switch (pax_handle_fetch_fault(regs)) {
4429 +
4430 +#ifdef CONFIG_PAX_EMUPLT
4431 + case 2:
4432 + case 3:
4433 + return;
4434 +#endif
4435 +
4436 + }
4437 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4438 + do_group_exit(SIGKILL);
4439 + }
4440 +#endif
4441 +
4442 /* Allow reads even for write-only mappings */
4443 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4444 goto bad_area;
4445 diff -urNp linux-2.6.39.4/arch/sparc/mm/fault_64.c linux-2.6.39.4/arch/sparc/mm/fault_64.c
4446 --- linux-2.6.39.4/arch/sparc/mm/fault_64.c 2011-05-19 00:06:34.000000000 -0400
4447 +++ linux-2.6.39.4/arch/sparc/mm/fault_64.c 2011-08-05 19:44:33.000000000 -0400
4448 @@ -21,6 +21,9 @@
4449 #include <linux/kprobes.h>
4450 #include <linux/kdebug.h>
4451 #include <linux/percpu.h>
4452 +#include <linux/slab.h>
4453 +#include <linux/pagemap.h>
4454 +#include <linux/compiler.h>
4455
4456 #include <asm/page.h>
4457 #include <asm/pgtable.h>
4458 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4459 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4460 regs->tpc);
4461 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4462 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4463 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4464 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4465 dump_stack();
4466 unhandled_fault(regs->tpc, current, regs);
4467 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4468 show_regs(regs);
4469 }
4470
4471 +#ifdef CONFIG_PAX_PAGEEXEC
4472 +#ifdef CONFIG_PAX_DLRESOLVE
4473 +static void pax_emuplt_close(struct vm_area_struct *vma)
4474 +{
4475 + vma->vm_mm->call_dl_resolve = 0UL;
4476 +}
4477 +
4478 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4479 +{
4480 + unsigned int *kaddr;
4481 +
4482 + vmf->page = alloc_page(GFP_HIGHUSER);
4483 + if (!vmf->page)
4484 + return VM_FAULT_OOM;
4485 +
4486 + kaddr = kmap(vmf->page);
4487 + memset(kaddr, 0, PAGE_SIZE);
4488 + kaddr[0] = 0x9DE3BFA8U; /* save */
4489 + flush_dcache_page(vmf->page);
4490 + kunmap(vmf->page);
4491 + return VM_FAULT_MAJOR;
4492 +}
4493 +
4494 +static const struct vm_operations_struct pax_vm_ops = {
4495 + .close = pax_emuplt_close,
4496 + .fault = pax_emuplt_fault
4497 +};
4498 +
4499 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4500 +{
4501 + int ret;
4502 +
4503 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4504 + vma->vm_mm = current->mm;
4505 + vma->vm_start = addr;
4506 + vma->vm_end = addr + PAGE_SIZE;
4507 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4508 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4509 + vma->vm_ops = &pax_vm_ops;
4510 +
4511 + ret = insert_vm_struct(current->mm, vma);
4512 + if (ret)
4513 + return ret;
4514 +
4515 + ++current->mm->total_vm;
4516 + return 0;
4517 +}
4518 +#endif
4519 +
4520 +/*
4521 + * PaX: decide what to do with offenders (regs->tpc = fault address)
4522 + *
4523 + * returns 1 when task should be killed
4524 + * 2 when patched PLT trampoline was detected
4525 + * 3 when unpatched PLT trampoline was detected
4526 + */
4527 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4528 +{
4529 +
4530 +#ifdef CONFIG_PAX_EMUPLT
4531 + int err;
4532 +
4533 + do { /* PaX: patched PLT emulation #1 */
4534 + unsigned int sethi1, sethi2, jmpl;
4535 +
4536 + err = get_user(sethi1, (unsigned int *)regs->tpc);
4537 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4538 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4539 +
4540 + if (err)
4541 + break;
4542 +
4543 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4544 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4545 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4546 + {
4547 + unsigned long addr;
4548 +
4549 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4550 + addr = regs->u_regs[UREG_G1];
4551 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4552 +
4553 + if (test_thread_flag(TIF_32BIT))
4554 + addr &= 0xFFFFFFFFUL;
4555 +
4556 + regs->tpc = addr;
4557 + regs->tnpc = addr+4;
4558 + return 2;
4559 + }
4560 + } while (0);
4561 +
4562 + { /* PaX: patched PLT emulation #2 */
4563 + unsigned int ba;
4564 +
4565 + err = get_user(ba, (unsigned int *)regs->tpc);
4566 +
4567 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4568 + unsigned long addr;
4569 +
4570 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4571 +
4572 + if (test_thread_flag(TIF_32BIT))
4573 + addr &= 0xFFFFFFFFUL;
4574 +
4575 + regs->tpc = addr;
4576 + regs->tnpc = addr+4;
4577 + return 2;
4578 + }
4579 + }
4580 +
4581 + do { /* PaX: patched PLT emulation #3 */
4582 + unsigned int sethi, jmpl, nop;
4583 +
4584 + err = get_user(sethi, (unsigned int *)regs->tpc);
4585 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4586 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4587 +
4588 + if (err)
4589 + break;
4590 +
4591 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4592 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4593 + nop == 0x01000000U)
4594 + {
4595 + unsigned long addr;
4596 +
4597 + addr = (sethi & 0x003FFFFFU) << 10;
4598 + regs->u_regs[UREG_G1] = addr;
4599 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4600 +
4601 + if (test_thread_flag(TIF_32BIT))
4602 + addr &= 0xFFFFFFFFUL;
4603 +
4604 + regs->tpc = addr;
4605 + regs->tnpc = addr+4;
4606 + return 2;
4607 + }
4608 + } while (0);
4609 +
4610 + do { /* PaX: patched PLT emulation #4 */
4611 + unsigned int sethi, mov1, call, mov2;
4612 +
4613 + err = get_user(sethi, (unsigned int *)regs->tpc);
4614 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4615 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
4616 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4617 +
4618 + if (err)
4619 + break;
4620 +
4621 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4622 + mov1 == 0x8210000FU &&
4623 + (call & 0xC0000000U) == 0x40000000U &&
4624 + mov2 == 0x9E100001U)
4625 + {
4626 + unsigned long addr;
4627 +
4628 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4629 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4630 +
4631 + if (test_thread_flag(TIF_32BIT))
4632 + addr &= 0xFFFFFFFFUL;
4633 +
4634 + regs->tpc = addr;
4635 + regs->tnpc = addr+4;
4636 + return 2;
4637 + }
4638 + } while (0);
4639 +
4640 + do { /* PaX: patched PLT emulation #5 */
4641 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4642 +
4643 + err = get_user(sethi, (unsigned int *)regs->tpc);
4644 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4645 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4646 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4647 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4648 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4649 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4650 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4651 +
4652 + if (err)
4653 + break;
4654 +
4655 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4656 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4657 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4658 + (or1 & 0xFFFFE000U) == 0x82106000U &&
4659 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4660 + sllx == 0x83287020U &&
4661 + jmpl == 0x81C04005U &&
4662 + nop == 0x01000000U)
4663 + {
4664 + unsigned long addr;
4665 +
4666 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4667 + regs->u_regs[UREG_G1] <<= 32;
4668 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4669 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4670 + regs->tpc = addr;
4671 + regs->tnpc = addr+4;
4672 + return 2;
4673 + }
4674 + } while (0);
4675 +
4676 + do { /* PaX: patched PLT emulation #6 */
4677 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4678 +
4679 + err = get_user(sethi, (unsigned int *)regs->tpc);
4680 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4681 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4682 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4683 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
4684 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4685 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4686 +
4687 + if (err)
4688 + break;
4689 +
4690 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4691 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4692 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4693 + sllx == 0x83287020U &&
4694 + (or & 0xFFFFE000U) == 0x8A116000U &&
4695 + jmpl == 0x81C04005U &&
4696 + nop == 0x01000000U)
4697 + {
4698 + unsigned long addr;
4699 +
4700 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4701 + regs->u_regs[UREG_G1] <<= 32;
4702 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4703 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4704 + regs->tpc = addr;
4705 + regs->tnpc = addr+4;
4706 + return 2;
4707 + }
4708 + } while (0);
4709 +
4710 + do { /* PaX: unpatched PLT emulation step 1 */
4711 + unsigned int sethi, ba, nop;
4712 +
4713 + err = get_user(sethi, (unsigned int *)regs->tpc);
4714 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4715 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4716 +
4717 + if (err)
4718 + break;
4719 +
4720 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4721 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4722 + nop == 0x01000000U)
4723 + {
4724 + unsigned long addr;
4725 + unsigned int save, call;
4726 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4727 +
4728 + if ((ba & 0xFFC00000U) == 0x30800000U)
4729 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4730 + else
4731 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4732 +
4733 + if (test_thread_flag(TIF_32BIT))
4734 + addr &= 0xFFFFFFFFUL;
4735 +
4736 + err = get_user(save, (unsigned int *)addr);
4737 + err |= get_user(call, (unsigned int *)(addr+4));
4738 + err |= get_user(nop, (unsigned int *)(addr+8));
4739 + if (err)
4740 + break;
4741 +
4742 +#ifdef CONFIG_PAX_DLRESOLVE
4743 + if (save == 0x9DE3BFA8U &&
4744 + (call & 0xC0000000U) == 0x40000000U &&
4745 + nop == 0x01000000U)
4746 + {
4747 + struct vm_area_struct *vma;
4748 + unsigned long call_dl_resolve;
4749 +
4750 + down_read(&current->mm->mmap_sem);
4751 + call_dl_resolve = current->mm->call_dl_resolve;
4752 + up_read(&current->mm->mmap_sem);
4753 + if (likely(call_dl_resolve))
4754 + goto emulate;
4755 +
4756 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4757 +
4758 + down_write(&current->mm->mmap_sem);
4759 + if (current->mm->call_dl_resolve) {
4760 + call_dl_resolve = current->mm->call_dl_resolve;
4761 + up_write(&current->mm->mmap_sem);
4762 + if (vma)
4763 + kmem_cache_free(vm_area_cachep, vma);
4764 + goto emulate;
4765 + }
4766 +
4767 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4768 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4769 + up_write(&current->mm->mmap_sem);
4770 + if (vma)
4771 + kmem_cache_free(vm_area_cachep, vma);
4772 + return 1;
4773 + }
4774 +
4775 + if (pax_insert_vma(vma, call_dl_resolve)) {
4776 + up_write(&current->mm->mmap_sem);
4777 + kmem_cache_free(vm_area_cachep, vma);
4778 + return 1;
4779 + }
4780 +
4781 + current->mm->call_dl_resolve = call_dl_resolve;
4782 + up_write(&current->mm->mmap_sem);
4783 +
4784 +emulate:
4785 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4786 + regs->tpc = call_dl_resolve;
4787 + regs->tnpc = addr+4;
4788 + return 3;
4789 + }
4790 +#endif
4791 +
4792 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4793 + if ((save & 0xFFC00000U) == 0x05000000U &&
4794 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4795 + nop == 0x01000000U)
4796 + {
4797 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4798 + regs->u_regs[UREG_G2] = addr + 4;
4799 + addr = (save & 0x003FFFFFU) << 10;
4800 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4801 +
4802 + if (test_thread_flag(TIF_32BIT))
4803 + addr &= 0xFFFFFFFFUL;
4804 +
4805 + regs->tpc = addr;
4806 + regs->tnpc = addr+4;
4807 + return 3;
4808 + }
4809 +
4810 + /* PaX: 64-bit PLT stub */
4811 + err = get_user(sethi1, (unsigned int *)addr);
4812 + err |= get_user(sethi2, (unsigned int *)(addr+4));
4813 + err |= get_user(or1, (unsigned int *)(addr+8));
4814 + err |= get_user(or2, (unsigned int *)(addr+12));
4815 + err |= get_user(sllx, (unsigned int *)(addr+16));
4816 + err |= get_user(add, (unsigned int *)(addr+20));
4817 + err |= get_user(jmpl, (unsigned int *)(addr+24));
4818 + err |= get_user(nop, (unsigned int *)(addr+28));
4819 + if (err)
4820 + break;
4821 +
4822 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4823 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4824 + (or1 & 0xFFFFE000U) == 0x88112000U &&
4825 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4826 + sllx == 0x89293020U &&
4827 + add == 0x8A010005U &&
4828 + jmpl == 0x89C14000U &&
4829 + nop == 0x01000000U)
4830 + {
4831 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4832 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4833 + regs->u_regs[UREG_G4] <<= 32;
4834 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4835 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4836 + regs->u_regs[UREG_G4] = addr + 24;
4837 + addr = regs->u_regs[UREG_G5];
4838 + regs->tpc = addr;
4839 + regs->tnpc = addr+4;
4840 + return 3;
4841 + }
4842 + }
4843 + } while (0);
4844 +
4845 +#ifdef CONFIG_PAX_DLRESOLVE
4846 + do { /* PaX: unpatched PLT emulation step 2 */
4847 + unsigned int save, call, nop;
4848 +
4849 + err = get_user(save, (unsigned int *)(regs->tpc-4));
4850 + err |= get_user(call, (unsigned int *)regs->tpc);
4851 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4852 + if (err)
4853 + break;
4854 +
4855 + if (save == 0x9DE3BFA8U &&
4856 + (call & 0xC0000000U) == 0x40000000U &&
4857 + nop == 0x01000000U)
4858 + {
4859 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4860 +
4861 + if (test_thread_flag(TIF_32BIT))
4862 + dl_resolve &= 0xFFFFFFFFUL;
4863 +
4864 + regs->u_regs[UREG_RETPC] = regs->tpc;
4865 + regs->tpc = dl_resolve;
4866 + regs->tnpc = dl_resolve+4;
4867 + return 3;
4868 + }
4869 + } while (0);
4870 +#endif
4871 +
4872 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4873 + unsigned int sethi, ba, nop;
4874 +
4875 + err = get_user(sethi, (unsigned int *)regs->tpc);
4876 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4877 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4878 +
4879 + if (err)
4880 + break;
4881 +
4882 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4883 + (ba & 0xFFF00000U) == 0x30600000U &&
4884 + nop == 0x01000000U)
4885 + {
4886 + unsigned long addr;
4887 +
4888 + addr = (sethi & 0x003FFFFFU) << 10;
4889 + regs->u_regs[UREG_G1] = addr;
4890 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4891 +
4892 + if (test_thread_flag(TIF_32BIT))
4893 + addr &= 0xFFFFFFFFUL;
4894 +
4895 + regs->tpc = addr;
4896 + regs->tnpc = addr+4;
4897 + return 2;
4898 + }
4899 + } while (0);
4900 +
4901 +#endif
4902 +
4903 + return 1;
4904 +}
4905 +
4906 +void pax_report_insns(void *pc, void *sp)
4907 +{
4908 + unsigned long i;
4909 +
4910 + printk(KERN_ERR "PAX: bytes at PC: ");
4911 + for (i = 0; i < 8; i++) {
4912 + unsigned int c;
4913 + if (get_user(c, (unsigned int *)pc+i))
4914 + printk(KERN_CONT "???????? ");
4915 + else
4916 + printk(KERN_CONT "%08x ", c);
4917 + }
4918 + printk("\n");
4919 +}
4920 +#endif
4921 +
4922 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4923 {
4924 struct mm_struct *mm = current->mm;
4925 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4926 if (!vma)
4927 goto bad_area;
4928
4929 +#ifdef CONFIG_PAX_PAGEEXEC
4930 + /* PaX: detect ITLB misses on non-exec pages */
4931 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4932 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4933 + {
4934 + if (address != regs->tpc)
4935 + goto good_area;
4936 +
4937 + up_read(&mm->mmap_sem);
4938 + switch (pax_handle_fetch_fault(regs)) {
4939 +
4940 +#ifdef CONFIG_PAX_EMUPLT
4941 + case 2:
4942 + case 3:
4943 + return;
4944 +#endif
4945 +
4946 + }
4947 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4948 + do_group_exit(SIGKILL);
4949 + }
4950 +#endif
4951 +
4952 /* Pure DTLB misses do not tell us whether the fault causing
4953 * load/store/atomic was a write or not, it only says that there
4954 * was no match. So in such a case we (carefully) read the
4955 diff -urNp linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c
4956 --- linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400
4957 +++ linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c 2011-08-05 19:44:33.000000000 -0400
4958 @@ -68,7 +68,7 @@ full_search:
4959 }
4960 return -ENOMEM;
4961 }
4962 - if (likely(!vma || addr + len <= vma->vm_start)) {
4963 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4964 /*
4965 * Remember the place where we stopped the search:
4966 */
4967 @@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4968 /* make sure it can fit in the remaining address space */
4969 if (likely(addr > len)) {
4970 vma = find_vma(mm, addr-len);
4971 - if (!vma || addr <= vma->vm_start) {
4972 + if (check_heap_stack_gap(vma, addr - len, len)) {
4973 /* remember the address as a hint for next time */
4974 return (mm->free_area_cache = addr-len);
4975 }
4976 @@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4977 if (unlikely(mm->mmap_base < len))
4978 goto bottomup;
4979
4980 - addr = (mm->mmap_base-len) & HPAGE_MASK;
4981 + addr = mm->mmap_base - len;
4982
4983 do {
4984 + addr &= HPAGE_MASK;
4985 /*
4986 * Lookup failure means no vma is above this address,
4987 * else if new region fits below vma->vm_start,
4988 * return with success:
4989 */
4990 vma = find_vma(mm, addr);
4991 - if (likely(!vma || addr+len <= vma->vm_start)) {
4992 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4993 /* remember the address as a hint for next time */
4994 return (mm->free_area_cache = addr);
4995 }
4996 @@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4997 mm->cached_hole_size = vma->vm_start - addr;
4998
4999 /* try just below the current vma->vm_start */
5000 - addr = (vma->vm_start-len) & HPAGE_MASK;
5001 - } while (likely(len < vma->vm_start));
5002 + addr = skip_heap_stack_gap(vma, len);
5003 + } while (!IS_ERR_VALUE(addr));
5004
5005 bottomup:
5006 /*
5007 @@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
5008 if (addr) {
5009 addr = ALIGN(addr, HPAGE_SIZE);
5010 vma = find_vma(mm, addr);
5011 - if (task_size - len >= addr &&
5012 - (!vma || addr + len <= vma->vm_start))
5013 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5014 return addr;
5015 }
5016 if (mm->get_unmapped_area == arch_get_unmapped_area)
5017 diff -urNp linux-2.6.39.4/arch/sparc/mm/init_32.c linux-2.6.39.4/arch/sparc/mm/init_32.c
5018 --- linux-2.6.39.4/arch/sparc/mm/init_32.c 2011-05-19 00:06:34.000000000 -0400
5019 +++ linux-2.6.39.4/arch/sparc/mm/init_32.c 2011-08-05 19:44:33.000000000 -0400
5020 @@ -318,6 +318,9 @@ extern void device_scan(void);
5021 pgprot_t PAGE_SHARED __read_mostly;
5022 EXPORT_SYMBOL(PAGE_SHARED);
5023
5024 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5025 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5026 +
5027 void __init paging_init(void)
5028 {
5029 switch(sparc_cpu_model) {
5030 @@ -346,17 +349,17 @@ void __init paging_init(void)
5031
5032 /* Initialize the protection map with non-constant, MMU dependent values. */
5033 protection_map[0] = PAGE_NONE;
5034 - protection_map[1] = PAGE_READONLY;
5035 - protection_map[2] = PAGE_COPY;
5036 - protection_map[3] = PAGE_COPY;
5037 + protection_map[1] = PAGE_READONLY_NOEXEC;
5038 + protection_map[2] = PAGE_COPY_NOEXEC;
5039 + protection_map[3] = PAGE_COPY_NOEXEC;
5040 protection_map[4] = PAGE_READONLY;
5041 protection_map[5] = PAGE_READONLY;
5042 protection_map[6] = PAGE_COPY;
5043 protection_map[7] = PAGE_COPY;
5044 protection_map[8] = PAGE_NONE;
5045 - protection_map[9] = PAGE_READONLY;
5046 - protection_map[10] = PAGE_SHARED;
5047 - protection_map[11] = PAGE_SHARED;
5048 + protection_map[9] = PAGE_READONLY_NOEXEC;
5049 + protection_map[10] = PAGE_SHARED_NOEXEC;
5050 + protection_map[11] = PAGE_SHARED_NOEXEC;
5051 protection_map[12] = PAGE_READONLY;
5052 protection_map[13] = PAGE_READONLY;
5053 protection_map[14] = PAGE_SHARED;
5054 diff -urNp linux-2.6.39.4/arch/sparc/mm/Makefile linux-2.6.39.4/arch/sparc/mm/Makefile
5055 --- linux-2.6.39.4/arch/sparc/mm/Makefile 2011-05-19 00:06:34.000000000 -0400
5056 +++ linux-2.6.39.4/arch/sparc/mm/Makefile 2011-08-05 19:44:33.000000000 -0400
5057 @@ -2,7 +2,7 @@
5058 #
5059
5060 asflags-y := -ansi
5061 -ccflags-y := -Werror
5062 +#ccflags-y := -Werror
5063
5064 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5065 obj-y += fault_$(BITS).o
5066 diff -urNp linux-2.6.39.4/arch/sparc/mm/srmmu.c linux-2.6.39.4/arch/sparc/mm/srmmu.c
5067 --- linux-2.6.39.4/arch/sparc/mm/srmmu.c 2011-05-19 00:06:34.000000000 -0400
5068 +++ linux-2.6.39.4/arch/sparc/mm/srmmu.c 2011-08-05 19:44:33.000000000 -0400
5069 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5070 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5071 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5072 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5073 +
5074 +#ifdef CONFIG_PAX_PAGEEXEC
5075 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5076 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5077 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5078 +#endif
5079 +
5080 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5081 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5082
5083 diff -urNp linux-2.6.39.4/arch/um/include/asm/kmap_types.h linux-2.6.39.4/arch/um/include/asm/kmap_types.h
5084 --- linux-2.6.39.4/arch/um/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
5085 +++ linux-2.6.39.4/arch/um/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
5086 @@ -23,6 +23,7 @@ enum km_type {
5087 KM_IRQ1,
5088 KM_SOFTIRQ0,
5089 KM_SOFTIRQ1,
5090 + KM_CLEARPAGE,
5091 KM_TYPE_NR
5092 };
5093
5094 diff -urNp linux-2.6.39.4/arch/um/include/asm/page.h linux-2.6.39.4/arch/um/include/asm/page.h
5095 --- linux-2.6.39.4/arch/um/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
5096 +++ linux-2.6.39.4/arch/um/include/asm/page.h 2011-08-05 19:44:33.000000000 -0400
5097 @@ -14,6 +14,9 @@
5098 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5099 #define PAGE_MASK (~(PAGE_SIZE-1))
5100
5101 +#define ktla_ktva(addr) (addr)
5102 +#define ktva_ktla(addr) (addr)
5103 +
5104 #ifndef __ASSEMBLY__
5105
5106 struct page;
5107 diff -urNp linux-2.6.39.4/arch/um/kernel/process.c linux-2.6.39.4/arch/um/kernel/process.c
5108 --- linux-2.6.39.4/arch/um/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
5109 +++ linux-2.6.39.4/arch/um/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
5110 @@ -404,22 +404,6 @@ int singlestepping(void * t)
5111 return 2;
5112 }
5113
5114 -/*
5115 - * Only x86 and x86_64 have an arch_align_stack().
5116 - * All other arches have "#define arch_align_stack(x) (x)"
5117 - * in their asm/system.h
5118 - * As this is included in UML from asm-um/system-generic.h,
5119 - * we can use it to behave as the subarch does.
5120 - */
5121 -#ifndef arch_align_stack
5122 -unsigned long arch_align_stack(unsigned long sp)
5123 -{
5124 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5125 - sp -= get_random_int() % 8192;
5126 - return sp & ~0xf;
5127 -}
5128 -#endif
5129 -
5130 unsigned long get_wchan(struct task_struct *p)
5131 {
5132 unsigned long stack_page, sp, ip;
5133 diff -urNp linux-2.6.39.4/arch/um/sys-i386/syscalls.c linux-2.6.39.4/arch/um/sys-i386/syscalls.c
5134 --- linux-2.6.39.4/arch/um/sys-i386/syscalls.c 2011-05-19 00:06:34.000000000 -0400
5135 +++ linux-2.6.39.4/arch/um/sys-i386/syscalls.c 2011-08-05 19:44:33.000000000 -0400
5136 @@ -11,6 +11,21 @@
5137 #include "asm/uaccess.h"
5138 #include "asm/unistd.h"
5139
5140 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5141 +{
5142 + unsigned long pax_task_size = TASK_SIZE;
5143 +
5144 +#ifdef CONFIG_PAX_SEGMEXEC
5145 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5146 + pax_task_size = SEGMEXEC_TASK_SIZE;
5147 +#endif
5148 +
5149 + if (len > pax_task_size || addr > pax_task_size - len)
5150 + return -EINVAL;
5151 +
5152 + return 0;
5153 +}
5154 +
5155 /*
5156 * The prototype on i386 is:
5157 *
5158 diff -urNp linux-2.6.39.4/arch/x86/boot/bitops.h linux-2.6.39.4/arch/x86/boot/bitops.h
5159 --- linux-2.6.39.4/arch/x86/boot/bitops.h 2011-05-19 00:06:34.000000000 -0400
5160 +++ linux-2.6.39.4/arch/x86/boot/bitops.h 2011-08-05 19:44:33.000000000 -0400
5161 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5162 u8 v;
5163 const u32 *p = (const u32 *)addr;
5164
5165 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5166 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5167 return v;
5168 }
5169
5170 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5171
5172 static inline void set_bit(int nr, void *addr)
5173 {
5174 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5175 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5176 }
5177
5178 #endif /* BOOT_BITOPS_H */
5179 diff -urNp linux-2.6.39.4/arch/x86/boot/boot.h linux-2.6.39.4/arch/x86/boot/boot.h
5180 --- linux-2.6.39.4/arch/x86/boot/boot.h 2011-05-19 00:06:34.000000000 -0400
5181 +++ linux-2.6.39.4/arch/x86/boot/boot.h 2011-08-05 19:44:33.000000000 -0400
5182 @@ -85,7 +85,7 @@ static inline void io_delay(void)
5183 static inline u16 ds(void)
5184 {
5185 u16 seg;
5186 - asm("movw %%ds,%0" : "=rm" (seg));
5187 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5188 return seg;
5189 }
5190
5191 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5192 static inline int memcmp(const void *s1, const void *s2, size_t len)
5193 {
5194 u8 diff;
5195 - asm("repe; cmpsb; setnz %0"
5196 + asm volatile("repe; cmpsb; setnz %0"
5197 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5198 return diff;
5199 }
5200 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/head_32.S linux-2.6.39.4/arch/x86/boot/compressed/head_32.S
5201 --- linux-2.6.39.4/arch/x86/boot/compressed/head_32.S 2011-05-19 00:06:34.000000000 -0400
5202 +++ linux-2.6.39.4/arch/x86/boot/compressed/head_32.S 2011-08-05 19:44:33.000000000 -0400
5203 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5204 notl %eax
5205 andl %eax, %ebx
5206 #else
5207 - movl $LOAD_PHYSICAL_ADDR, %ebx
5208 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5209 #endif
5210
5211 /* Target address to relocate to for decompression */
5212 @@ -162,7 +162,7 @@ relocated:
5213 * and where it was actually loaded.
5214 */
5215 movl %ebp, %ebx
5216 - subl $LOAD_PHYSICAL_ADDR, %ebx
5217 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5218 jz 2f /* Nothing to be done if loaded at compiled addr. */
5219 /*
5220 * Process relocations.
5221 @@ -170,8 +170,7 @@ relocated:
5222
5223 1: subl $4, %edi
5224 movl (%edi), %ecx
5225 - testl %ecx, %ecx
5226 - jz 2f
5227 + jecxz 2f
5228 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5229 jmp 1b
5230 2:
5231 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/head_64.S linux-2.6.39.4/arch/x86/boot/compressed/head_64.S
5232 --- linux-2.6.39.4/arch/x86/boot/compressed/head_64.S 2011-05-19 00:06:34.000000000 -0400
5233 +++ linux-2.6.39.4/arch/x86/boot/compressed/head_64.S 2011-08-05 19:44:33.000000000 -0400
5234 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5235 notl %eax
5236 andl %eax, %ebx
5237 #else
5238 - movl $LOAD_PHYSICAL_ADDR, %ebx
5239 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5240 #endif
5241
5242 /* Target address to relocate to for decompression */
5243 @@ -233,7 +233,7 @@ ENTRY(startup_64)
5244 notq %rax
5245 andq %rax, %rbp
5246 #else
5247 - movq $LOAD_PHYSICAL_ADDR, %rbp
5248 + movq $____LOAD_PHYSICAL_ADDR, %rbp
5249 #endif
5250
5251 /* Target address to relocate to for decompression */
5252 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/Makefile linux-2.6.39.4/arch/x86/boot/compressed/Makefile
5253 --- linux-2.6.39.4/arch/x86/boot/compressed/Makefile 2011-05-19 00:06:34.000000000 -0400
5254 +++ linux-2.6.39.4/arch/x86/boot/compressed/Makefile 2011-08-05 20:34:06.000000000 -0400
5255 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5256 KBUILD_CFLAGS += $(cflags-y)
5257 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5258 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5259 +ifdef CONSTIFY_PLUGIN
5260 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5261 +endif
5262
5263 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5264 GCOV_PROFILE := n
5265 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/misc.c linux-2.6.39.4/arch/x86/boot/compressed/misc.c
5266 --- linux-2.6.39.4/arch/x86/boot/compressed/misc.c 2011-05-19 00:06:34.000000000 -0400
5267 +++ linux-2.6.39.4/arch/x86/boot/compressed/misc.c 2011-08-05 19:44:33.000000000 -0400
5268 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
5269 case PT_LOAD:
5270 #ifdef CONFIG_RELOCATABLE
5271 dest = output;
5272 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5273 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5274 #else
5275 dest = (void *)(phdr->p_paddr);
5276 #endif
5277 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5278 error("Destination address too large");
5279 #endif
5280 #ifndef CONFIG_RELOCATABLE
5281 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5282 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5283 error("Wrong destination address");
5284 #endif
5285
5286 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/relocs.c linux-2.6.39.4/arch/x86/boot/compressed/relocs.c
5287 --- linux-2.6.39.4/arch/x86/boot/compressed/relocs.c 2011-05-19 00:06:34.000000000 -0400
5288 +++ linux-2.6.39.4/arch/x86/boot/compressed/relocs.c 2011-08-05 19:44:33.000000000 -0400
5289 @@ -13,8 +13,11 @@
5290
5291 static void die(char *fmt, ...);
5292
5293 +#include "../../../../include/generated/autoconf.h"
5294 +
5295 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5296 static Elf32_Ehdr ehdr;
5297 +static Elf32_Phdr *phdr;
5298 static unsigned long reloc_count, reloc_idx;
5299 static unsigned long *relocs;
5300
5301 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5302 }
5303 }
5304
5305 +static void read_phdrs(FILE *fp)
5306 +{
5307 + unsigned int i;
5308 +
5309 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5310 + if (!phdr) {
5311 + die("Unable to allocate %d program headers\n",
5312 + ehdr.e_phnum);
5313 + }
5314 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5315 + die("Seek to %d failed: %s\n",
5316 + ehdr.e_phoff, strerror(errno));
5317 + }
5318 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5319 + die("Cannot read ELF program headers: %s\n",
5320 + strerror(errno));
5321 + }
5322 + for(i = 0; i < ehdr.e_phnum; i++) {
5323 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5324 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5325 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5326 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5327 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5328 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5329 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5330 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5331 + }
5332 +
5333 +}
5334 +
5335 static void read_shdrs(FILE *fp)
5336 {
5337 - int i;
5338 + unsigned int i;
5339 Elf32_Shdr shdr;
5340
5341 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5342 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5343
5344 static void read_strtabs(FILE *fp)
5345 {
5346 - int i;
5347 + unsigned int i;
5348 for (i = 0; i < ehdr.e_shnum; i++) {
5349 struct section *sec = &secs[i];
5350 if (sec->shdr.sh_type != SHT_STRTAB) {
5351 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5352
5353 static void read_symtabs(FILE *fp)
5354 {
5355 - int i,j;
5356 + unsigned int i,j;
5357 for (i = 0; i < ehdr.e_shnum; i++) {
5358 struct section *sec = &secs[i];
5359 if (sec->shdr.sh_type != SHT_SYMTAB) {
5360 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5361
5362 static void read_relocs(FILE *fp)
5363 {
5364 - int i,j;
5365 + unsigned int i,j;
5366 + uint32_t base;
5367 +
5368 for (i = 0; i < ehdr.e_shnum; i++) {
5369 struct section *sec = &secs[i];
5370 if (sec->shdr.sh_type != SHT_REL) {
5371 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5372 die("Cannot read symbol table: %s\n",
5373 strerror(errno));
5374 }
5375 + base = 0;
5376 + for (j = 0; j < ehdr.e_phnum; j++) {
5377 + if (phdr[j].p_type != PT_LOAD )
5378 + continue;
5379 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5380 + continue;
5381 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5382 + break;
5383 + }
5384 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5385 Elf32_Rel *rel = &sec->reltab[j];
5386 - rel->r_offset = elf32_to_cpu(rel->r_offset);
5387 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5388 rel->r_info = elf32_to_cpu(rel->r_info);
5389 }
5390 }
5391 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5392
5393 static void print_absolute_symbols(void)
5394 {
5395 - int i;
5396 + unsigned int i;
5397 printf("Absolute symbols\n");
5398 printf(" Num: Value Size Type Bind Visibility Name\n");
5399 for (i = 0; i < ehdr.e_shnum; i++) {
5400 struct section *sec = &secs[i];
5401 char *sym_strtab;
5402 Elf32_Sym *sh_symtab;
5403 - int j;
5404 + unsigned int j;
5405
5406 if (sec->shdr.sh_type != SHT_SYMTAB) {
5407 continue;
5408 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5409
5410 static void print_absolute_relocs(void)
5411 {
5412 - int i, printed = 0;
5413 + unsigned int i, printed = 0;
5414
5415 for (i = 0; i < ehdr.e_shnum; i++) {
5416 struct section *sec = &secs[i];
5417 struct section *sec_applies, *sec_symtab;
5418 char *sym_strtab;
5419 Elf32_Sym *sh_symtab;
5420 - int j;
5421 + unsigned int j;
5422 if (sec->shdr.sh_type != SHT_REL) {
5423 continue;
5424 }
5425 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5426
5427 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5428 {
5429 - int i;
5430 + unsigned int i;
5431 /* Walk through the relocations */
5432 for (i = 0; i < ehdr.e_shnum; i++) {
5433 char *sym_strtab;
5434 Elf32_Sym *sh_symtab;
5435 struct section *sec_applies, *sec_symtab;
5436 - int j;
5437 + unsigned int j;
5438 struct section *sec = &secs[i];
5439
5440 if (sec->shdr.sh_type != SHT_REL) {
5441 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5442 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5443 continue;
5444 }
5445 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5446 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5447 + continue;
5448 +
5449 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5450 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5451 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5452 + continue;
5453 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5454 + continue;
5455 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5456 + continue;
5457 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5458 + continue;
5459 +#endif
5460 +
5461 switch (r_type) {
5462 case R_386_NONE:
5463 case R_386_PC32:
5464 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5465
5466 static void emit_relocs(int as_text)
5467 {
5468 - int i;
5469 + unsigned int i;
5470 /* Count how many relocations I have and allocate space for them. */
5471 reloc_count = 0;
5472 walk_relocs(count_reloc);
5473 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
5474 fname, strerror(errno));
5475 }
5476 read_ehdr(fp);
5477 + read_phdrs(fp);
5478 read_shdrs(fp);
5479 read_strtabs(fp);
5480 read_symtabs(fp);
5481 diff -urNp linux-2.6.39.4/arch/x86/boot/cpucheck.c linux-2.6.39.4/arch/x86/boot/cpucheck.c
5482 --- linux-2.6.39.4/arch/x86/boot/cpucheck.c 2011-05-19 00:06:34.000000000 -0400
5483 +++ linux-2.6.39.4/arch/x86/boot/cpucheck.c 2011-08-05 19:44:33.000000000 -0400
5484 @@ -74,7 +74,7 @@ static int has_fpu(void)
5485 u16 fcw = -1, fsw = -1;
5486 u32 cr0;
5487
5488 - asm("movl %%cr0,%0" : "=r" (cr0));
5489 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
5490 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5491 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5492 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5493 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5494 {
5495 u32 f0, f1;
5496
5497 - asm("pushfl ; "
5498 + asm volatile("pushfl ; "
5499 "pushfl ; "
5500 "popl %0 ; "
5501 "movl %0,%1 ; "
5502 @@ -115,7 +115,7 @@ static void get_flags(void)
5503 set_bit(X86_FEATURE_FPU, cpu.flags);
5504
5505 if (has_eflag(X86_EFLAGS_ID)) {
5506 - asm("cpuid"
5507 + asm volatile("cpuid"
5508 : "=a" (max_intel_level),
5509 "=b" (cpu_vendor[0]),
5510 "=d" (cpu_vendor[1]),
5511 @@ -124,7 +124,7 @@ static void get_flags(void)
5512
5513 if (max_intel_level >= 0x00000001 &&
5514 max_intel_level <= 0x0000ffff) {
5515 - asm("cpuid"
5516 + asm volatile("cpuid"
5517 : "=a" (tfms),
5518 "=c" (cpu.flags[4]),
5519 "=d" (cpu.flags[0])
5520 @@ -136,7 +136,7 @@ static void get_flags(void)
5521 cpu.model += ((tfms >> 16) & 0xf) << 4;
5522 }
5523
5524 - asm("cpuid"
5525 + asm volatile("cpuid"
5526 : "=a" (max_amd_level)
5527 : "a" (0x80000000)
5528 : "ebx", "ecx", "edx");
5529 @@ -144,7 +144,7 @@ static void get_flags(void)
5530 if (max_amd_level >= 0x80000001 &&
5531 max_amd_level <= 0x8000ffff) {
5532 u32 eax = 0x80000001;
5533 - asm("cpuid"
5534 + asm volatile("cpuid"
5535 : "+a" (eax),
5536 "=c" (cpu.flags[6]),
5537 "=d" (cpu.flags[1])
5538 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5539 u32 ecx = MSR_K7_HWCR;
5540 u32 eax, edx;
5541
5542 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5543 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5544 eax &= ~(1 << 15);
5545 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5546 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5547
5548 get_flags(); /* Make sure it really did something */
5549 err = check_flags();
5550 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5551 u32 ecx = MSR_VIA_FCR;
5552 u32 eax, edx;
5553
5554 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5555 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5556 eax |= (1<<1)|(1<<7);
5557 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5558 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5559
5560 set_bit(X86_FEATURE_CX8, cpu.flags);
5561 err = check_flags();
5562 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5563 u32 eax, edx;
5564 u32 level = 1;
5565
5566 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5567 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5568 - asm("cpuid"
5569 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5570 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5571 + asm volatile("cpuid"
5572 : "+a" (level), "=d" (cpu.flags[0])
5573 : : "ecx", "ebx");
5574 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5575 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5576
5577 err = check_flags();
5578 }
5579 diff -urNp linux-2.6.39.4/arch/x86/boot/header.S linux-2.6.39.4/arch/x86/boot/header.S
5580 --- linux-2.6.39.4/arch/x86/boot/header.S 2011-05-19 00:06:34.000000000 -0400
5581 +++ linux-2.6.39.4/arch/x86/boot/header.S 2011-08-05 19:44:33.000000000 -0400
5582 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5583 # single linked list of
5584 # struct setup_data
5585
5586 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5587 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5588
5589 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5590 #define VO_INIT_SIZE (VO__end - VO__text)
5591 diff -urNp linux-2.6.39.4/arch/x86/boot/Makefile linux-2.6.39.4/arch/x86/boot/Makefile
5592 --- linux-2.6.39.4/arch/x86/boot/Makefile 2011-05-19 00:06:34.000000000 -0400
5593 +++ linux-2.6.39.4/arch/x86/boot/Makefile 2011-08-05 20:34:06.000000000 -0400
5594 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5595 $(call cc-option, -fno-stack-protector) \
5596 $(call cc-option, -mpreferred-stack-boundary=2)
5597 KBUILD_CFLAGS += $(call cc-option, -m32)
5598 +ifdef CONSTIFY_PLUGIN
5599 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5600 +endif
5601 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5602 GCOV_PROFILE := n
5603
5604 diff -urNp linux-2.6.39.4/arch/x86/boot/memory.c linux-2.6.39.4/arch/x86/boot/memory.c
5605 --- linux-2.6.39.4/arch/x86/boot/memory.c 2011-05-19 00:06:34.000000000 -0400
5606 +++ linux-2.6.39.4/arch/x86/boot/memory.c 2011-08-05 19:44:33.000000000 -0400
5607 @@ -19,7 +19,7 @@
5608
5609 static int detect_memory_e820(void)
5610 {
5611 - int count = 0;
5612 + unsigned int count = 0;
5613 struct biosregs ireg, oreg;
5614 struct e820entry *desc = boot_params.e820_map;
5615 static struct e820entry buf; /* static so it is zeroed */
5616 diff -urNp linux-2.6.39.4/arch/x86/boot/video.c linux-2.6.39.4/arch/x86/boot/video.c
5617 --- linux-2.6.39.4/arch/x86/boot/video.c 2011-05-19 00:06:34.000000000 -0400
5618 +++ linux-2.6.39.4/arch/x86/boot/video.c 2011-08-05 19:44:33.000000000 -0400
5619 @@ -96,7 +96,7 @@ static void store_mode_params(void)
5620 static unsigned int get_entry(void)
5621 {
5622 char entry_buf[4];
5623 - int i, len = 0;
5624 + unsigned int i, len = 0;
5625 int key;
5626 unsigned int v;
5627
5628 diff -urNp linux-2.6.39.4/arch/x86/boot/video-vesa.c linux-2.6.39.4/arch/x86/boot/video-vesa.c
5629 --- linux-2.6.39.4/arch/x86/boot/video-vesa.c 2011-05-19 00:06:34.000000000 -0400
5630 +++ linux-2.6.39.4/arch/x86/boot/video-vesa.c 2011-08-05 19:44:33.000000000 -0400
5631 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5632
5633 boot_params.screen_info.vesapm_seg = oreg.es;
5634 boot_params.screen_info.vesapm_off = oreg.di;
5635 + boot_params.screen_info.vesapm_size = oreg.cx;
5636 }
5637
5638 /*
5639 diff -urNp linux-2.6.39.4/arch/x86/ia32/ia32_aout.c linux-2.6.39.4/arch/x86/ia32/ia32_aout.c
5640 --- linux-2.6.39.4/arch/x86/ia32/ia32_aout.c 2011-05-19 00:06:34.000000000 -0400
5641 +++ linux-2.6.39.4/arch/x86/ia32/ia32_aout.c 2011-08-05 19:44:33.000000000 -0400
5642 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5643 unsigned long dump_start, dump_size;
5644 struct user32 dump;
5645
5646 + memset(&dump, 0, sizeof(dump));
5647 +
5648 fs = get_fs();
5649 set_fs(KERNEL_DS);
5650 has_dumped = 1;
5651 diff -urNp linux-2.6.39.4/arch/x86/ia32/ia32entry.S linux-2.6.39.4/arch/x86/ia32/ia32entry.S
5652 --- linux-2.6.39.4/arch/x86/ia32/ia32entry.S 2011-05-19 00:06:34.000000000 -0400
5653 +++ linux-2.6.39.4/arch/x86/ia32/ia32entry.S 2011-08-05 19:44:33.000000000 -0400
5654 @@ -13,6 +13,7 @@
5655 #include <asm/thread_info.h>
5656 #include <asm/segment.h>
5657 #include <asm/irqflags.h>
5658 +#include <asm/pgtable.h>
5659 #include <linux/linkage.h>
5660
5661 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5662 @@ -95,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
5663 ENDPROC(native_irq_enable_sysexit)
5664 #endif
5665
5666 + .macro pax_enter_kernel_user
5667 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5668 + call pax_enter_kernel_user
5669 +#endif
5670 + .endm
5671 +
5672 + .macro pax_exit_kernel_user
5673 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5674 + call pax_exit_kernel_user
5675 +#endif
5676 +#ifdef CONFIG_PAX_RANDKSTACK
5677 + pushq %rax
5678 + call pax_randomize_kstack
5679 + popq %rax
5680 +#endif
5681 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5682 + call pax_erase_kstack
5683 +#endif
5684 + .endm
5685 +
5686 + .macro pax_erase_kstack
5687 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5688 + call pax_erase_kstack
5689 +#endif
5690 + .endm
5691 +
5692 /*
5693 * 32bit SYSENTER instruction entry.
5694 *
5695 @@ -121,7 +148,7 @@ ENTRY(ia32_sysenter_target)
5696 CFI_REGISTER rsp,rbp
5697 SWAPGS_UNSAFE_STACK
5698 movq PER_CPU_VAR(kernel_stack), %rsp
5699 - addq $(KERNEL_STACK_OFFSET),%rsp
5700 + pax_enter_kernel_user
5701 /*
5702 * No need to follow this irqs on/off section: the syscall
5703 * disabled irqs, here we enable it straight after entry:
5704 @@ -134,7 +161,8 @@ ENTRY(ia32_sysenter_target)
5705 CFI_REL_OFFSET rsp,0
5706 pushfq_cfi
5707 /*CFI_REL_OFFSET rflags,0*/
5708 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5709 + GET_THREAD_INFO(%r10)
5710 + movl TI_sysenter_return(%r10), %r10d
5711 CFI_REGISTER rip,r10
5712 pushq_cfi $__USER32_CS
5713 /*CFI_REL_OFFSET cs,0*/
5714 @@ -146,6 +174,12 @@ ENTRY(ia32_sysenter_target)
5715 SAVE_ARGS 0,0,1
5716 /* no need to do an access_ok check here because rbp has been
5717 32bit zero extended */
5718 +
5719 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5720 + mov $PAX_USER_SHADOW_BASE,%r10
5721 + add %r10,%rbp
5722 +#endif
5723 +
5724 1: movl (%rbp),%ebp
5725 .section __ex_table,"a"
5726 .quad 1b,ia32_badarg
5727 @@ -168,6 +202,7 @@ sysenter_dispatch:
5728 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5729 jnz sysexit_audit
5730 sysexit_from_sys_call:
5731 + pax_exit_kernel_user
5732 andl $~TS_COMPAT,TI_status(%r10)
5733 /* clear IF, that popfq doesn't enable interrupts early */
5734 andl $~0x200,EFLAGS-R11(%rsp)
5735 @@ -194,6 +229,9 @@ sysexit_from_sys_call:
5736 movl %eax,%esi /* 2nd arg: syscall number */
5737 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5738 call audit_syscall_entry
5739 +
5740 + pax_erase_kstack
5741 +
5742 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5743 cmpq $(IA32_NR_syscalls-1),%rax
5744 ja ia32_badsys
5745 @@ -246,6 +284,9 @@ sysenter_tracesys:
5746 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5747 movq %rsp,%rdi /* &pt_regs -> arg1 */
5748 call syscall_trace_enter
5749 +
5750 + pax_erase_kstack
5751 +
5752 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5753 RESTORE_REST
5754 cmpq $(IA32_NR_syscalls-1),%rax
5755 @@ -277,19 +318,24 @@ ENDPROC(ia32_sysenter_target)
5756 ENTRY(ia32_cstar_target)
5757 CFI_STARTPROC32 simple
5758 CFI_SIGNAL_FRAME
5759 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5760 + CFI_DEF_CFA rsp,0
5761 CFI_REGISTER rip,rcx
5762 /*CFI_REGISTER rflags,r11*/
5763 SWAPGS_UNSAFE_STACK
5764 movl %esp,%r8d
5765 CFI_REGISTER rsp,r8
5766 movq PER_CPU_VAR(kernel_stack),%rsp
5767 +
5768 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5769 + pax_enter_kernel_user
5770 +#endif
5771 +
5772 /*
5773 * No need to follow this irqs on/off section: the syscall
5774 * disabled irqs and here we enable it straight after entry:
5775 */
5776 ENABLE_INTERRUPTS(CLBR_NONE)
5777 - SAVE_ARGS 8,1,1
5778 + SAVE_ARGS 8*6,1,1
5779 movl %eax,%eax /* zero extension */
5780 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5781 movq %rcx,RIP-ARGOFFSET(%rsp)
5782 @@ -305,6 +351,12 @@ ENTRY(ia32_cstar_target)
5783 /* no need to do an access_ok check here because r8 has been
5784 32bit zero extended */
5785 /* hardware stack frame is complete now */
5786 +
5787 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5788 + mov $PAX_USER_SHADOW_BASE,%r10
5789 + add %r10,%r8
5790 +#endif
5791 +
5792 1: movl (%r8),%r9d
5793 .section __ex_table,"a"
5794 .quad 1b,ia32_badarg
5795 @@ -327,6 +379,7 @@ cstar_dispatch:
5796 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5797 jnz sysretl_audit
5798 sysretl_from_sys_call:
5799 + pax_exit_kernel_user
5800 andl $~TS_COMPAT,TI_status(%r10)
5801 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5802 movl RIP-ARGOFFSET(%rsp),%ecx
5803 @@ -364,6 +417,9 @@ cstar_tracesys:
5804 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5805 movq %rsp,%rdi /* &pt_regs -> arg1 */
5806 call syscall_trace_enter
5807 +
5808 + pax_erase_kstack
5809 +
5810 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5811 RESTORE_REST
5812 xchgl %ebp,%r9d
5813 @@ -409,6 +465,7 @@ ENTRY(ia32_syscall)
5814 CFI_REL_OFFSET rip,RIP-RIP
5815 PARAVIRT_ADJUST_EXCEPTION_FRAME
5816 SWAPGS
5817 + pax_enter_kernel_user
5818 /*
5819 * No need to follow this irqs on/off section: the syscall
5820 * disabled irqs and here we enable it straight after entry:
5821 @@ -441,6 +498,9 @@ ia32_tracesys:
5822 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5823 movq %rsp,%rdi /* &pt_regs -> arg1 */
5824 call syscall_trace_enter
5825 +
5826 + pax_erase_kstack
5827 +
5828 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5829 RESTORE_REST
5830 cmpq $(IA32_NR_syscalls-1),%rax
5831 diff -urNp linux-2.6.39.4/arch/x86/ia32/ia32_signal.c linux-2.6.39.4/arch/x86/ia32/ia32_signal.c
5832 --- linux-2.6.39.4/arch/x86/ia32/ia32_signal.c 2011-05-19 00:06:34.000000000 -0400
5833 +++ linux-2.6.39.4/arch/x86/ia32/ia32_signal.c 2011-08-05 19:44:33.000000000 -0400
5834 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5835 sp -= frame_size;
5836 /* Align the stack pointer according to the i386 ABI,
5837 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5838 - sp = ((sp + 4) & -16ul) - 4;
5839 + sp = ((sp - 12) & -16ul) - 4;
5840 return (void __user *) sp;
5841 }
5842
5843 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5844 * These are actually not used anymore, but left because some
5845 * gdb versions depend on them as a marker.
5846 */
5847 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5848 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5849 } put_user_catch(err);
5850
5851 if (err)
5852 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5853 0xb8,
5854 __NR_ia32_rt_sigreturn,
5855 0x80cd,
5856 - 0,
5857 + 0
5858 };
5859
5860 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5861 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
5862
5863 if (ka->sa.sa_flags & SA_RESTORER)
5864 restorer = ka->sa.sa_restorer;
5865 + else if (current->mm->context.vdso)
5866 + /* Return stub is in 32bit vsyscall page */
5867 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5868 else
5869 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5870 - rt_sigreturn);
5871 + restorer = &frame->retcode;
5872 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5873
5874 /*
5875 * Not actually used anymore, but left because some gdb
5876 * versions need it.
5877 */
5878 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5879 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5880 } put_user_catch(err);
5881
5882 if (err)
5883 diff -urNp linux-2.6.39.4/arch/x86/include/asm/alternative.h linux-2.6.39.4/arch/x86/include/asm/alternative.h
5884 --- linux-2.6.39.4/arch/x86/include/asm/alternative.h 2011-05-19 00:06:34.000000000 -0400
5885 +++ linux-2.6.39.4/arch/x86/include/asm/alternative.h 2011-08-05 19:44:33.000000000 -0400
5886 @@ -94,7 +94,7 @@ static inline int alternatives_text_rese
5887 ".section .discard,\"aw\",@progbits\n" \
5888 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
5889 ".previous\n" \
5890 - ".section .altinstr_replacement, \"ax\"\n" \
5891 + ".section .altinstr_replacement, \"a\"\n" \
5892 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
5893 ".previous"
5894
5895 diff -urNp linux-2.6.39.4/arch/x86/include/asm/apic.h linux-2.6.39.4/arch/x86/include/asm/apic.h
5896 --- linux-2.6.39.4/arch/x86/include/asm/apic.h 2011-05-19 00:06:34.000000000 -0400
5897 +++ linux-2.6.39.4/arch/x86/include/asm/apic.h 2011-08-17 20:01:35.000000000 -0400
5898 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
5899
5900 #ifdef CONFIG_X86_LOCAL_APIC
5901
5902 -extern unsigned int apic_verbosity;
5903 +extern int apic_verbosity;
5904 extern int local_apic_timer_c2_ok;
5905
5906 extern int disable_apic;
5907 diff -urNp linux-2.6.39.4/arch/x86/include/asm/apm.h linux-2.6.39.4/arch/x86/include/asm/apm.h
5908 --- linux-2.6.39.4/arch/x86/include/asm/apm.h 2011-05-19 00:06:34.000000000 -0400
5909 +++ linux-2.6.39.4/arch/x86/include/asm/apm.h 2011-08-05 19:44:33.000000000 -0400
5910 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
5911 __asm__ __volatile__(APM_DO_ZERO_SEGS
5912 "pushl %%edi\n\t"
5913 "pushl %%ebp\n\t"
5914 - "lcall *%%cs:apm_bios_entry\n\t"
5915 + "lcall *%%ss:apm_bios_entry\n\t"
5916 "setc %%al\n\t"
5917 "popl %%ebp\n\t"
5918 "popl %%edi\n\t"
5919 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
5920 __asm__ __volatile__(APM_DO_ZERO_SEGS
5921 "pushl %%edi\n\t"
5922 "pushl %%ebp\n\t"
5923 - "lcall *%%cs:apm_bios_entry\n\t"
5924 + "lcall *%%ss:apm_bios_entry\n\t"
5925 "setc %%bl\n\t"
5926 "popl %%ebp\n\t"
5927 "popl %%edi\n\t"
5928 diff -urNp linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h
5929 --- linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h 2011-05-19 00:06:34.000000000 -0400
5930 +++ linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h 2011-08-05 19:44:33.000000000 -0400
5931 @@ -12,6 +12,14 @@ typedef struct {
5932 u64 __aligned(8) counter;
5933 } atomic64_t;
5934
5935 +#ifdef CONFIG_PAX_REFCOUNT
5936 +typedef struct {
5937 + u64 __aligned(8) counter;
5938 +} atomic64_unchecked_t;
5939 +#else
5940 +typedef atomic64_t atomic64_unchecked_t;
5941 +#endif
5942 +
5943 #define ATOMIC64_INIT(val) { (val) }
5944
5945 #ifdef CONFIG_X86_CMPXCHG64
5946 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
5947 }
5948
5949 /**
5950 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
5951 + * @p: pointer to type atomic64_unchecked_t
5952 + * @o: expected value
5953 + * @n: new value
5954 + *
5955 + * Atomically sets @v to @n if it was equal to @o and returns
5956 + * the old value.
5957 + */
5958 +
5959 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
5960 +{
5961 + return cmpxchg64(&v->counter, o, n);
5962 +}
5963 +
5964 +/**
5965 * atomic64_xchg - xchg atomic64 variable
5966 * @v: pointer to type atomic64_t
5967 * @n: value to assign
5968 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
5969 }
5970
5971 /**
5972 + * atomic64_set_unchecked - set atomic64 variable
5973 + * @v: pointer to type atomic64_unchecked_t
5974 + * @n: value to assign
5975 + *
5976 + * Atomically sets the value of @v to @n.
5977 + */
5978 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
5979 +{
5980 + unsigned high = (unsigned)(i >> 32);
5981 + unsigned low = (unsigned)i;
5982 + asm volatile(ATOMIC64_ALTERNATIVE(set)
5983 + : "+b" (low), "+c" (high)
5984 + : "S" (v)
5985 + : "eax", "edx", "memory"
5986 + );
5987 +}
5988 +
5989 +/**
5990 * atomic64_read - read atomic64 variable
5991 * @v: pointer to type atomic64_t
5992 *
5993 @@ -93,6 +134,22 @@ static inline long long atomic64_read(at
5994 }
5995
5996 /**
5997 + * atomic64_read_unchecked - read atomic64 variable
5998 + * @v: pointer to type atomic64_unchecked_t
5999 + *
6000 + * Atomically reads the value of @v and returns it.
6001 + */
6002 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
6003 +{
6004 + long long r;
6005 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
6006 + : "=A" (r), "+c" (v)
6007 + : : "memory"
6008 + );
6009 + return r;
6010 + }
6011 +
6012 +/**
6013 * atomic64_add_return - add and return
6014 * @i: integer value to add
6015 * @v: pointer to type atomic64_t
6016 @@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
6017 return i;
6018 }
6019
6020 +/**
6021 + * atomic64_add_return_unchecked - add and return
6022 + * @i: integer value to add
6023 + * @v: pointer to type atomic64_unchecked_t
6024 + *
6025 + * Atomically adds @i to @v and returns @i + *@v
6026 + */
6027 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
6028 +{
6029 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
6030 + : "+A" (i), "+c" (v)
6031 + : : "memory"
6032 + );
6033 + return i;
6034 +}
6035 +
6036 /*
6037 * Other variants with different arithmetic operators:
6038 */
6039 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
6040 return a;
6041 }
6042
6043 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6044 +{
6045 + long long a;
6046 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
6047 + : "=A" (a)
6048 + : "S" (v)
6049 + : "memory", "ecx"
6050 + );
6051 + return a;
6052 +}
6053 +
6054 static inline long long atomic64_dec_return(atomic64_t *v)
6055 {
6056 long long a;
6057 @@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6058 }
6059
6060 /**
6061 + * atomic64_add_unchecked - add integer to atomic64 variable
6062 + * @i: integer value to add
6063 + * @v: pointer to type atomic64_unchecked_t
6064 + *
6065 + * Atomically adds @i to @v.
6066 + */
6067 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6068 +{
6069 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6070 + : "+A" (i), "+c" (v)
6071 + : : "memory"
6072 + );
6073 + return i;
6074 +}
6075 +
6076 +/**
6077 * atomic64_sub - subtract the atomic64 variable
6078 * @i: integer value to subtract
6079 * @v: pointer to type atomic64_t
6080 diff -urNp linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h
6081 --- linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h 2011-05-19 00:06:34.000000000 -0400
6082 +++ linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h 2011-08-05 19:44:33.000000000 -0400
6083 @@ -18,7 +18,19 @@
6084 */
6085 static inline long atomic64_read(const atomic64_t *v)
6086 {
6087 - return (*(volatile long *)&(v)->counter);
6088 + return (*(volatile const long *)&(v)->counter);
6089 +}
6090 +
6091 +/**
6092 + * atomic64_read_unchecked - read atomic64 variable
6093 + * @v: pointer of type atomic64_unchecked_t
6094 + *
6095 + * Atomically reads the value of @v.
6096 + * Doesn't imply a read memory barrier.
6097 + */
6098 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6099 +{
6100 + return (*(volatile const long *)&(v)->counter);
6101 }
6102
6103 /**
6104 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6105 }
6106
6107 /**
6108 + * atomic64_set_unchecked - set atomic64 variable
6109 + * @v: pointer to type atomic64_unchecked_t
6110 + * @i: required value
6111 + *
6112 + * Atomically sets the value of @v to @i.
6113 + */
6114 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6115 +{
6116 + v->counter = i;
6117 +}
6118 +
6119 +/**
6120 * atomic64_add - add integer to atomic64 variable
6121 * @i: integer value to add
6122 * @v: pointer to type atomic64_t
6123 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6124 */
6125 static inline void atomic64_add(long i, atomic64_t *v)
6126 {
6127 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
6128 +
6129 +#ifdef CONFIG_PAX_REFCOUNT
6130 + "jno 0f\n"
6131 + LOCK_PREFIX "subq %1,%0\n"
6132 + "int $4\n0:\n"
6133 + _ASM_EXTABLE(0b, 0b)
6134 +#endif
6135 +
6136 + : "=m" (v->counter)
6137 + : "er" (i), "m" (v->counter));
6138 +}
6139 +
6140 +/**
6141 + * atomic64_add_unchecked - add integer to atomic64 variable
6142 + * @i: integer value to add
6143 + * @v: pointer to type atomic64_unchecked_t
6144 + *
6145 + * Atomically adds @i to @v.
6146 + */
6147 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6148 +{
6149 asm volatile(LOCK_PREFIX "addq %1,%0"
6150 : "=m" (v->counter)
6151 : "er" (i), "m" (v->counter));
6152 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6153 */
6154 static inline void atomic64_sub(long i, atomic64_t *v)
6155 {
6156 - asm volatile(LOCK_PREFIX "subq %1,%0"
6157 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6158 +
6159 +#ifdef CONFIG_PAX_REFCOUNT
6160 + "jno 0f\n"
6161 + LOCK_PREFIX "addq %1,%0\n"
6162 + "int $4\n0:\n"
6163 + _ASM_EXTABLE(0b, 0b)
6164 +#endif
6165 +
6166 + : "=m" (v->counter)
6167 + : "er" (i), "m" (v->counter));
6168 +}
6169 +
6170 +/**
6171 + * atomic64_sub_unchecked - subtract the atomic64 variable
6172 + * @i: integer value to subtract
6173 + * @v: pointer to type atomic64_unchecked_t
6174 + *
6175 + * Atomically subtracts @i from @v.
6176 + */
6177 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6178 +{
6179 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6180 : "=m" (v->counter)
6181 : "er" (i), "m" (v->counter));
6182 }
6183 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6184 {
6185 unsigned char c;
6186
6187 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6188 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
6189 +
6190 +#ifdef CONFIG_PAX_REFCOUNT
6191 + "jno 0f\n"
6192 + LOCK_PREFIX "addq %2,%0\n"
6193 + "int $4\n0:\n"
6194 + _ASM_EXTABLE(0b, 0b)
6195 +#endif
6196 +
6197 + "sete %1\n"
6198 : "=m" (v->counter), "=qm" (c)
6199 : "er" (i), "m" (v->counter) : "memory");
6200 return c;
6201 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6202 */
6203 static inline void atomic64_inc(atomic64_t *v)
6204 {
6205 + asm volatile(LOCK_PREFIX "incq %0\n"
6206 +
6207 +#ifdef CONFIG_PAX_REFCOUNT
6208 + "jno 0f\n"
6209 + LOCK_PREFIX "decq %0\n"
6210 + "int $4\n0:\n"
6211 + _ASM_EXTABLE(0b, 0b)
6212 +#endif
6213 +
6214 + : "=m" (v->counter)
6215 + : "m" (v->counter));
6216 +}
6217 +
6218 +/**
6219 + * atomic64_inc_unchecked - increment atomic64 variable
6220 + * @v: pointer to type atomic64_unchecked_t
6221 + *
6222 + * Atomically increments @v by 1.
6223 + */
6224 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6225 +{
6226 asm volatile(LOCK_PREFIX "incq %0"
6227 : "=m" (v->counter)
6228 : "m" (v->counter));
6229 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6230 */
6231 static inline void atomic64_dec(atomic64_t *v)
6232 {
6233 - asm volatile(LOCK_PREFIX "decq %0"
6234 + asm volatile(LOCK_PREFIX "decq %0\n"
6235 +
6236 +#ifdef CONFIG_PAX_REFCOUNT
6237 + "jno 0f\n"
6238 + LOCK_PREFIX "incq %0\n"
6239 + "int $4\n0:\n"
6240 + _ASM_EXTABLE(0b, 0b)
6241 +#endif
6242 +
6243 + : "=m" (v->counter)
6244 + : "m" (v->counter));
6245 +}
6246 +
6247 +/**
6248 + * atomic64_dec_unchecked - decrement atomic64 variable
6249 + * @v: pointer to type atomic64_t
6250 + *
6251 + * Atomically decrements @v by 1.
6252 + */
6253 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6254 +{
6255 + asm volatile(LOCK_PREFIX "decq %0\n"
6256 : "=m" (v->counter)
6257 : "m" (v->counter));
6258 }
6259 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6260 {
6261 unsigned char c;
6262
6263 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
6264 + asm volatile(LOCK_PREFIX "decq %0\n"
6265 +
6266 +#ifdef CONFIG_PAX_REFCOUNT
6267 + "jno 0f\n"
6268 + LOCK_PREFIX "incq %0\n"
6269 + "int $4\n0:\n"
6270 + _ASM_EXTABLE(0b, 0b)
6271 +#endif
6272 +
6273 + "sete %1\n"
6274 : "=m" (v->counter), "=qm" (c)
6275 : "m" (v->counter) : "memory");
6276 return c != 0;
6277 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6278 {
6279 unsigned char c;
6280
6281 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
6282 + asm volatile(LOCK_PREFIX "incq %0\n"
6283 +
6284 +#ifdef CONFIG_PAX_REFCOUNT
6285 + "jno 0f\n"
6286 + LOCK_PREFIX "decq %0\n"
6287 + "int $4\n0:\n"
6288 + _ASM_EXTABLE(0b, 0b)
6289 +#endif
6290 +
6291 + "sete %1\n"
6292 : "=m" (v->counter), "=qm" (c)
6293 : "m" (v->counter) : "memory");
6294 return c != 0;
6295 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6296 {
6297 unsigned char c;
6298
6299 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6300 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
6301 +
6302 +#ifdef CONFIG_PAX_REFCOUNT
6303 + "jno 0f\n"
6304 + LOCK_PREFIX "subq %2,%0\n"
6305 + "int $4\n0:\n"
6306 + _ASM_EXTABLE(0b, 0b)
6307 +#endif
6308 +
6309 + "sets %1\n"
6310 : "=m" (v->counter), "=qm" (c)
6311 : "er" (i), "m" (v->counter) : "memory");
6312 return c;
6313 @@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6314 static inline long atomic64_add_return(long i, atomic64_t *v)
6315 {
6316 long __i = i;
6317 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6318 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6319 +
6320 +#ifdef CONFIG_PAX_REFCOUNT
6321 + "jno 0f\n"
6322 + "movq %0, %1\n"
6323 + "int $4\n0:\n"
6324 + _ASM_EXTABLE(0b, 0b)
6325 +#endif
6326 +
6327 + : "+r" (i), "+m" (v->counter)
6328 + : : "memory");
6329 + return i + __i;
6330 +}
6331 +
6332 +/**
6333 + * atomic64_add_return_unchecked - add and return
6334 + * @i: integer value to add
6335 + * @v: pointer to type atomic64_unchecked_t
6336 + *
6337 + * Atomically adds @i to @v and returns @i + @v
6338 + */
6339 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6340 +{
6341 + long __i = i;
6342 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
6343 : "+r" (i), "+m" (v->counter)
6344 : : "memory");
6345 return i + __i;
6346 @@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6347 }
6348
6349 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6350 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6351 +{
6352 + return atomic64_add_return_unchecked(1, v);
6353 +}
6354 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6355
6356 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6357 @@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6358 return cmpxchg(&v->counter, old, new);
6359 }
6360
6361 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6362 +{
6363 + return cmpxchg(&v->counter, old, new);
6364 +}
6365 +
6366 static inline long atomic64_xchg(atomic64_t *v, long new)
6367 {
6368 return xchg(&v->counter, new);
6369 @@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6370 */
6371 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6372 {
6373 - long c, old;
6374 + long c, old, new;
6375 c = atomic64_read(v);
6376 for (;;) {
6377 - if (unlikely(c == (u)))
6378 + if (unlikely(c == u))
6379 break;
6380 - old = atomic64_cmpxchg((v), c, c + (a));
6381 +
6382 + asm volatile("add %2,%0\n"
6383 +
6384 +#ifdef CONFIG_PAX_REFCOUNT
6385 + "jno 0f\n"
6386 + "sub %2,%0\n"
6387 + "int $4\n0:\n"
6388 + _ASM_EXTABLE(0b, 0b)
6389 +#endif
6390 +
6391 + : "=r" (new)
6392 + : "0" (c), "ir" (a));
6393 +
6394 + old = atomic64_cmpxchg(v, c, new);
6395 if (likely(old == c))
6396 break;
6397 c = old;
6398 }
6399 - return c != (u);
6400 + return c != u;
6401 }
6402
6403 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6404 diff -urNp linux-2.6.39.4/arch/x86/include/asm/atomic.h linux-2.6.39.4/arch/x86/include/asm/atomic.h
6405 --- linux-2.6.39.4/arch/x86/include/asm/atomic.h 2011-05-19 00:06:34.000000000 -0400
6406 +++ linux-2.6.39.4/arch/x86/include/asm/atomic.h 2011-08-05 19:44:33.000000000 -0400
6407 @@ -22,7 +22,18 @@
6408 */
6409 static inline int atomic_read(const atomic_t *v)
6410 {
6411 - return (*(volatile int *)&(v)->counter);
6412 + return (*(volatile const int *)&(v)->counter);
6413 +}
6414 +
6415 +/**
6416 + * atomic_read_unchecked - read atomic variable
6417 + * @v: pointer of type atomic_unchecked_t
6418 + *
6419 + * Atomically reads the value of @v.
6420 + */
6421 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6422 +{
6423 + return (*(volatile const int *)&(v)->counter);
6424 }
6425
6426 /**
6427 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6428 }
6429
6430 /**
6431 + * atomic_set_unchecked - set atomic variable
6432 + * @v: pointer of type atomic_unchecked_t
6433 + * @i: required value
6434 + *
6435 + * Atomically sets the value of @v to @i.
6436 + */
6437 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6438 +{
6439 + v->counter = i;
6440 +}
6441 +
6442 +/**
6443 * atomic_add - add integer to atomic variable
6444 * @i: integer value to add
6445 * @v: pointer of type atomic_t
6446 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6447 */
6448 static inline void atomic_add(int i, atomic_t *v)
6449 {
6450 - asm volatile(LOCK_PREFIX "addl %1,%0"
6451 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6452 +
6453 +#ifdef CONFIG_PAX_REFCOUNT
6454 + "jno 0f\n"
6455 + LOCK_PREFIX "subl %1,%0\n"
6456 + "int $4\n0:\n"
6457 + _ASM_EXTABLE(0b, 0b)
6458 +#endif
6459 +
6460 + : "+m" (v->counter)
6461 + : "ir" (i));
6462 +}
6463 +
6464 +/**
6465 + * atomic_add_unchecked - add integer to atomic variable
6466 + * @i: integer value to add
6467 + * @v: pointer of type atomic_unchecked_t
6468 + *
6469 + * Atomically adds @i to @v.
6470 + */
6471 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6472 +{
6473 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6474 : "+m" (v->counter)
6475 : "ir" (i));
6476 }
6477 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6478 */
6479 static inline void atomic_sub(int i, atomic_t *v)
6480 {
6481 - asm volatile(LOCK_PREFIX "subl %1,%0"
6482 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6483 +
6484 +#ifdef CONFIG_PAX_REFCOUNT
6485 + "jno 0f\n"
6486 + LOCK_PREFIX "addl %1,%0\n"
6487 + "int $4\n0:\n"
6488 + _ASM_EXTABLE(0b, 0b)
6489 +#endif
6490 +
6491 + : "+m" (v->counter)
6492 + : "ir" (i));
6493 +}
6494 +
6495 +/**
6496 + * atomic_sub_unchecked - subtract integer from atomic variable
6497 + * @i: integer value to subtract
6498 + * @v: pointer of type atomic_unchecked_t
6499 + *
6500 + * Atomically subtracts @i from @v.
6501 + */
6502 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6503 +{
6504 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6505 : "+m" (v->counter)
6506 : "ir" (i));
6507 }
6508 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6509 {
6510 unsigned char c;
6511
6512 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6513 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
6514 +
6515 +#ifdef CONFIG_PAX_REFCOUNT
6516 + "jno 0f\n"
6517 + LOCK_PREFIX "addl %2,%0\n"
6518 + "int $4\n0:\n"
6519 + _ASM_EXTABLE(0b, 0b)
6520 +#endif
6521 +
6522 + "sete %1\n"
6523 : "+m" (v->counter), "=qm" (c)
6524 : "ir" (i) : "memory");
6525 return c;
6526 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6527 */
6528 static inline void atomic_inc(atomic_t *v)
6529 {
6530 - asm volatile(LOCK_PREFIX "incl %0"
6531 + asm volatile(LOCK_PREFIX "incl %0\n"
6532 +
6533 +#ifdef CONFIG_PAX_REFCOUNT
6534 + "jno 0f\n"
6535 + LOCK_PREFIX "decl %0\n"
6536 + "int $4\n0:\n"
6537 + _ASM_EXTABLE(0b, 0b)
6538 +#endif
6539 +
6540 + : "+m" (v->counter));
6541 +}
6542 +
6543 +/**
6544 + * atomic_inc_unchecked - increment atomic variable
6545 + * @v: pointer of type atomic_unchecked_t
6546 + *
6547 + * Atomically increments @v by 1.
6548 + */
6549 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6550 +{
6551 + asm volatile(LOCK_PREFIX "incl %0\n"
6552 : "+m" (v->counter));
6553 }
6554
6555 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6556 */
6557 static inline void atomic_dec(atomic_t *v)
6558 {
6559 - asm volatile(LOCK_PREFIX "decl %0"
6560 + asm volatile(LOCK_PREFIX "decl %0\n"
6561 +
6562 +#ifdef CONFIG_PAX_REFCOUNT
6563 + "jno 0f\n"
6564 + LOCK_PREFIX "incl %0\n"
6565 + "int $4\n0:\n"
6566 + _ASM_EXTABLE(0b, 0b)
6567 +#endif
6568 +
6569 + : "+m" (v->counter));
6570 +}
6571 +
6572 +/**
6573 + * atomic_dec_unchecked - decrement atomic variable
6574 + * @v: pointer of type atomic_unchecked_t
6575 + *
6576 + * Atomically decrements @v by 1.
6577 + */
6578 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6579 +{
6580 + asm volatile(LOCK_PREFIX "decl %0\n"
6581 : "+m" (v->counter));
6582 }
6583
6584 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6585 {
6586 unsigned char c;
6587
6588 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
6589 + asm volatile(LOCK_PREFIX "decl %0\n"
6590 +
6591 +#ifdef CONFIG_PAX_REFCOUNT
6592 + "jno 0f\n"
6593 + LOCK_PREFIX "incl %0\n"
6594 + "int $4\n0:\n"
6595 + _ASM_EXTABLE(0b, 0b)
6596 +#endif
6597 +
6598 + "sete %1\n"
6599 : "+m" (v->counter), "=qm" (c)
6600 : : "memory");
6601 return c != 0;
6602 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6603 {
6604 unsigned char c;
6605
6606 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
6607 + asm volatile(LOCK_PREFIX "incl %0\n"
6608 +
6609 +#ifdef CONFIG_PAX_REFCOUNT
6610 + "jno 0f\n"
6611 + LOCK_PREFIX "decl %0\n"
6612 + "int $4\n0:\n"
6613 + _ASM_EXTABLE(0b, 0b)
6614 +#endif
6615 +
6616 + "sete %1\n"
6617 + : "+m" (v->counter), "=qm" (c)
6618 + : : "memory");
6619 + return c != 0;
6620 +}
6621 +
6622 +/**
6623 + * atomic_inc_and_test_unchecked - increment and test
6624 + * @v: pointer of type atomic_unchecked_t
6625 + *
6626 + * Atomically increments @v by 1
6627 + * and returns true if the result is zero, or false for all
6628 + * other cases.
6629 + */
6630 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6631 +{
6632 + unsigned char c;
6633 +
6634 + asm volatile(LOCK_PREFIX "incl %0\n"
6635 + "sete %1\n"
6636 : "+m" (v->counter), "=qm" (c)
6637 : : "memory");
6638 return c != 0;
6639 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6640 {
6641 unsigned char c;
6642
6643 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6644 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
6645 +
6646 +#ifdef CONFIG_PAX_REFCOUNT
6647 + "jno 0f\n"
6648 + LOCK_PREFIX "subl %2,%0\n"
6649 + "int $4\n0:\n"
6650 + _ASM_EXTABLE(0b, 0b)
6651 +#endif
6652 +
6653 + "sets %1\n"
6654 : "+m" (v->counter), "=qm" (c)
6655 : "ir" (i) : "memory");
6656 return c;
6657 @@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6658 #endif
6659 /* Modern 486+ processor */
6660 __i = i;
6661 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6662 +
6663 +#ifdef CONFIG_PAX_REFCOUNT
6664 + "jno 0f\n"
6665 + "movl %0, %1\n"
6666 + "int $4\n0:\n"
6667 + _ASM_EXTABLE(0b, 0b)
6668 +#endif
6669 +
6670 + : "+r" (i), "+m" (v->counter)
6671 + : : "memory");
6672 + return i + __i;
6673 +
6674 +#ifdef CONFIG_M386
6675 +no_xadd: /* Legacy 386 processor */
6676 + local_irq_save(flags);
6677 + __i = atomic_read(v);
6678 + atomic_set(v, i + __i);
6679 + local_irq_restore(flags);
6680 + return i + __i;
6681 +#endif
6682 +}
6683 +
6684 +/**
6685 + * atomic_add_return_unchecked - add integer and return
6686 + * @v: pointer of type atomic_unchecked_t
6687 + * @i: integer value to add
6688 + *
6689 + * Atomically adds @i to @v and returns @i + @v
6690 + */
6691 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6692 +{
6693 + int __i;
6694 +#ifdef CONFIG_M386
6695 + unsigned long flags;
6696 + if (unlikely(boot_cpu_data.x86 <= 3))
6697 + goto no_xadd;
6698 +#endif
6699 + /* Modern 486+ processor */
6700 + __i = i;
6701 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6702 : "+r" (i), "+m" (v->counter)
6703 : : "memory");
6704 @@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6705 }
6706
6707 #define atomic_inc_return(v) (atomic_add_return(1, v))
6708 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6709 +{
6710 + return atomic_add_return_unchecked(1, v);
6711 +}
6712 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6713
6714 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6715 @@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6716 return cmpxchg(&v->counter, old, new);
6717 }
6718
6719 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6720 +{
6721 + return cmpxchg(&v->counter, old, new);
6722 +}
6723 +
6724 static inline int atomic_xchg(atomic_t *v, int new)
6725 {
6726 return xchg(&v->counter, new);
6727 }
6728
6729 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6730 +{
6731 + return xchg(&v->counter, new);
6732 +}
6733 +
6734 /**
6735 * atomic_add_unless - add unless the number is already a given value
6736 * @v: pointer of type atomic_t
6737 @@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6738 */
6739 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6740 {
6741 - int c, old;
6742 + int c, old, new;
6743 c = atomic_read(v);
6744 for (;;) {
6745 - if (unlikely(c == (u)))
6746 + if (unlikely(c == u))
6747 break;
6748 - old = atomic_cmpxchg((v), c, c + (a));
6749 +
6750 + asm volatile("addl %2,%0\n"
6751 +
6752 +#ifdef CONFIG_PAX_REFCOUNT
6753 + "jno 0f\n"
6754 + "subl %2,%0\n"
6755 + "int $4\n0:\n"
6756 + _ASM_EXTABLE(0b, 0b)
6757 +#endif
6758 +
6759 + : "=r" (new)
6760 + : "0" (c), "ir" (a));
6761 +
6762 + old = atomic_cmpxchg(v, c, new);
6763 if (likely(old == c))
6764 break;
6765 c = old;
6766 }
6767 - return c != (u);
6768 + return c != u;
6769 }
6770
6771 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6772
6773 +/**
6774 + * atomic_inc_not_zero_hint - increment if not null
6775 + * @v: pointer of type atomic_t
6776 + * @hint: probable value of the atomic before the increment
6777 + *
6778 + * This version of atomic_inc_not_zero() gives a hint of probable
6779 + * value of the atomic. This helps processor to not read the memory
6780 + * before doing the atomic read/modify/write cycle, lowering
6781 + * number of bus transactions on some arches.
6782 + *
6783 + * Returns: 0 if increment was not done, 1 otherwise.
6784 + */
6785 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6786 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6787 +{
6788 + int val, c = hint, new;
6789 +
6790 + /* sanity test, should be removed by compiler if hint is a constant */
6791 + if (!hint)
6792 + return atomic_inc_not_zero(v);
6793 +
6794 + do {
6795 + asm volatile("incl %0\n"
6796 +
6797 +#ifdef CONFIG_PAX_REFCOUNT
6798 + "jno 0f\n"
6799 + "decl %0\n"
6800 + "int $4\n0:\n"
6801 + _ASM_EXTABLE(0b, 0b)
6802 +#endif
6803 +
6804 + : "=r" (new)
6805 + : "0" (c));
6806 +
6807 + val = atomic_cmpxchg(v, c, new);
6808 + if (val == c)
6809 + return 1;
6810 + c = val;
6811 + } while (c);
6812 +
6813 + return 0;
6814 +}
6815 +
6816 /*
6817 * atomic_dec_if_positive - decrement by 1 if old value positive
6818 * @v: pointer of type atomic_t
6819 diff -urNp linux-2.6.39.4/arch/x86/include/asm/bitops.h linux-2.6.39.4/arch/x86/include/asm/bitops.h
6820 --- linux-2.6.39.4/arch/x86/include/asm/bitops.h 2011-05-19 00:06:34.000000000 -0400
6821 +++ linux-2.6.39.4/arch/x86/include/asm/bitops.h 2011-08-05 19:44:33.000000000 -0400
6822 @@ -38,7 +38,7 @@
6823 * a mask operation on a byte.
6824 */
6825 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6826 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6827 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6828 #define CONST_MASK(nr) (1 << ((nr) & 7))
6829
6830 /**
6831 diff -urNp linux-2.6.39.4/arch/x86/include/asm/boot.h linux-2.6.39.4/arch/x86/include/asm/boot.h
6832 --- linux-2.6.39.4/arch/x86/include/asm/boot.h 2011-05-19 00:06:34.000000000 -0400
6833 +++ linux-2.6.39.4/arch/x86/include/asm/boot.h 2011-08-05 19:44:33.000000000 -0400
6834 @@ -11,10 +11,15 @@
6835 #include <asm/pgtable_types.h>
6836
6837 /* Physical address where kernel should be loaded. */
6838 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6839 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6840 + (CONFIG_PHYSICAL_ALIGN - 1)) \
6841 & ~(CONFIG_PHYSICAL_ALIGN - 1))
6842
6843 +#ifndef __ASSEMBLY__
6844 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
6845 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
6846 +#endif
6847 +
6848 /* Minimum kernel alignment, as a power of two */
6849 #ifdef CONFIG_X86_64
6850 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
6851 diff -urNp linux-2.6.39.4/arch/x86/include/asm/cacheflush.h linux-2.6.39.4/arch/x86/include/asm/cacheflush.h
6852 --- linux-2.6.39.4/arch/x86/include/asm/cacheflush.h 2011-05-19 00:06:34.000000000 -0400
6853 +++ linux-2.6.39.4/arch/x86/include/asm/cacheflush.h 2011-08-05 19:44:33.000000000 -0400
6854 @@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
6855 unsigned long pg_flags = pg->flags & _PGMT_MASK;
6856
6857 if (pg_flags == _PGMT_DEFAULT)
6858 - return -1;
6859 + return ~0UL;
6860 else if (pg_flags == _PGMT_WC)
6861 return _PAGE_CACHE_WC;
6862 else if (pg_flags == _PGMT_UC_MINUS)
6863 diff -urNp linux-2.6.39.4/arch/x86/include/asm/cache.h linux-2.6.39.4/arch/x86/include/asm/cache.h
6864 --- linux-2.6.39.4/arch/x86/include/asm/cache.h 2011-05-19 00:06:34.000000000 -0400
6865 +++ linux-2.6.39.4/arch/x86/include/asm/cache.h 2011-08-05 19:44:33.000000000 -0400
6866 @@ -5,12 +5,13 @@
6867
6868 /* L1 cache line size */
6869 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6870 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6871 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6872
6873 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
6874 +#define __read_only __attribute__((__section__(".data..read_only")))
6875
6876 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
6877 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
6878 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
6879
6880 #ifdef CONFIG_X86_VSMP
6881 #ifdef CONFIG_SMP
6882 diff -urNp linux-2.6.39.4/arch/x86/include/asm/checksum_32.h linux-2.6.39.4/arch/x86/include/asm/checksum_32.h
6883 --- linux-2.6.39.4/arch/x86/include/asm/checksum_32.h 2011-05-19 00:06:34.000000000 -0400
6884 +++ linux-2.6.39.4/arch/x86/include/asm/checksum_32.h 2011-08-05 19:44:33.000000000 -0400
6885 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
6886 int len, __wsum sum,
6887 int *src_err_ptr, int *dst_err_ptr);
6888
6889 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
6890 + int len, __wsum sum,
6891 + int *src_err_ptr, int *dst_err_ptr);
6892 +
6893 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
6894 + int len, __wsum sum,
6895 + int *src_err_ptr, int *dst_err_ptr);
6896 +
6897 /*
6898 * Note: when you get a NULL pointer exception here this means someone
6899 * passed in an incorrect kernel address to one of these functions.
6900 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
6901 int *err_ptr)
6902 {
6903 might_sleep();
6904 - return csum_partial_copy_generic((__force void *)src, dst,
6905 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
6906 len, sum, err_ptr, NULL);
6907 }
6908
6909 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
6910 {
6911 might_sleep();
6912 if (access_ok(VERIFY_WRITE, dst, len))
6913 - return csum_partial_copy_generic(src, (__force void *)dst,
6914 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
6915 len, sum, NULL, err_ptr);
6916
6917 if (len)
6918 diff -urNp linux-2.6.39.4/arch/x86/include/asm/cpufeature.h linux-2.6.39.4/arch/x86/include/asm/cpufeature.h
6919 --- linux-2.6.39.4/arch/x86/include/asm/cpufeature.h 2011-06-03 00:04:13.000000000 -0400
6920 +++ linux-2.6.39.4/arch/x86/include/asm/cpufeature.h 2011-08-05 19:44:33.000000000 -0400
6921 @@ -351,7 +351,7 @@ static __always_inline __pure bool __sta
6922 ".section .discard,\"aw\",@progbits\n"
6923 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
6924 ".previous\n"
6925 - ".section .altinstr_replacement,\"ax\"\n"
6926 + ".section .altinstr_replacement,\"a\"\n"
6927 "3: movb $1,%0\n"
6928 "4:\n"
6929 ".previous\n"
6930 diff -urNp linux-2.6.39.4/arch/x86/include/asm/desc_defs.h linux-2.6.39.4/arch/x86/include/asm/desc_defs.h
6931 --- linux-2.6.39.4/arch/x86/include/asm/desc_defs.h 2011-05-19 00:06:34.000000000 -0400
6932 +++ linux-2.6.39.4/arch/x86/include/asm/desc_defs.h 2011-08-05 19:44:33.000000000 -0400
6933 @@ -31,6 +31,12 @@ struct desc_struct {
6934 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
6935 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
6936 };
6937 + struct {
6938 + u16 offset_low;
6939 + u16 seg;
6940 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
6941 + unsigned offset_high: 16;
6942 + } gate;
6943 };
6944 } __attribute__((packed));
6945
6946 diff -urNp linux-2.6.39.4/arch/x86/include/asm/desc.h linux-2.6.39.4/arch/x86/include/asm/desc.h
6947 --- linux-2.6.39.4/arch/x86/include/asm/desc.h 2011-05-19 00:06:34.000000000 -0400
6948 +++ linux-2.6.39.4/arch/x86/include/asm/desc.h 2011-08-05 19:44:33.000000000 -0400
6949 @@ -4,6 +4,7 @@
6950 #include <asm/desc_defs.h>
6951 #include <asm/ldt.h>
6952 #include <asm/mmu.h>
6953 +#include <asm/pgtable.h>
6954 #include <linux/smp.h>
6955
6956 static inline void fill_ldt(struct desc_struct *desc,
6957 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
6958 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
6959 desc->type = (info->read_exec_only ^ 1) << 1;
6960 desc->type |= info->contents << 2;
6961 + desc->type |= info->seg_not_present ^ 1;
6962 desc->s = 1;
6963 desc->dpl = 0x3;
6964 desc->p = info->seg_not_present ^ 1;
6965 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
6966 }
6967
6968 extern struct desc_ptr idt_descr;
6969 -extern gate_desc idt_table[];
6970 -
6971 -struct gdt_page {
6972 - struct desc_struct gdt[GDT_ENTRIES];
6973 -} __attribute__((aligned(PAGE_SIZE)));
6974 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
6975 +extern gate_desc idt_table[256];
6976
6977 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
6978 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
6979 {
6980 - return per_cpu(gdt_page, cpu).gdt;
6981 + return cpu_gdt_table[cpu];
6982 }
6983
6984 #ifdef CONFIG_X86_64
6985 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
6986 unsigned long base, unsigned dpl, unsigned flags,
6987 unsigned short seg)
6988 {
6989 - gate->a = (seg << 16) | (base & 0xffff);
6990 - gate->b = (base & 0xffff0000) |
6991 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
6992 + gate->gate.offset_low = base;
6993 + gate->gate.seg = seg;
6994 + gate->gate.reserved = 0;
6995 + gate->gate.type = type;
6996 + gate->gate.s = 0;
6997 + gate->gate.dpl = dpl;
6998 + gate->gate.p = 1;
6999 + gate->gate.offset_high = base >> 16;
7000 }
7001
7002 #endif
7003 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
7004 static inline void native_write_idt_entry(gate_desc *idt, int entry,
7005 const gate_desc *gate)
7006 {
7007 + pax_open_kernel();
7008 memcpy(&idt[entry], gate, sizeof(*gate));
7009 + pax_close_kernel();
7010 }
7011
7012 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
7013 const void *desc)
7014 {
7015 + pax_open_kernel();
7016 memcpy(&ldt[entry], desc, 8);
7017 + pax_close_kernel();
7018 }
7019
7020 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
7021 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
7022 size = sizeof(struct desc_struct);
7023 break;
7024 }
7025 +
7026 + pax_open_kernel();
7027 memcpy(&gdt[entry], desc, size);
7028 + pax_close_kernel();
7029 }
7030
7031 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7032 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const
7033
7034 static inline void native_load_tr_desc(void)
7035 {
7036 + pax_open_kernel();
7037 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7038 + pax_close_kernel();
7039 }
7040
7041 static inline void native_load_gdt(const struct desc_ptr *dtr)
7042 @@ -246,8 +258,10 @@ static inline void native_load_tls(struc
7043 unsigned int i;
7044 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7045
7046 + pax_open_kernel();
7047 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7048 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7049 + pax_close_kernel();
7050 }
7051
7052 #define _LDT_empty(info) \
7053 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
7054 desc->limit = (limit >> 16) & 0xf;
7055 }
7056
7057 -static inline void _set_gate(int gate, unsigned type, void *addr,
7058 +static inline void _set_gate(int gate, unsigned type, const void *addr,
7059 unsigned dpl, unsigned ist, unsigned seg)
7060 {
7061 gate_desc s;
7062 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
7063 * Pentium F0 0F bugfix can have resulted in the mapped
7064 * IDT being write-protected.
7065 */
7066 -static inline void set_intr_gate(unsigned int n, void *addr)
7067 +static inline void set_intr_gate(unsigned int n, const void *addr)
7068 {
7069 BUG_ON((unsigned)n > 0xFF);
7070 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7071 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
7072 /*
7073 * This routine sets up an interrupt gate at directory privilege level 3.
7074 */
7075 -static inline void set_system_intr_gate(unsigned int n, void *addr)
7076 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
7077 {
7078 BUG_ON((unsigned)n > 0xFF);
7079 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7080 }
7081
7082 -static inline void set_system_trap_gate(unsigned int n, void *addr)
7083 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
7084 {
7085 BUG_ON((unsigned)n > 0xFF);
7086 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7087 }
7088
7089 -static inline void set_trap_gate(unsigned int n, void *addr)
7090 +static inline void set_trap_gate(unsigned int n, const void *addr)
7091 {
7092 BUG_ON((unsigned)n > 0xFF);
7093 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7094 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
7095 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7096 {
7097 BUG_ON((unsigned)n > 0xFF);
7098 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7099 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7100 }
7101
7102 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7103 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7104 {
7105 BUG_ON((unsigned)n > 0xFF);
7106 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7107 }
7108
7109 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7110 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7111 {
7112 BUG_ON((unsigned)n > 0xFF);
7113 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7114 }
7115
7116 +#ifdef CONFIG_X86_32
7117 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7118 +{
7119 + struct desc_struct d;
7120 +
7121 + if (likely(limit))
7122 + limit = (limit - 1UL) >> PAGE_SHIFT;
7123 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
7124 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7125 +}
7126 +#endif
7127 +
7128 #endif /* _ASM_X86_DESC_H */
7129 diff -urNp linux-2.6.39.4/arch/x86/include/asm/e820.h linux-2.6.39.4/arch/x86/include/asm/e820.h
7130 --- linux-2.6.39.4/arch/x86/include/asm/e820.h 2011-05-19 00:06:34.000000000 -0400
7131 +++ linux-2.6.39.4/arch/x86/include/asm/e820.h 2011-08-05 19:44:33.000000000 -0400
7132 @@ -69,7 +69,7 @@ struct e820map {
7133 #define ISA_START_ADDRESS 0xa0000
7134 #define ISA_END_ADDRESS 0x100000
7135
7136 -#define BIOS_BEGIN 0x000a0000
7137 +#define BIOS_BEGIN 0x000c0000
7138 #define BIOS_END 0x00100000
7139
7140 #define BIOS_ROM_BASE 0xffe00000
7141 diff -urNp linux-2.6.39.4/arch/x86/include/asm/elf.h linux-2.6.39.4/arch/x86/include/asm/elf.h
7142 --- linux-2.6.39.4/arch/x86/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
7143 +++ linux-2.6.39.4/arch/x86/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
7144 @@ -237,7 +237,25 @@ extern int force_personality32;
7145 the loader. We need to make sure that it is out of the way of the program
7146 that it will "exec", and that there is sufficient room for the brk. */
7147
7148 +#ifdef CONFIG_PAX_SEGMEXEC
7149 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7150 +#else
7151 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7152 +#endif
7153 +
7154 +#ifdef CONFIG_PAX_ASLR
7155 +#ifdef CONFIG_X86_32
7156 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7157 +
7158 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7159 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7160 +#else
7161 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
7162 +
7163 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7164 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7165 +#endif
7166 +#endif
7167
7168 /* This yields a mask that user programs can use to figure out what
7169 instruction set this CPU supports. This could be done in user space,
7170 @@ -291,8 +309,7 @@ do { \
7171 #define ARCH_DLINFO \
7172 do { \
7173 if (vdso_enabled) \
7174 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7175 - (unsigned long)current->mm->context.vdso); \
7176 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
7177 } while (0)
7178
7179 #define AT_SYSINFO 32
7180 @@ -303,7 +320,7 @@ do { \
7181
7182 #endif /* !CONFIG_X86_32 */
7183
7184 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7185 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7186
7187 #define VDSO_ENTRY \
7188 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7189 @@ -317,7 +334,4 @@ extern int arch_setup_additional_pages(s
7190 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7191 #define compat_arch_setup_additional_pages syscall32_setup_pages
7192
7193 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7194 -#define arch_randomize_brk arch_randomize_brk
7195 -
7196 #endif /* _ASM_X86_ELF_H */
7197 diff -urNp linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h
7198 --- linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h 2011-05-19 00:06:34.000000000 -0400
7199 +++ linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h 2011-08-05 19:44:33.000000000 -0400
7200 @@ -15,6 +15,6 @@ enum reboot_type {
7201
7202 extern enum reboot_type reboot_type;
7203
7204 -extern void machine_emergency_restart(void);
7205 +extern void machine_emergency_restart(void) __noreturn;
7206
7207 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7208 diff -urNp linux-2.6.39.4/arch/x86/include/asm/futex.h linux-2.6.39.4/arch/x86/include/asm/futex.h
7209 --- linux-2.6.39.4/arch/x86/include/asm/futex.h 2011-05-19 00:06:34.000000000 -0400
7210 +++ linux-2.6.39.4/arch/x86/include/asm/futex.h 2011-08-05 19:44:33.000000000 -0400
7211 @@ -12,16 +12,18 @@
7212 #include <asm/system.h>
7213
7214 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7215 + typecheck(u32 *, uaddr); \
7216 asm volatile("1:\t" insn "\n" \
7217 "2:\t.section .fixup,\"ax\"\n" \
7218 "3:\tmov\t%3, %1\n" \
7219 "\tjmp\t2b\n" \
7220 "\t.previous\n" \
7221 _ASM_EXTABLE(1b, 3b) \
7222 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7223 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
7224 : "i" (-EFAULT), "0" (oparg), "1" (0))
7225
7226 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7227 + typecheck(u32 *, uaddr); \
7228 asm volatile("1:\tmovl %2, %0\n" \
7229 "\tmovl\t%0, %3\n" \
7230 "\t" insn "\n" \
7231 @@ -34,7 +36,7 @@
7232 _ASM_EXTABLE(1b, 4b) \
7233 _ASM_EXTABLE(2b, 4b) \
7234 : "=&a" (oldval), "=&r" (ret), \
7235 - "+m" (*uaddr), "=&r" (tem) \
7236 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
7237 : "r" (oparg), "i" (-EFAULT), "1" (0))
7238
7239 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7240 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7241
7242 switch (op) {
7243 case FUTEX_OP_SET:
7244 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7245 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7246 break;
7247 case FUTEX_OP_ADD:
7248 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7249 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7250 uaddr, oparg);
7251 break;
7252 case FUTEX_OP_OR:
7253 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7254 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7255 return -EFAULT;
7256
7257 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7258 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7259 "2:\t.section .fixup, \"ax\"\n"
7260 "3:\tmov %3, %0\n"
7261 "\tjmp 2b\n"
7262 "\t.previous\n"
7263 _ASM_EXTABLE(1b, 3b)
7264 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7265 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
7266 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7267 : "memory"
7268 );
7269 diff -urNp linux-2.6.39.4/arch/x86/include/asm/hw_irq.h linux-2.6.39.4/arch/x86/include/asm/hw_irq.h
7270 --- linux-2.6.39.4/arch/x86/include/asm/hw_irq.h 2011-05-19 00:06:34.000000000 -0400
7271 +++ linux-2.6.39.4/arch/x86/include/asm/hw_irq.h 2011-08-05 19:44:33.000000000 -0400
7272 @@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7273 extern void enable_IO_APIC(void);
7274
7275 /* Statistics */
7276 -extern atomic_t irq_err_count;
7277 -extern atomic_t irq_mis_count;
7278 +extern atomic_unchecked_t irq_err_count;
7279 +extern atomic_unchecked_t irq_mis_count;
7280
7281 /* EISA */
7282 extern void eisa_set_level_irq(unsigned int irq);
7283 diff -urNp linux-2.6.39.4/arch/x86/include/asm/i387.h linux-2.6.39.4/arch/x86/include/asm/i387.h
7284 --- linux-2.6.39.4/arch/x86/include/asm/i387.h 2011-05-19 00:06:34.000000000 -0400
7285 +++ linux-2.6.39.4/arch/x86/include/asm/i387.h 2011-08-05 19:44:33.000000000 -0400
7286 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7287 {
7288 int err;
7289
7290 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7291 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7292 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
7293 +#endif
7294 +
7295 /* See comment in fxsave() below. */
7296 #ifdef CONFIG_AS_FXSAVEQ
7297 asm volatile("1: fxrstorq %[fx]\n\t"
7298 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7299 {
7300 int err;
7301
7302 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7303 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7304 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7305 +#endif
7306 +
7307 /*
7308 * Clear the bytes not touched by the fxsave and reserved
7309 * for the SW usage.
7310 @@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7311 #endif /* CONFIG_X86_64 */
7312
7313 /* We need a safe address that is cheap to find and that is already
7314 - in L1 during context switch. The best choices are unfortunately
7315 - different for UP and SMP */
7316 -#ifdef CONFIG_SMP
7317 -#define safe_address (__per_cpu_offset[0])
7318 -#else
7319 -#define safe_address (kstat_cpu(0).cpustat.user)
7320 -#endif
7321 + in L1 during context switch. */
7322 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7323
7324 /*
7325 * These must be called with preempt disabled
7326 @@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7327 struct thread_info *me = current_thread_info();
7328 preempt_disable();
7329 if (me->status & TS_USEDFPU)
7330 - __save_init_fpu(me->task);
7331 + __save_init_fpu(current);
7332 else
7333 clts();
7334 }
7335 diff -urNp linux-2.6.39.4/arch/x86/include/asm/io.h linux-2.6.39.4/arch/x86/include/asm/io.h
7336 --- linux-2.6.39.4/arch/x86/include/asm/io.h 2011-05-19 00:06:34.000000000 -0400
7337 +++ linux-2.6.39.4/arch/x86/include/asm/io.h 2011-08-05 19:44:33.000000000 -0400
7338 @@ -216,6 +216,17 @@ extern void set_iounmap_nonlazy(void);
7339
7340 #include <linux/vmalloc.h>
7341
7342 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7343 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7344 +{
7345 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7346 +}
7347 +
7348 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7349 +{
7350 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7351 +}
7352 +
7353 /*
7354 * Convert a virtual cached pointer to an uncached pointer
7355 */
7356 diff -urNp linux-2.6.39.4/arch/x86/include/asm/irqflags.h linux-2.6.39.4/arch/x86/include/asm/irqflags.h
7357 --- linux-2.6.39.4/arch/x86/include/asm/irqflags.h 2011-05-19 00:06:34.000000000 -0400
7358 +++ linux-2.6.39.4/arch/x86/include/asm/irqflags.h 2011-08-05 19:44:33.000000000 -0400
7359 @@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7360 sti; \
7361 sysexit
7362
7363 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
7364 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7365 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
7366 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7367 +
7368 #else
7369 #define INTERRUPT_RETURN iret
7370 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7371 diff -urNp linux-2.6.39.4/arch/x86/include/asm/kprobes.h linux-2.6.39.4/arch/x86/include/asm/kprobes.h
7372 --- linux-2.6.39.4/arch/x86/include/asm/kprobes.h 2011-05-19 00:06:34.000000000 -0400
7373 +++ linux-2.6.39.4/arch/x86/include/asm/kprobes.h 2011-08-05 19:44:33.000000000 -0400
7374 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7375 #define RELATIVEJUMP_SIZE 5
7376 #define RELATIVECALL_OPCODE 0xe8
7377 #define RELATIVE_ADDR_SIZE 4
7378 -#define MAX_STACK_SIZE 64
7379 -#define MIN_STACK_SIZE(ADDR) \
7380 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7381 - THREAD_SIZE - (unsigned long)(ADDR))) \
7382 - ? (MAX_STACK_SIZE) \
7383 - : (((unsigned long)current_thread_info()) + \
7384 - THREAD_SIZE - (unsigned long)(ADDR)))
7385 +#define MAX_STACK_SIZE 64UL
7386 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7387
7388 #define flush_insn_slot(p) do { } while (0)
7389
7390 diff -urNp linux-2.6.39.4/arch/x86/include/asm/kvm_host.h linux-2.6.39.4/arch/x86/include/asm/kvm_host.h
7391 --- linux-2.6.39.4/arch/x86/include/asm/kvm_host.h 2011-05-19 00:06:34.000000000 -0400
7392 +++ linux-2.6.39.4/arch/x86/include/asm/kvm_host.h 2011-08-05 20:34:06.000000000 -0400
7393 @@ -419,7 +419,7 @@ struct kvm_arch {
7394 unsigned int n_used_mmu_pages;
7395 unsigned int n_requested_mmu_pages;
7396 unsigned int n_max_mmu_pages;
7397 - atomic_t invlpg_counter;
7398 + atomic_unchecked_t invlpg_counter;
7399 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7400 /*
7401 * Hash table of struct kvm_mmu_page.
7402 @@ -589,7 +589,7 @@ struct kvm_x86_ops {
7403 void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
7404
7405 void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
7406 - const struct trace_print_flags *exit_reasons_str;
7407 + const struct trace_print_flags * const exit_reasons_str;
7408 };
7409
7410 struct kvm_arch_async_pf {
7411 diff -urNp linux-2.6.39.4/arch/x86/include/asm/local.h linux-2.6.39.4/arch/x86/include/asm/local.h
7412 --- linux-2.6.39.4/arch/x86/include/asm/local.h 2011-05-19 00:06:34.000000000 -0400
7413 +++ linux-2.6.39.4/arch/x86/include/asm/local.h 2011-08-05 19:44:33.000000000 -0400
7414 @@ -18,26 +18,58 @@ typedef struct {
7415
7416 static inline void local_inc(local_t *l)
7417 {
7418 - asm volatile(_ASM_INC "%0"
7419 + asm volatile(_ASM_INC "%0\n"
7420 +
7421 +#ifdef CONFIG_PAX_REFCOUNT
7422 + "jno 0f\n"
7423 + _ASM_DEC "%0\n"
7424 + "int $4\n0:\n"
7425 + _ASM_EXTABLE(0b, 0b)
7426 +#endif
7427 +
7428 : "+m" (l->a.counter));
7429 }
7430
7431 static inline void local_dec(local_t *l)
7432 {
7433 - asm volatile(_ASM_DEC "%0"
7434 + asm volatile(_ASM_DEC "%0\n"
7435 +
7436 +#ifdef CONFIG_PAX_REFCOUNT
7437 + "jno 0f\n"
7438 + _ASM_INC "%0\n"
7439 + "int $4\n0:\n"
7440 + _ASM_EXTABLE(0b, 0b)
7441 +#endif
7442 +
7443 : "+m" (l->a.counter));
7444 }
7445
7446 static inline void local_add(long i, local_t *l)
7447 {
7448 - asm volatile(_ASM_ADD "%1,%0"
7449 + asm volatile(_ASM_ADD "%1,%0\n"
7450 +
7451 +#ifdef CONFIG_PAX_REFCOUNT
7452 + "jno 0f\n"
7453 + _ASM_SUB "%1,%0\n"
7454 + "int $4\n0:\n"
7455 + _ASM_EXTABLE(0b, 0b)
7456 +#endif
7457 +
7458 : "+m" (l->a.counter)
7459 : "ir" (i));
7460 }
7461
7462 static inline void local_sub(long i, local_t *l)
7463 {
7464 - asm volatile(_ASM_SUB "%1,%0"
7465 + asm volatile(_ASM_SUB "%1,%0\n"
7466 +
7467 +#ifdef CONFIG_PAX_REFCOUNT
7468 + "jno 0f\n"
7469 + _ASM_ADD "%1,%0\n"
7470 + "int $4\n0:\n"
7471 + _ASM_EXTABLE(0b, 0b)
7472 +#endif
7473 +
7474 : "+m" (l->a.counter)
7475 : "ir" (i));
7476 }
7477 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7478 {
7479 unsigned char c;
7480
7481 - asm volatile(_ASM_SUB "%2,%0; sete %1"
7482 + asm volatile(_ASM_SUB "%2,%0\n"
7483 +
7484 +#ifdef CONFIG_PAX_REFCOUNT
7485 + "jno 0f\n"
7486 + _ASM_ADD "%2,%0\n"
7487 + "int $4\n0:\n"
7488 + _ASM_EXTABLE(0b, 0b)
7489 +#endif
7490 +
7491 + "sete %1\n"
7492 : "+m" (l->a.counter), "=qm" (c)
7493 : "ir" (i) : "memory");
7494 return c;
7495 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7496 {
7497 unsigned char c;
7498
7499 - asm volatile(_ASM_DEC "%0; sete %1"
7500 + asm volatile(_ASM_DEC "%0\n"
7501 +
7502 +#ifdef CONFIG_PAX_REFCOUNT
7503 + "jno 0f\n"
7504 + _ASM_INC "%0\n"
7505 + "int $4\n0:\n"
7506 + _ASM_EXTABLE(0b, 0b)
7507 +#endif
7508 +
7509 + "sete %1\n"
7510 : "+m" (l->a.counter), "=qm" (c)
7511 : : "memory");
7512 return c != 0;
7513 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7514 {
7515 unsigned char c;
7516
7517 - asm volatile(_ASM_INC "%0; sete %1"
7518 + asm volatile(_ASM_INC "%0\n"
7519 +
7520 +#ifdef CONFIG_PAX_REFCOUNT
7521 + "jno 0f\n"
7522 + _ASM_DEC "%0\n"
7523 + "int $4\n0:\n"
7524 + _ASM_EXTABLE(0b, 0b)
7525 +#endif
7526 +
7527 + "sete %1\n"
7528 : "+m" (l->a.counter), "=qm" (c)
7529 : : "memory");
7530 return c != 0;
7531 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7532 {
7533 unsigned char c;
7534
7535 - asm volatile(_ASM_ADD "%2,%0; sets %1"
7536 + asm volatile(_ASM_ADD "%2,%0\n"
7537 +
7538 +#ifdef CONFIG_PAX_REFCOUNT
7539 + "jno 0f\n"
7540 + _ASM_SUB "%2,%0\n"
7541 + "int $4\n0:\n"
7542 + _ASM_EXTABLE(0b, 0b)
7543 +#endif
7544 +
7545 + "sets %1\n"
7546 : "+m" (l->a.counter), "=qm" (c)
7547 : "ir" (i) : "memory");
7548 return c;
7549 @@ -133,7 +201,15 @@ static inline long local_add_return(long
7550 #endif
7551 /* Modern 486+ processor */
7552 __i = i;
7553 - asm volatile(_ASM_XADD "%0, %1;"
7554 + asm volatile(_ASM_XADD "%0, %1\n"
7555 +
7556 +#ifdef CONFIG_PAX_REFCOUNT
7557 + "jno 0f\n"
7558 + _ASM_MOV "%0,%1\n"
7559 + "int $4\n0:\n"
7560 + _ASM_EXTABLE(0b, 0b)
7561 +#endif
7562 +
7563 : "+r" (i), "+m" (l->a.counter)
7564 : : "memory");
7565 return i + __i;
7566 diff -urNp linux-2.6.39.4/arch/x86/include/asm/mman.h linux-2.6.39.4/arch/x86/include/asm/mman.h
7567 --- linux-2.6.39.4/arch/x86/include/asm/mman.h 2011-05-19 00:06:34.000000000 -0400
7568 +++ linux-2.6.39.4/arch/x86/include/asm/mman.h 2011-08-05 19:44:33.000000000 -0400
7569 @@ -5,4 +5,14 @@
7570
7571 #include <asm-generic/mman.h>
7572
7573 +#ifdef __KERNEL__
7574 +#ifndef __ASSEMBLY__
7575 +#ifdef CONFIG_X86_32
7576 +#define arch_mmap_check i386_mmap_check
7577 +int i386_mmap_check(unsigned long addr, unsigned long len,
7578 + unsigned long flags);
7579 +#endif
7580 +#endif
7581 +#endif
7582 +
7583 #endif /* _ASM_X86_MMAN_H */
7584 diff -urNp linux-2.6.39.4/arch/x86/include/asm/mmu_context.h linux-2.6.39.4/arch/x86/include/asm/mmu_context.h
7585 --- linux-2.6.39.4/arch/x86/include/asm/mmu_context.h 2011-05-19 00:06:34.000000000 -0400
7586 +++ linux-2.6.39.4/arch/x86/include/asm/mmu_context.h 2011-08-17 19:42:21.000000000 -0400
7587 @@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
7588
7589 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7590 {
7591 +
7592 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7593 + unsigned int i;
7594 + pgd_t *pgd;
7595 +
7596 + pax_open_kernel();
7597 + pgd = get_cpu_pgd(smp_processor_id());
7598 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7599 + if (paravirt_enabled())
7600 + set_pgd(pgd+i, native_make_pgd(0));
7601 + else
7602 + pgd[i] = native_make_pgd(0);
7603 + pax_close_kernel();
7604 +#endif
7605 +
7606 #ifdef CONFIG_SMP
7607 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7608 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7609 @@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
7610 struct task_struct *tsk)
7611 {
7612 unsigned cpu = smp_processor_id();
7613 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
7614 + int tlbstate = TLBSTATE_OK;
7615 +#endif
7616
7617 if (likely(prev != next)) {
7618 #ifdef CONFIG_SMP
7619 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7620 + tlbstate = percpu_read(cpu_tlbstate.state);
7621 +#endif
7622 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7623 percpu_write(cpu_tlbstate.active_mm, next);
7624 #endif
7625 cpumask_set_cpu(cpu, mm_cpumask(next));
7626
7627 /* Re-load page tables */
7628 +#ifdef CONFIG_PAX_PER_CPU_PGD
7629 + pax_open_kernel();
7630 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7631 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7632 + pax_close_kernel();
7633 + load_cr3(get_cpu_pgd(cpu));
7634 +#else
7635 load_cr3(next->pgd);
7636 +#endif
7637
7638 /* stop flush ipis for the previous mm */
7639 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7640 @@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
7641 */
7642 if (unlikely(prev->context.ldt != next->context.ldt))
7643 load_LDT_nolock(&next->context);
7644 - }
7645 +
7646 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7647 + if (!(__supported_pte_mask & _PAGE_NX)) {
7648 + smp_mb__before_clear_bit();
7649 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7650 + smp_mb__after_clear_bit();
7651 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7652 + }
7653 +#endif
7654 +
7655 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7656 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7657 + prev->context.user_cs_limit != next->context.user_cs_limit))
7658 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7659 #ifdef CONFIG_SMP
7660 + else if (unlikely(tlbstate != TLBSTATE_OK))
7661 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7662 +#endif
7663 +#endif
7664 +
7665 + }
7666 else {
7667 +
7668 +#ifdef CONFIG_PAX_PER_CPU_PGD
7669 + pax_open_kernel();
7670 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7671 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7672 + pax_close_kernel();
7673 + load_cr3(get_cpu_pgd(cpu));
7674 +#endif
7675 +
7676 +#ifdef CONFIG_SMP
7677 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7678 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7679
7680 @@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
7681 * tlb flush IPI delivery. We must reload CR3
7682 * to make sure to use no freed page tables.
7683 */
7684 +
7685 +#ifndef CONFIG_PAX_PER_CPU_PGD
7686 load_cr3(next->pgd);
7687 +#endif
7688 +
7689 load_LDT_nolock(&next->context);
7690 +
7691 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7692 + if (!(__supported_pte_mask & _PAGE_NX))
7693 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7694 +#endif
7695 +
7696 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7697 +#ifdef CONFIG_PAX_PAGEEXEC
7698 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7699 +#endif
7700 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7701 +#endif
7702 +
7703 }
7704 - }
7705 #endif
7706 + }
7707 }
7708
7709 #define activate_mm(prev, next) \
7710 diff -urNp linux-2.6.39.4/arch/x86/include/asm/mmu.h linux-2.6.39.4/arch/x86/include/asm/mmu.h
7711 --- linux-2.6.39.4/arch/x86/include/asm/mmu.h 2011-05-19 00:06:34.000000000 -0400
7712 +++ linux-2.6.39.4/arch/x86/include/asm/mmu.h 2011-08-05 19:44:33.000000000 -0400
7713 @@ -9,10 +9,22 @@
7714 * we put the segment information here.
7715 */
7716 typedef struct {
7717 - void *ldt;
7718 + struct desc_struct *ldt;
7719 int size;
7720 struct mutex lock;
7721 - void *vdso;
7722 + unsigned long vdso;
7723 +
7724 +#ifdef CONFIG_X86_32
7725 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7726 + unsigned long user_cs_base;
7727 + unsigned long user_cs_limit;
7728 +
7729 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7730 + cpumask_t cpu_user_cs_mask;
7731 +#endif
7732 +
7733 +#endif
7734 +#endif
7735
7736 #ifdef CONFIG_X86_64
7737 /* True if mm supports a task running in 32 bit compatibility mode. */
7738 diff -urNp linux-2.6.39.4/arch/x86/include/asm/module.h linux-2.6.39.4/arch/x86/include/asm/module.h
7739 --- linux-2.6.39.4/arch/x86/include/asm/module.h 2011-05-19 00:06:34.000000000 -0400
7740 +++ linux-2.6.39.4/arch/x86/include/asm/module.h 2011-08-05 19:44:33.000000000 -0400
7741 @@ -5,6 +5,7 @@
7742
7743 #ifdef CONFIG_X86_64
7744 /* X86_64 does not define MODULE_PROC_FAMILY */
7745 +#define MODULE_PROC_FAMILY ""
7746 #elif defined CONFIG_M386
7747 #define MODULE_PROC_FAMILY "386 "
7748 #elif defined CONFIG_M486
7749 @@ -59,8 +60,30 @@
7750 #error unknown processor family
7751 #endif
7752
7753 -#ifdef CONFIG_X86_32
7754 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7755 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7756 +#define MODULE_PAX_UDEREF "UDEREF "
7757 +#else
7758 +#define MODULE_PAX_UDEREF ""
7759 +#endif
7760 +
7761 +#ifdef CONFIG_PAX_KERNEXEC
7762 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
7763 +#else
7764 +#define MODULE_PAX_KERNEXEC ""
7765 #endif
7766
7767 +#ifdef CONFIG_PAX_REFCOUNT
7768 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
7769 +#else
7770 +#define MODULE_PAX_REFCOUNT ""
7771 +#endif
7772 +
7773 +#ifdef CONFIG_GRKERNSEC
7774 +#define MODULE_GRSEC "GRSECURITY "
7775 +#else
7776 +#define MODULE_GRSEC ""
7777 +#endif
7778 +
7779 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
7780 +
7781 #endif /* _ASM_X86_MODULE_H */
7782 diff -urNp linux-2.6.39.4/arch/x86/include/asm/page_64_types.h linux-2.6.39.4/arch/x86/include/asm/page_64_types.h
7783 --- linux-2.6.39.4/arch/x86/include/asm/page_64_types.h 2011-05-19 00:06:34.000000000 -0400
7784 +++ linux-2.6.39.4/arch/x86/include/asm/page_64_types.h 2011-08-05 19:44:33.000000000 -0400
7785 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7786
7787 /* duplicated to the one in bootmem.h */
7788 extern unsigned long max_pfn;
7789 -extern unsigned long phys_base;
7790 +extern const unsigned long phys_base;
7791
7792 extern unsigned long __phys_addr(unsigned long);
7793 #define __phys_reloc_hide(x) (x)
7794 diff -urNp linux-2.6.39.4/arch/x86/include/asm/paravirt.h linux-2.6.39.4/arch/x86/include/asm/paravirt.h
7795 --- linux-2.6.39.4/arch/x86/include/asm/paravirt.h 2011-05-19 00:06:34.000000000 -0400
7796 +++ linux-2.6.39.4/arch/x86/include/asm/paravirt.h 2011-08-05 19:44:33.000000000 -0400
7797 @@ -739,6 +739,21 @@ static inline void __set_fixmap(unsigned
7798 pv_mmu_ops.set_fixmap(idx, phys, flags);
7799 }
7800
7801 +#ifdef CONFIG_PAX_KERNEXEC
7802 +static inline unsigned long pax_open_kernel(void)
7803 +{
7804 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7805 +}
7806 +
7807 +static inline unsigned long pax_close_kernel(void)
7808 +{
7809 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7810 +}
7811 +#else
7812 +static inline unsigned long pax_open_kernel(void) { return 0; }
7813 +static inline unsigned long pax_close_kernel(void) { return 0; }
7814 +#endif
7815 +
7816 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7817
7818 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7819 @@ -955,7 +970,7 @@ extern void default_banner(void);
7820
7821 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7822 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7823 -#define PARA_INDIRECT(addr) *%cs:addr
7824 +#define PARA_INDIRECT(addr) *%ss:addr
7825 #endif
7826
7827 #define INTERRUPT_RETURN \
7828 @@ -1032,6 +1047,21 @@ extern void default_banner(void);
7829 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
7830 CLBR_NONE, \
7831 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
7832 +
7833 +#define GET_CR0_INTO_RDI \
7834 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
7835 + mov %rax,%rdi
7836 +
7837 +#define SET_RDI_INTO_CR0 \
7838 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
7839 +
7840 +#define GET_CR3_INTO_RDI \
7841 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
7842 + mov %rax,%rdi
7843 +
7844 +#define SET_RDI_INTO_CR3 \
7845 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
7846 +
7847 #endif /* CONFIG_X86_32 */
7848
7849 #endif /* __ASSEMBLY__ */
7850 diff -urNp linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h
7851 --- linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h 2011-05-19 00:06:34.000000000 -0400
7852 +++ linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h 2011-08-05 20:34:06.000000000 -0400
7853 @@ -78,19 +78,19 @@ struct pv_init_ops {
7854 */
7855 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
7856 unsigned long addr, unsigned len);
7857 -};
7858 +} __no_const;
7859
7860
7861 struct pv_lazy_ops {
7862 /* Set deferred update mode, used for batching operations. */
7863 void (*enter)(void);
7864 void (*leave)(void);
7865 -};
7866 +} __no_const;
7867
7868 struct pv_time_ops {
7869 unsigned long long (*sched_clock)(void);
7870 unsigned long (*get_tsc_khz)(void);
7871 -};
7872 +} __no_const;
7873
7874 struct pv_cpu_ops {
7875 /* hooks for various privileged instructions */
7876 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
7877
7878 void (*start_context_switch)(struct task_struct *prev);
7879 void (*end_context_switch)(struct task_struct *next);
7880 -};
7881 +} __no_const;
7882
7883 struct pv_irq_ops {
7884 /*
7885 @@ -217,7 +217,7 @@ struct pv_apic_ops {
7886 unsigned long start_eip,
7887 unsigned long start_esp);
7888 #endif
7889 -};
7890 +} __no_const;
7891
7892 struct pv_mmu_ops {
7893 unsigned long (*read_cr2)(void);
7894 @@ -317,6 +317,12 @@ struct pv_mmu_ops {
7895 an mfn. We can tell which is which from the index. */
7896 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
7897 phys_addr_t phys, pgprot_t flags);
7898 +
7899 +#ifdef CONFIG_PAX_KERNEXEC
7900 + unsigned long (*pax_open_kernel)(void);
7901 + unsigned long (*pax_close_kernel)(void);
7902 +#endif
7903 +
7904 };
7905
7906 struct arch_spinlock;
7907 @@ -327,7 +333,7 @@ struct pv_lock_ops {
7908 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
7909 int (*spin_trylock)(struct arch_spinlock *lock);
7910 void (*spin_unlock)(struct arch_spinlock *lock);
7911 -};
7912 +} __no_const;
7913
7914 /* This contains all the paravirt structures: we get a convenient
7915 * number for each function using the offset which we use to indicate
7916 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgalloc.h linux-2.6.39.4/arch/x86/include/asm/pgalloc.h
7917 --- linux-2.6.39.4/arch/x86/include/asm/pgalloc.h 2011-05-19 00:06:34.000000000 -0400
7918 +++ linux-2.6.39.4/arch/x86/include/asm/pgalloc.h 2011-08-05 19:44:33.000000000 -0400
7919 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
7920 pmd_t *pmd, pte_t *pte)
7921 {
7922 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7923 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
7924 +}
7925 +
7926 +static inline void pmd_populate_user(struct mm_struct *mm,
7927 + pmd_t *pmd, pte_t *pte)
7928 +{
7929 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7930 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
7931 }
7932
7933 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h
7934 --- linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h 2011-05-19 00:06:34.000000000 -0400
7935 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h 2011-08-05 19:44:33.000000000 -0400
7936 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
7937
7938 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
7939 {
7940 + pax_open_kernel();
7941 *pmdp = pmd;
7942 + pax_close_kernel();
7943 }
7944
7945 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
7946 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h
7947 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h 2011-05-19 00:06:34.000000000 -0400
7948 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h 2011-08-05 19:44:33.000000000 -0400
7949 @@ -25,9 +25,6 @@
7950 struct mm_struct;
7951 struct vm_area_struct;
7952
7953 -extern pgd_t swapper_pg_dir[1024];
7954 -extern pgd_t initial_page_table[1024];
7955 -
7956 static inline void pgtable_cache_init(void) { }
7957 static inline void check_pgt_cache(void) { }
7958 void paging_init(void);
7959 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
7960 # include <asm/pgtable-2level.h>
7961 #endif
7962
7963 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
7964 +extern pgd_t initial_page_table[PTRS_PER_PGD];
7965 +#ifdef CONFIG_X86_PAE
7966 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
7967 +#endif
7968 +
7969 #if defined(CONFIG_HIGHPTE)
7970 #define pte_offset_map(dir, address) \
7971 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
7972 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
7973 /* Clear a kernel PTE and flush it from the TLB */
7974 #define kpte_clear_flush(ptep, vaddr) \
7975 do { \
7976 + pax_open_kernel(); \
7977 pte_clear(&init_mm, (vaddr), (ptep)); \
7978 + pax_close_kernel(); \
7979 __flush_tlb_one((vaddr)); \
7980 } while (0)
7981
7982 @@ -74,6 +79,9 @@ do { \
7983
7984 #endif /* !__ASSEMBLY__ */
7985
7986 +#define HAVE_ARCH_UNMAPPED_AREA
7987 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
7988 +
7989 /*
7990 * kern_addr_valid() is (1) for FLATMEM and (0) for
7991 * SPARSEMEM and DISCONTIGMEM
7992 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h
7993 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h 2011-05-19 00:06:34.000000000 -0400
7994 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h 2011-08-05 19:44:33.000000000 -0400
7995 @@ -8,7 +8,7 @@
7996 */
7997 #ifdef CONFIG_X86_PAE
7998 # include <asm/pgtable-3level_types.h>
7999 -# define PMD_SIZE (1UL << PMD_SHIFT)
8000 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
8001 # define PMD_MASK (~(PMD_SIZE - 1))
8002 #else
8003 # include <asm/pgtable-2level_types.h>
8004 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
8005 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
8006 #endif
8007
8008 +#ifdef CONFIG_PAX_KERNEXEC
8009 +#ifndef __ASSEMBLY__
8010 +extern unsigned char MODULES_EXEC_VADDR[];
8011 +extern unsigned char MODULES_EXEC_END[];
8012 +#endif
8013 +#include <asm/boot.h>
8014 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
8015 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
8016 +#else
8017 +#define ktla_ktva(addr) (addr)
8018 +#define ktva_ktla(addr) (addr)
8019 +#endif
8020 +
8021 #define MODULES_VADDR VMALLOC_START
8022 #define MODULES_END VMALLOC_END
8023 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8024 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h
8025 --- linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h 2011-05-19 00:06:34.000000000 -0400
8026 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h 2011-08-05 19:44:33.000000000 -0400
8027 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8028
8029 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8030 {
8031 + pax_open_kernel();
8032 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8033 + pax_close_kernel();
8034 }
8035
8036 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8037 {
8038 + pax_open_kernel();
8039 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8040 + pax_close_kernel();
8041 }
8042
8043 /*
8044 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h
8045 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h 2011-05-19 00:06:34.000000000 -0400
8046 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h 2011-08-05 19:44:33.000000000 -0400
8047 @@ -16,10 +16,13 @@
8048
8049 extern pud_t level3_kernel_pgt[512];
8050 extern pud_t level3_ident_pgt[512];
8051 +extern pud_t level3_vmalloc_pgt[512];
8052 +extern pud_t level3_vmemmap_pgt[512];
8053 +extern pud_t level2_vmemmap_pgt[512];
8054 extern pmd_t level2_kernel_pgt[512];
8055 extern pmd_t level2_fixmap_pgt[512];
8056 -extern pmd_t level2_ident_pgt[512];
8057 -extern pgd_t init_level4_pgt[];
8058 +extern pmd_t level2_ident_pgt[512*2];
8059 +extern pgd_t init_level4_pgt[512];
8060
8061 #define swapper_pg_dir init_level4_pgt
8062
8063 @@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8064
8065 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8066 {
8067 + pax_open_kernel();
8068 *pmdp = pmd;
8069 + pax_close_kernel();
8070 }
8071
8072 static inline void native_pmd_clear(pmd_t *pmd)
8073 @@ -107,7 +112,9 @@ static inline void native_pud_clear(pud_
8074
8075 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8076 {
8077 + pax_open_kernel();
8078 *pgdp = pgd;
8079 + pax_close_kernel();
8080 }
8081
8082 static inline void native_pgd_clear(pgd_t *pgd)
8083 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h
8084 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h 2011-05-19 00:06:34.000000000 -0400
8085 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h 2011-08-05 19:44:33.000000000 -0400
8086 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8087 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8088 #define MODULES_END _AC(0xffffffffff000000, UL)
8089 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8090 +#define MODULES_EXEC_VADDR MODULES_VADDR
8091 +#define MODULES_EXEC_END MODULES_END
8092 +
8093 +#define ktla_ktva(addr) (addr)
8094 +#define ktva_ktla(addr) (addr)
8095
8096 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8097 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable.h linux-2.6.39.4/arch/x86/include/asm/pgtable.h
8098 --- linux-2.6.39.4/arch/x86/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
8099 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
8100 @@ -81,12 +81,51 @@ extern struct mm_struct *pgd_page_get_mm
8101
8102 #define arch_end_context_switch(prev) do {} while(0)
8103
8104 +#define pax_open_kernel() native_pax_open_kernel()
8105 +#define pax_close_kernel() native_pax_close_kernel()
8106 #endif /* CONFIG_PARAVIRT */
8107
8108 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
8109 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8110 +
8111 +#ifdef CONFIG_PAX_KERNEXEC
8112 +static inline unsigned long native_pax_open_kernel(void)
8113 +{
8114 + unsigned long cr0;
8115 +
8116 + preempt_disable();
8117 + barrier();
8118 + cr0 = read_cr0() ^ X86_CR0_WP;
8119 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
8120 + write_cr0(cr0);
8121 + return cr0 ^ X86_CR0_WP;
8122 +}
8123 +
8124 +static inline unsigned long native_pax_close_kernel(void)
8125 +{
8126 + unsigned long cr0;
8127 +
8128 + cr0 = read_cr0() ^ X86_CR0_WP;
8129 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8130 + write_cr0(cr0);
8131 + barrier();
8132 + preempt_enable_no_resched();
8133 + return cr0 ^ X86_CR0_WP;
8134 +}
8135 +#else
8136 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
8137 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
8138 +#endif
8139 +
8140 /*
8141 * The following only work if pte_present() is true.
8142 * Undefined behaviour if not..
8143 */
8144 +static inline int pte_user(pte_t pte)
8145 +{
8146 + return pte_val(pte) & _PAGE_USER;
8147 +}
8148 +
8149 static inline int pte_dirty(pte_t pte)
8150 {
8151 return pte_flags(pte) & _PAGE_DIRTY;
8152 @@ -196,9 +235,29 @@ static inline pte_t pte_wrprotect(pte_t
8153 return pte_clear_flags(pte, _PAGE_RW);
8154 }
8155
8156 +static inline pte_t pte_mkread(pte_t pte)
8157 +{
8158 + return __pte(pte_val(pte) | _PAGE_USER);
8159 +}
8160 +
8161 static inline pte_t pte_mkexec(pte_t pte)
8162 {
8163 - return pte_clear_flags(pte, _PAGE_NX);
8164 +#ifdef CONFIG_X86_PAE
8165 + if (__supported_pte_mask & _PAGE_NX)
8166 + return pte_clear_flags(pte, _PAGE_NX);
8167 + else
8168 +#endif
8169 + return pte_set_flags(pte, _PAGE_USER);
8170 +}
8171 +
8172 +static inline pte_t pte_exprotect(pte_t pte)
8173 +{
8174 +#ifdef CONFIG_X86_PAE
8175 + if (__supported_pte_mask & _PAGE_NX)
8176 + return pte_set_flags(pte, _PAGE_NX);
8177 + else
8178 +#endif
8179 + return pte_clear_flags(pte, _PAGE_USER);
8180 }
8181
8182 static inline pte_t pte_mkdirty(pte_t pte)
8183 @@ -390,6 +449,15 @@ pte_t *populate_extra_pte(unsigned long
8184 #endif
8185
8186 #ifndef __ASSEMBLY__
8187 +
8188 +#ifdef CONFIG_PAX_PER_CPU_PGD
8189 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8190 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8191 +{
8192 + return cpu_pgd[cpu];
8193 +}
8194 +#endif
8195 +
8196 #include <linux/mm_types.h>
8197
8198 static inline int pte_none(pte_t pte)
8199 @@ -560,7 +628,7 @@ static inline pud_t *pud_offset(pgd_t *p
8200
8201 static inline int pgd_bad(pgd_t pgd)
8202 {
8203 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8204 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8205 }
8206
8207 static inline int pgd_none(pgd_t pgd)
8208 @@ -583,7 +651,12 @@ static inline int pgd_none(pgd_t pgd)
8209 * pgd_offset() returns a (pgd_t *)
8210 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8211 */
8212 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8213 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8214 +
8215 +#ifdef CONFIG_PAX_PER_CPU_PGD
8216 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8217 +#endif
8218 +
8219 /*
8220 * a shortcut which implies the use of the kernel's pgd, instead
8221 * of a process's
8222 @@ -594,6 +667,20 @@ static inline int pgd_none(pgd_t pgd)
8223 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8224 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8225
8226 +#ifdef CONFIG_X86_32
8227 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8228 +#else
8229 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8230 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8231 +
8232 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8233 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8234 +#else
8235 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8236 +#endif
8237 +
8238 +#endif
8239 +
8240 #ifndef __ASSEMBLY__
8241
8242 extern int direct_gbpages;
8243 @@ -758,11 +845,23 @@ static inline void pmdp_set_wrprotect(st
8244 * dst and src can be on the same page, but the range must not overlap,
8245 * and must not cross a page boundary.
8246 */
8247 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8248 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8249 {
8250 - memcpy(dst, src, count * sizeof(pgd_t));
8251 + pax_open_kernel();
8252 + while (count--)
8253 + *dst++ = *src++;
8254 + pax_close_kernel();
8255 }
8256
8257 +#ifdef CONFIG_PAX_PER_CPU_PGD
8258 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8259 +#endif
8260 +
8261 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8262 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8263 +#else
8264 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8265 +#endif
8266
8267 #include <asm-generic/pgtable.h>
8268 #endif /* __ASSEMBLY__ */
8269 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h
8270 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h 2011-05-19 00:06:34.000000000 -0400
8271 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h 2011-08-05 19:44:33.000000000 -0400
8272 @@ -16,13 +16,12 @@
8273 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8274 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8275 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8276 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8277 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8278 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8279 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8280 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8281 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8282 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8283 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8284 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8285 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8286 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8287
8288 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8289 @@ -40,7 +39,6 @@
8290 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8291 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8292 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8293 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8294 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8295 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8296 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8297 @@ -57,8 +55,10 @@
8298
8299 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8300 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8301 -#else
8302 +#elif defined(CONFIG_KMEMCHECK)
8303 #define _PAGE_NX (_AT(pteval_t, 0))
8304 +#else
8305 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8306 #endif
8307
8308 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8309 @@ -96,6 +96,9 @@
8310 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8311 _PAGE_ACCESSED)
8312
8313 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
8314 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
8315 +
8316 #define __PAGE_KERNEL_EXEC \
8317 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8318 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8319 @@ -106,8 +109,8 @@
8320 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8321 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8322 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8323 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8324 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8325 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8326 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8327 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8328 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8329 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8330 @@ -166,8 +169,8 @@
8331 * bits are combined, this will alow user to access the high address mapped
8332 * VDSO in the presence of CONFIG_COMPAT_VDSO
8333 */
8334 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8335 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8336 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8337 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8338 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8339 #endif
8340
8341 @@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8342 {
8343 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8344 }
8345 +#endif
8346
8347 +#if PAGETABLE_LEVELS == 3
8348 +#include <asm-generic/pgtable-nopud.h>
8349 +#endif
8350 +
8351 +#if PAGETABLE_LEVELS == 2
8352 +#include <asm-generic/pgtable-nopmd.h>
8353 +#endif
8354 +
8355 +#ifndef __ASSEMBLY__
8356 #if PAGETABLE_LEVELS > 3
8357 typedef struct { pudval_t pud; } pud_t;
8358
8359 @@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8360 return pud.pud;
8361 }
8362 #else
8363 -#include <asm-generic/pgtable-nopud.h>
8364 -
8365 static inline pudval_t native_pud_val(pud_t pud)
8366 {
8367 return native_pgd_val(pud.pgd);
8368 @@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8369 return pmd.pmd;
8370 }
8371 #else
8372 -#include <asm-generic/pgtable-nopmd.h>
8373 -
8374 static inline pmdval_t native_pmd_val(pmd_t pmd)
8375 {
8376 return native_pgd_val(pmd.pud.pgd);
8377 @@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8378
8379 extern pteval_t __supported_pte_mask;
8380 extern void set_nx(void);
8381 -extern int nx_enabled;
8382
8383 #define pgprot_writecombine pgprot_writecombine
8384 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8385 diff -urNp linux-2.6.39.4/arch/x86/include/asm/processor.h linux-2.6.39.4/arch/x86/include/asm/processor.h
8386 --- linux-2.6.39.4/arch/x86/include/asm/processor.h 2011-05-19 00:06:34.000000000 -0400
8387 +++ linux-2.6.39.4/arch/x86/include/asm/processor.h 2011-08-05 19:44:33.000000000 -0400
8388 @@ -266,7 +266,7 @@ struct tss_struct {
8389
8390 } ____cacheline_aligned;
8391
8392 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8393 +extern struct tss_struct init_tss[NR_CPUS];
8394
8395 /*
8396 * Save the original ist values for checking stack pointers during debugging
8397 @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8398 */
8399 #define TASK_SIZE PAGE_OFFSET
8400 #define TASK_SIZE_MAX TASK_SIZE
8401 +
8402 +#ifdef CONFIG_PAX_SEGMEXEC
8403 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8404 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8405 +#else
8406 #define STACK_TOP TASK_SIZE
8407 -#define STACK_TOP_MAX STACK_TOP
8408 +#endif
8409 +
8410 +#define STACK_TOP_MAX TASK_SIZE
8411
8412 #define INIT_THREAD { \
8413 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8414 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8415 .vm86_info = NULL, \
8416 .sysenter_cs = __KERNEL_CS, \
8417 .io_bitmap_ptr = NULL, \
8418 @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8419 */
8420 #define INIT_TSS { \
8421 .x86_tss = { \
8422 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8423 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8424 .ss0 = __KERNEL_DS, \
8425 .ss1 = __KERNEL_CS, \
8426 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8427 @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8428 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8429
8430 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8431 -#define KSTK_TOP(info) \
8432 -({ \
8433 - unsigned long *__ptr = (unsigned long *)(info); \
8434 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8435 -})
8436 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8437
8438 /*
8439 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8440 @@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8441 #define task_pt_regs(task) \
8442 ({ \
8443 struct pt_regs *__regs__; \
8444 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8445 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8446 __regs__ - 1; \
8447 })
8448
8449 @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8450 /*
8451 * User space process size. 47bits minus one guard page.
8452 */
8453 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8454 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8455
8456 /* This decides where the kernel will search for a free chunk of vm
8457 * space during mmap's.
8458 */
8459 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8460 - 0xc0000000 : 0xFFFFe000)
8461 + 0xc0000000 : 0xFFFFf000)
8462
8463 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8464 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8465 @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8466 #define STACK_TOP_MAX TASK_SIZE_MAX
8467
8468 #define INIT_THREAD { \
8469 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8470 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8471 }
8472
8473 #define INIT_TSS { \
8474 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8475 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8476 }
8477
8478 /*
8479 @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
8480 */
8481 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8482
8483 +#ifdef CONFIG_PAX_SEGMEXEC
8484 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8485 +#endif
8486 +
8487 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8488
8489 /* Get/set a process' ability to use the timestamp counter instruction */
8490 diff -urNp linux-2.6.39.4/arch/x86/include/asm/ptrace.h linux-2.6.39.4/arch/x86/include/asm/ptrace.h
8491 --- linux-2.6.39.4/arch/x86/include/asm/ptrace.h 2011-05-19 00:06:34.000000000 -0400
8492 +++ linux-2.6.39.4/arch/x86/include/asm/ptrace.h 2011-08-05 19:44:33.000000000 -0400
8493 @@ -152,28 +152,29 @@ static inline unsigned long regs_return_
8494 }
8495
8496 /*
8497 - * user_mode_vm(regs) determines whether a register set came from user mode.
8498 + * user_mode(regs) determines whether a register set came from user mode.
8499 * This is true if V8086 mode was enabled OR if the register set was from
8500 * protected mode with RPL-3 CS value. This tricky test checks that with
8501 * one comparison. Many places in the kernel can bypass this full check
8502 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8503 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8504 + * be used.
8505 */
8506 -static inline int user_mode(struct pt_regs *regs)
8507 +static inline int user_mode_novm(struct pt_regs *regs)
8508 {
8509 #ifdef CONFIG_X86_32
8510 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8511 #else
8512 - return !!(regs->cs & 3);
8513 + return !!(regs->cs & SEGMENT_RPL_MASK);
8514 #endif
8515 }
8516
8517 -static inline int user_mode_vm(struct pt_regs *regs)
8518 +static inline int user_mode(struct pt_regs *regs)
8519 {
8520 #ifdef CONFIG_X86_32
8521 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8522 USER_RPL;
8523 #else
8524 - return user_mode(regs);
8525 + return user_mode_novm(regs);
8526 #endif
8527 }
8528
8529 diff -urNp linux-2.6.39.4/arch/x86/include/asm/reboot.h linux-2.6.39.4/arch/x86/include/asm/reboot.h
8530 --- linux-2.6.39.4/arch/x86/include/asm/reboot.h 2011-05-19 00:06:34.000000000 -0400
8531 +++ linux-2.6.39.4/arch/x86/include/asm/reboot.h 2011-08-05 20:34:06.000000000 -0400
8532 @@ -6,19 +6,19 @@
8533 struct pt_regs;
8534
8535 struct machine_ops {
8536 - void (*restart)(char *cmd);
8537 - void (*halt)(void);
8538 - void (*power_off)(void);
8539 + void (* __noreturn restart)(char *cmd);
8540 + void (* __noreturn halt)(void);
8541 + void (* __noreturn power_off)(void);
8542 void (*shutdown)(void);
8543 void (*crash_shutdown)(struct pt_regs *);
8544 - void (*emergency_restart)(void);
8545 -};
8546 + void (* __noreturn emergency_restart)(void);
8547 +} __no_const;
8548
8549 extern struct machine_ops machine_ops;
8550
8551 void native_machine_crash_shutdown(struct pt_regs *regs);
8552 void native_machine_shutdown(void);
8553 -void machine_real_restart(unsigned int type);
8554 +void machine_real_restart(unsigned int type) __noreturn;
8555 /* These must match dispatch_table in reboot_32.S */
8556 #define MRR_BIOS 0
8557 #define MRR_APM 1
8558 diff -urNp linux-2.6.39.4/arch/x86/include/asm/rwsem.h linux-2.6.39.4/arch/x86/include/asm/rwsem.h
8559 --- linux-2.6.39.4/arch/x86/include/asm/rwsem.h 2011-05-19 00:06:34.000000000 -0400
8560 +++ linux-2.6.39.4/arch/x86/include/asm/rwsem.h 2011-08-05 19:44:33.000000000 -0400
8561 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8562 {
8563 asm volatile("# beginning down_read\n\t"
8564 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8565 +
8566 +#ifdef CONFIG_PAX_REFCOUNT
8567 + "jno 0f\n"
8568 + LOCK_PREFIX _ASM_DEC "(%1)\n"
8569 + "int $4\n0:\n"
8570 + _ASM_EXTABLE(0b, 0b)
8571 +#endif
8572 +
8573 /* adds 0x00000001 */
8574 " jns 1f\n"
8575 " call call_rwsem_down_read_failed\n"
8576 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8577 "1:\n\t"
8578 " mov %1,%2\n\t"
8579 " add %3,%2\n\t"
8580 +
8581 +#ifdef CONFIG_PAX_REFCOUNT
8582 + "jno 0f\n"
8583 + "sub %3,%2\n"
8584 + "int $4\n0:\n"
8585 + _ASM_EXTABLE(0b, 0b)
8586 +#endif
8587 +
8588 " jle 2f\n\t"
8589 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8590 " jnz 1b\n\t"
8591 @@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8592 long tmp;
8593 asm volatile("# beginning down_write\n\t"
8594 LOCK_PREFIX " xadd %1,(%2)\n\t"
8595 +
8596 +#ifdef CONFIG_PAX_REFCOUNT
8597 + "jno 0f\n"
8598 + "mov %1,(%2)\n"
8599 + "int $4\n0:\n"
8600 + _ASM_EXTABLE(0b, 0b)
8601 +#endif
8602 +
8603 /* adds 0xffff0001, returns the old value */
8604 " test %1,%1\n\t"
8605 /* was the count 0 before? */
8606 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8607 long tmp;
8608 asm volatile("# beginning __up_read\n\t"
8609 LOCK_PREFIX " xadd %1,(%2)\n\t"
8610 +
8611 +#ifdef CONFIG_PAX_REFCOUNT
8612 + "jno 0f\n"
8613 + "mov %1,(%2)\n"
8614 + "int $4\n0:\n"
8615 + _ASM_EXTABLE(0b, 0b)
8616 +#endif
8617 +
8618 /* subtracts 1, returns the old value */
8619 " jns 1f\n\t"
8620 " call call_rwsem_wake\n" /* expects old value in %edx */
8621 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8622 long tmp;
8623 asm volatile("# beginning __up_write\n\t"
8624 LOCK_PREFIX " xadd %1,(%2)\n\t"
8625 +
8626 +#ifdef CONFIG_PAX_REFCOUNT
8627 + "jno 0f\n"
8628 + "mov %1,(%2)\n"
8629 + "int $4\n0:\n"
8630 + _ASM_EXTABLE(0b, 0b)
8631 +#endif
8632 +
8633 /* subtracts 0xffff0001, returns the old value */
8634 " jns 1f\n\t"
8635 " call call_rwsem_wake\n" /* expects old value in %edx */
8636 @@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8637 {
8638 asm volatile("# beginning __downgrade_write\n\t"
8639 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8640 +
8641 +#ifdef CONFIG_PAX_REFCOUNT
8642 + "jno 0f\n"
8643 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8644 + "int $4\n0:\n"
8645 + _ASM_EXTABLE(0b, 0b)
8646 +#endif
8647 +
8648 /*
8649 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8650 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8651 @@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8652 */
8653 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8654 {
8655 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8656 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8657 +
8658 +#ifdef CONFIG_PAX_REFCOUNT
8659 + "jno 0f\n"
8660 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
8661 + "int $4\n0:\n"
8662 + _ASM_EXTABLE(0b, 0b)
8663 +#endif
8664 +
8665 : "+m" (sem->count)
8666 : "er" (delta));
8667 }
8668 @@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8669 {
8670 long tmp = delta;
8671
8672 - asm volatile(LOCK_PREFIX "xadd %0,%1"
8673 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8674 +
8675 +#ifdef CONFIG_PAX_REFCOUNT
8676 + "jno 0f\n"
8677 + "mov %0,%1\n"
8678 + "int $4\n0:\n"
8679 + _ASM_EXTABLE(0b, 0b)
8680 +#endif
8681 +
8682 : "+r" (tmp), "+m" (sem->count)
8683 : : "memory");
8684
8685 diff -urNp linux-2.6.39.4/arch/x86/include/asm/segment.h linux-2.6.39.4/arch/x86/include/asm/segment.h
8686 --- linux-2.6.39.4/arch/x86/include/asm/segment.h 2011-05-19 00:06:34.000000000 -0400
8687 +++ linux-2.6.39.4/arch/x86/include/asm/segment.h 2011-08-05 19:44:33.000000000 -0400
8688 @@ -64,8 +64,8 @@
8689 * 26 - ESPFIX small SS
8690 * 27 - per-cpu [ offset to per-cpu data area ]
8691 * 28 - stack_canary-20 [ for stack protector ]
8692 - * 29 - unused
8693 - * 30 - unused
8694 + * 29 - PCI BIOS CS
8695 + * 30 - PCI BIOS DS
8696 * 31 - TSS for double fault handler
8697 */
8698 #define GDT_ENTRY_TLS_MIN 6
8699 @@ -79,6 +79,8 @@
8700
8701 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8702
8703 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8704 +
8705 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8706
8707 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8708 @@ -104,6 +106,12 @@
8709 #define __KERNEL_STACK_CANARY 0
8710 #endif
8711
8712 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8713 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8714 +
8715 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8716 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8717 +
8718 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8719
8720 /*
8721 @@ -141,7 +149,7 @@
8722 */
8723
8724 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8725 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8726 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8727
8728
8729 #else
8730 @@ -165,6 +173,8 @@
8731 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8732 #define __USER32_DS __USER_DS
8733
8734 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8735 +
8736 #define GDT_ENTRY_TSS 8 /* needs two entries */
8737 #define GDT_ENTRY_LDT 10 /* needs two entries */
8738 #define GDT_ENTRY_TLS_MIN 12
8739 @@ -185,6 +195,7 @@
8740 #endif
8741
8742 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8743 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8744 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8745 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8746 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8747 diff -urNp linux-2.6.39.4/arch/x86/include/asm/smp.h linux-2.6.39.4/arch/x86/include/asm/smp.h
8748 --- linux-2.6.39.4/arch/x86/include/asm/smp.h 2011-05-19 00:06:34.000000000 -0400
8749 +++ linux-2.6.39.4/arch/x86/include/asm/smp.h 2011-08-05 20:34:06.000000000 -0400
8750 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8751 /* cpus sharing the last level cache: */
8752 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8753 DECLARE_PER_CPU(u16, cpu_llc_id);
8754 -DECLARE_PER_CPU(int, cpu_number);
8755 +DECLARE_PER_CPU(unsigned int, cpu_number);
8756
8757 static inline struct cpumask *cpu_sibling_mask(int cpu)
8758 {
8759 @@ -77,7 +77,7 @@ struct smp_ops {
8760
8761 void (*send_call_func_ipi)(const struct cpumask *mask);
8762 void (*send_call_func_single_ipi)(int cpu);
8763 -};
8764 +} __no_const;
8765
8766 /* Globals due to paravirt */
8767 extern void set_cpu_sibling_map(int cpu);
8768 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8769 extern int safe_smp_processor_id(void);
8770
8771 #elif defined(CONFIG_X86_64_SMP)
8772 -#define raw_smp_processor_id() (percpu_read(cpu_number))
8773 -
8774 -#define stack_smp_processor_id() \
8775 -({ \
8776 - struct thread_info *ti; \
8777 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8778 - ti->cpu; \
8779 -})
8780 +#define raw_smp_processor_id() (percpu_read(cpu_number))
8781 +#define stack_smp_processor_id() raw_smp_processor_id()
8782 #define safe_smp_processor_id() smp_processor_id()
8783
8784 #endif
8785 diff -urNp linux-2.6.39.4/arch/x86/include/asm/spinlock.h linux-2.6.39.4/arch/x86/include/asm/spinlock.h
8786 --- linux-2.6.39.4/arch/x86/include/asm/spinlock.h 2011-05-19 00:06:34.000000000 -0400
8787 +++ linux-2.6.39.4/arch/x86/include/asm/spinlock.h 2011-08-05 19:44:33.000000000 -0400
8788 @@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
8789 static inline void arch_read_lock(arch_rwlock_t *rw)
8790 {
8791 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8792 +
8793 +#ifdef CONFIG_PAX_REFCOUNT
8794 + "jno 0f\n"
8795 + LOCK_PREFIX " addl $1,(%0)\n"
8796 + "int $4\n0:\n"
8797 + _ASM_EXTABLE(0b, 0b)
8798 +#endif
8799 +
8800 "jns 1f\n"
8801 "call __read_lock_failed\n\t"
8802 "1:\n"
8803 @@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
8804 static inline void arch_write_lock(arch_rwlock_t *rw)
8805 {
8806 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
8807 +
8808 +#ifdef CONFIG_PAX_REFCOUNT
8809 + "jno 0f\n"
8810 + LOCK_PREFIX " addl %1,(%0)\n"
8811 + "int $4\n0:\n"
8812 + _ASM_EXTABLE(0b, 0b)
8813 +#endif
8814 +
8815 "jz 1f\n"
8816 "call __write_lock_failed\n\t"
8817 "1:\n"
8818 @@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
8819
8820 static inline void arch_read_unlock(arch_rwlock_t *rw)
8821 {
8822 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
8823 + asm volatile(LOCK_PREFIX "incl %0\n"
8824 +
8825 +#ifdef CONFIG_PAX_REFCOUNT
8826 + "jno 0f\n"
8827 + LOCK_PREFIX "decl %0\n"
8828 + "int $4\n0:\n"
8829 + _ASM_EXTABLE(0b, 0b)
8830 +#endif
8831 +
8832 + :"+m" (rw->lock) : : "memory");
8833 }
8834
8835 static inline void arch_write_unlock(arch_rwlock_t *rw)
8836 {
8837 - asm volatile(LOCK_PREFIX "addl %1, %0"
8838 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
8839 +
8840 +#ifdef CONFIG_PAX_REFCOUNT
8841 + "jno 0f\n"
8842 + LOCK_PREFIX "subl %1, %0\n"
8843 + "int $4\n0:\n"
8844 + _ASM_EXTABLE(0b, 0b)
8845 +#endif
8846 +
8847 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
8848 }
8849
8850 diff -urNp linux-2.6.39.4/arch/x86/include/asm/stackprotector.h linux-2.6.39.4/arch/x86/include/asm/stackprotector.h
8851 --- linux-2.6.39.4/arch/x86/include/asm/stackprotector.h 2011-05-19 00:06:34.000000000 -0400
8852 +++ linux-2.6.39.4/arch/x86/include/asm/stackprotector.h 2011-08-05 19:44:33.000000000 -0400
8853 @@ -48,7 +48,7 @@
8854 * head_32 for boot CPU and setup_per_cpu_areas() for others.
8855 */
8856 #define GDT_STACK_CANARY_INIT \
8857 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
8858 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
8859
8860 /*
8861 * Initialize the stackprotector canary value.
8862 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
8863
8864 static inline void load_stack_canary_segment(void)
8865 {
8866 -#ifdef CONFIG_X86_32
8867 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
8868 asm volatile ("mov %0, %%gs" : : "r" (0));
8869 #endif
8870 }
8871 diff -urNp linux-2.6.39.4/arch/x86/include/asm/stacktrace.h linux-2.6.39.4/arch/x86/include/asm/stacktrace.h
8872 --- linux-2.6.39.4/arch/x86/include/asm/stacktrace.h 2011-05-19 00:06:34.000000000 -0400
8873 +++ linux-2.6.39.4/arch/x86/include/asm/stacktrace.h 2011-08-05 19:44:33.000000000 -0400
8874 @@ -11,28 +11,20 @@
8875
8876 extern int kstack_depth_to_print;
8877
8878 -struct thread_info;
8879 +struct task_struct;
8880 struct stacktrace_ops;
8881
8882 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
8883 - unsigned long *stack,
8884 - unsigned long bp,
8885 - const struct stacktrace_ops *ops,
8886 - void *data,
8887 - unsigned long *end,
8888 - int *graph);
8889 -
8890 -extern unsigned long
8891 -print_context_stack(struct thread_info *tinfo,
8892 - unsigned long *stack, unsigned long bp,
8893 - const struct stacktrace_ops *ops, void *data,
8894 - unsigned long *end, int *graph);
8895 -
8896 -extern unsigned long
8897 -print_context_stack_bp(struct thread_info *tinfo,
8898 - unsigned long *stack, unsigned long bp,
8899 - const struct stacktrace_ops *ops, void *data,
8900 - unsigned long *end, int *graph);
8901 +typedef unsigned long walk_stack_t(struct task_struct *task,
8902 + void *stack_start,
8903 + unsigned long *stack,
8904 + unsigned long bp,
8905 + const struct stacktrace_ops *ops,
8906 + void *data,
8907 + unsigned long *end,
8908 + int *graph);
8909 +
8910 +extern walk_stack_t print_context_stack;
8911 +extern walk_stack_t print_context_stack_bp;
8912
8913 /* Generic stack tracer with callbacks */
8914
8915 @@ -43,7 +35,7 @@ struct stacktrace_ops {
8916 void (*address)(void *data, unsigned long address, int reliable);
8917 /* On negative return stop dumping */
8918 int (*stack)(void *data, char *name);
8919 - walk_stack_t walk_stack;
8920 + walk_stack_t *walk_stack;
8921 };
8922
8923 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
8924 diff -urNp linux-2.6.39.4/arch/x86/include/asm/system.h linux-2.6.39.4/arch/x86/include/asm/system.h
8925 --- linux-2.6.39.4/arch/x86/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
8926 +++ linux-2.6.39.4/arch/x86/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
8927 @@ -129,7 +129,7 @@ do { \
8928 "call __switch_to\n\t" \
8929 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
8930 __switch_canary \
8931 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
8932 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
8933 "movq %%rax,%%rdi\n\t" \
8934 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
8935 "jnz ret_from_fork\n\t" \
8936 @@ -140,7 +140,7 @@ do { \
8937 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
8938 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
8939 [_tif_fork] "i" (_TIF_FORK), \
8940 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
8941 + [thread_info] "m" (current_tinfo), \
8942 [current_task] "m" (current_task) \
8943 __switch_canary_iparam \
8944 : "memory", "cc" __EXTRA_CLOBBER)
8945 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
8946 {
8947 unsigned long __limit;
8948 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
8949 - return __limit + 1;
8950 + return __limit;
8951 }
8952
8953 static inline void native_clts(void)
8954 @@ -340,12 +340,12 @@ void enable_hlt(void);
8955
8956 void cpu_idle_wait(void);
8957
8958 -extern unsigned long arch_align_stack(unsigned long sp);
8959 +#define arch_align_stack(x) ((x) & ~0xfUL)
8960 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
8961
8962 void default_idle(void);
8963
8964 -void stop_this_cpu(void *dummy);
8965 +void stop_this_cpu(void *dummy) __noreturn;
8966
8967 /*
8968 * Force strict CPU ordering.
8969 diff -urNp linux-2.6.39.4/arch/x86/include/asm/thread_info.h linux-2.6.39.4/arch/x86/include/asm/thread_info.h
8970 --- linux-2.6.39.4/arch/x86/include/asm/thread_info.h 2011-05-19 00:06:34.000000000 -0400
8971 +++ linux-2.6.39.4/arch/x86/include/asm/thread_info.h 2011-08-05 19:44:33.000000000 -0400
8972 @@ -10,6 +10,7 @@
8973 #include <linux/compiler.h>
8974 #include <asm/page.h>
8975 #include <asm/types.h>
8976 +#include <asm/percpu.h>
8977
8978 /*
8979 * low level task data that entry.S needs immediate access to
8980 @@ -24,7 +25,6 @@ struct exec_domain;
8981 #include <asm/atomic.h>
8982
8983 struct thread_info {
8984 - struct task_struct *task; /* main task structure */
8985 struct exec_domain *exec_domain; /* execution domain */
8986 __u32 flags; /* low level flags */
8987 __u32 status; /* thread synchronous flags */
8988 @@ -34,18 +34,12 @@ struct thread_info {
8989 mm_segment_t addr_limit;
8990 struct restart_block restart_block;
8991 void __user *sysenter_return;
8992 -#ifdef CONFIG_X86_32
8993 - unsigned long previous_esp; /* ESP of the previous stack in
8994 - case of nested (IRQ) stacks
8995 - */
8996 - __u8 supervisor_stack[0];
8997 -#endif
8998 + unsigned long lowest_stack;
8999 int uaccess_err;
9000 };
9001
9002 -#define INIT_THREAD_INFO(tsk) \
9003 +#define INIT_THREAD_INFO \
9004 { \
9005 - .task = &tsk, \
9006 .exec_domain = &default_exec_domain, \
9007 .flags = 0, \
9008 .cpu = 0, \
9009 @@ -56,7 +50,7 @@ struct thread_info {
9010 }, \
9011 }
9012
9013 -#define init_thread_info (init_thread_union.thread_info)
9014 +#define init_thread_info (init_thread_union.stack)
9015 #define init_stack (init_thread_union.stack)
9016
9017 #else /* !__ASSEMBLY__ */
9018 @@ -170,6 +164,23 @@ struct thread_info {
9019 ret; \
9020 })
9021
9022 +#ifdef __ASSEMBLY__
9023 +/* how to get the thread information struct from ASM */
9024 +#define GET_THREAD_INFO(reg) \
9025 + mov PER_CPU_VAR(current_tinfo), reg
9026 +
9027 +/* use this one if reg already contains %esp */
9028 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9029 +#else
9030 +/* how to get the thread information struct from C */
9031 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9032 +
9033 +static __always_inline struct thread_info *current_thread_info(void)
9034 +{
9035 + return percpu_read_stable(current_tinfo);
9036 +}
9037 +#endif
9038 +
9039 #ifdef CONFIG_X86_32
9040
9041 #define STACK_WARN (THREAD_SIZE/8)
9042 @@ -180,35 +191,13 @@ struct thread_info {
9043 */
9044 #ifndef __ASSEMBLY__
9045
9046 -
9047 /* how to get the current stack pointer from C */
9048 register unsigned long current_stack_pointer asm("esp") __used;
9049
9050 -/* how to get the thread information struct from C */
9051 -static inline struct thread_info *current_thread_info(void)
9052 -{
9053 - return (struct thread_info *)
9054 - (current_stack_pointer & ~(THREAD_SIZE - 1));
9055 -}
9056 -
9057 -#else /* !__ASSEMBLY__ */
9058 -
9059 -/* how to get the thread information struct from ASM */
9060 -#define GET_THREAD_INFO(reg) \
9061 - movl $-THREAD_SIZE, reg; \
9062 - andl %esp, reg
9063 -
9064 -/* use this one if reg already contains %esp */
9065 -#define GET_THREAD_INFO_WITH_ESP(reg) \
9066 - andl $-THREAD_SIZE, reg
9067 -
9068 #endif
9069
9070 #else /* X86_32 */
9071
9072 -#include <asm/percpu.h>
9073 -#define KERNEL_STACK_OFFSET (5*8)
9074 -
9075 /*
9076 * macros/functions for gaining access to the thread information structure
9077 * preempt_count needs to be 1 initially, until the scheduler is functional.
9078 @@ -216,21 +205,8 @@ static inline struct thread_info *curren
9079 #ifndef __ASSEMBLY__
9080 DECLARE_PER_CPU(unsigned long, kernel_stack);
9081
9082 -static inline struct thread_info *current_thread_info(void)
9083 -{
9084 - struct thread_info *ti;
9085 - ti = (void *)(percpu_read_stable(kernel_stack) +
9086 - KERNEL_STACK_OFFSET - THREAD_SIZE);
9087 - return ti;
9088 -}
9089 -
9090 -#else /* !__ASSEMBLY__ */
9091 -
9092 -/* how to get the thread information struct from ASM */
9093 -#define GET_THREAD_INFO(reg) \
9094 - movq PER_CPU_VAR(kernel_stack),reg ; \
9095 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9096 -
9097 +/* how to get the current stack pointer from C */
9098 +register unsigned long current_stack_pointer asm("rsp") __used;
9099 #endif
9100
9101 #endif /* !X86_32 */
9102 @@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9103 extern void free_thread_info(struct thread_info *ti);
9104 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9105 #define arch_task_cache_init arch_task_cache_init
9106 +
9107 +#define __HAVE_THREAD_FUNCTIONS
9108 +#define task_thread_info(task) (&(task)->tinfo)
9109 +#define task_stack_page(task) ((task)->stack)
9110 +#define setup_thread_stack(p, org) do {} while (0)
9111 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9112 +
9113 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9114 +extern struct task_struct *alloc_task_struct_node(int node);
9115 +extern void free_task_struct(struct task_struct *);
9116 +
9117 #endif
9118 #endif /* _ASM_X86_THREAD_INFO_H */
9119 diff -urNp linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h
9120 --- linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h 2011-05-19 00:06:34.000000000 -0400
9121 +++ linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h 2011-08-05 19:44:33.000000000 -0400
9122 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
9123 static __always_inline unsigned long __must_check
9124 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9125 {
9126 + pax_track_stack();
9127 +
9128 + if ((long)n < 0)
9129 + return n;
9130 +
9131 if (__builtin_constant_p(n)) {
9132 unsigned long ret;
9133
9134 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
9135 return ret;
9136 }
9137 }
9138 + if (!__builtin_constant_p(n))
9139 + check_object_size(from, n, true);
9140 return __copy_to_user_ll(to, from, n);
9141 }
9142
9143 @@ -83,12 +90,16 @@ static __always_inline unsigned long __m
9144 __copy_to_user(void __user *to, const void *from, unsigned long n)
9145 {
9146 might_fault();
9147 +
9148 return __copy_to_user_inatomic(to, from, n);
9149 }
9150
9151 static __always_inline unsigned long
9152 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9153 {
9154 + if ((long)n < 0)
9155 + return n;
9156 +
9157 /* Avoid zeroing the tail if the copy fails..
9158 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9159 * but as the zeroing behaviour is only significant when n is not
9160 @@ -138,6 +149,12 @@ static __always_inline unsigned long
9161 __copy_from_user(void *to, const void __user *from, unsigned long n)
9162 {
9163 might_fault();
9164 +
9165 + pax_track_stack();
9166 +
9167 + if ((long)n < 0)
9168 + return n;
9169 +
9170 if (__builtin_constant_p(n)) {
9171 unsigned long ret;
9172
9173 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
9174 return ret;
9175 }
9176 }
9177 + if (!__builtin_constant_p(n))
9178 + check_object_size(to, n, false);
9179 return __copy_from_user_ll(to, from, n);
9180 }
9181
9182 @@ -160,6 +179,10 @@ static __always_inline unsigned long __c
9183 const void __user *from, unsigned long n)
9184 {
9185 might_fault();
9186 +
9187 + if ((long)n < 0)
9188 + return n;
9189 +
9190 if (__builtin_constant_p(n)) {
9191 unsigned long ret;
9192
9193 @@ -182,15 +205,19 @@ static __always_inline unsigned long
9194 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9195 unsigned long n)
9196 {
9197 - return __copy_from_user_ll_nocache_nozero(to, from, n);
9198 -}
9199 + if ((long)n < 0)
9200 + return n;
9201
9202 -unsigned long __must_check copy_to_user(void __user *to,
9203 - const void *from, unsigned long n);
9204 -unsigned long __must_check _copy_from_user(void *to,
9205 - const void __user *from,
9206 - unsigned long n);
9207 + return __copy_from_user_ll_nocache_nozero(to, from, n);
9208 +}
9209
9210 +extern void copy_to_user_overflow(void)
9211 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9212 + __compiletime_error("copy_to_user() buffer size is not provably correct")
9213 +#else
9214 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
9215 +#endif
9216 +;
9217
9218 extern void copy_from_user_overflow(void)
9219 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9220 @@ -200,17 +227,61 @@ extern void copy_from_user_overflow(void
9221 #endif
9222 ;
9223
9224 -static inline unsigned long __must_check copy_from_user(void *to,
9225 - const void __user *from,
9226 - unsigned long n)
9227 +/**
9228 + * copy_to_user: - Copy a block of data into user space.
9229 + * @to: Destination address, in user space.
9230 + * @from: Source address, in kernel space.
9231 + * @n: Number of bytes to copy.
9232 + *
9233 + * Context: User context only. This function may sleep.
9234 + *
9235 + * Copy data from kernel space to user space.
9236 + *
9237 + * Returns number of bytes that could not be copied.
9238 + * On success, this will be zero.
9239 + */
9240 +static inline unsigned long __must_check
9241 +copy_to_user(void __user *to, const void *from, unsigned long n)
9242 +{
9243 + int sz = __compiletime_object_size(from);
9244 +
9245 + if (unlikely(sz != -1 && sz < n))
9246 + copy_to_user_overflow();
9247 + else if (access_ok(VERIFY_WRITE, to, n))
9248 + n = __copy_to_user(to, from, n);
9249 + return n;
9250 +}
9251 +
9252 +/**
9253 + * copy_from_user: - Copy a block of data from user space.
9254 + * @to: Destination address, in kernel space.
9255 + * @from: Source address, in user space.
9256 + * @n: Number of bytes to copy.
9257 + *
9258 + * Context: User context only. This function may sleep.
9259 + *
9260 + * Copy data from user space to kernel space.
9261 + *
9262 + * Returns number of bytes that could not be copied.
9263 + * On success, this will be zero.
9264 + *
9265 + * If some data could not be copied, this function will pad the copied
9266 + * data to the requested size using zero bytes.
9267 + */
9268 +static inline unsigned long __must_check
9269 +copy_from_user(void *to, const void __user *from, unsigned long n)
9270 {
9271 int sz = __compiletime_object_size(to);
9272
9273 - if (likely(sz == -1 || sz >= n))
9274 - n = _copy_from_user(to, from, n);
9275 - else
9276 + if (unlikely(sz != -1 && sz < n))
9277 copy_from_user_overflow();
9278 -
9279 + else if (access_ok(VERIFY_READ, from, n))
9280 + n = __copy_from_user(to, from, n);
9281 + else if ((long)n > 0) {
9282 + if (!__builtin_constant_p(n))
9283 + check_object_size(to, n, false);
9284 + memset(to, 0, n);
9285 + }
9286 return n;
9287 }
9288
9289 diff -urNp linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h
9290 --- linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h 2011-05-19 00:06:34.000000000 -0400
9291 +++ linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h 2011-08-05 19:44:33.000000000 -0400
9292 @@ -11,6 +11,9 @@
9293 #include <asm/alternative.h>
9294 #include <asm/cpufeature.h>
9295 #include <asm/page.h>
9296 +#include <asm/pgtable.h>
9297 +
9298 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
9299
9300 /*
9301 * Copy To/From Userspace
9302 @@ -37,26 +40,26 @@ copy_user_generic(void *to, const void *
9303 return ret;
9304 }
9305
9306 -__must_check unsigned long
9307 -_copy_to_user(void __user *to, const void *from, unsigned len);
9308 -__must_check unsigned long
9309 -_copy_from_user(void *to, const void __user *from, unsigned len);
9310 +static __always_inline __must_check unsigned long
9311 +__copy_to_user(void __user *to, const void *from, unsigned len);
9312 +static __always_inline __must_check unsigned long
9313 +__copy_from_user(void *to, const void __user *from, unsigned len);
9314 __must_check unsigned long
9315 copy_in_user(void __user *to, const void __user *from, unsigned len);
9316
9317 static inline unsigned long __must_check copy_from_user(void *to,
9318 const void __user *from,
9319 - unsigned long n)
9320 + unsigned n)
9321 {
9322 - int sz = __compiletime_object_size(to);
9323 -
9324 might_fault();
9325 - if (likely(sz == -1 || sz >= n))
9326 - n = _copy_from_user(to, from, n);
9327 -#ifdef CONFIG_DEBUG_VM
9328 - else
9329 - WARN(1, "Buffer overflow detected!\n");
9330 -#endif
9331 +
9332 + if (access_ok(VERIFY_READ, from, n))
9333 + n = __copy_from_user(to, from, n);
9334 + else if ((int)n > 0) {
9335 + if (!__builtin_constant_p(n))
9336 + check_object_size(to, n, false);
9337 + memset(to, 0, n);
9338 + }
9339 return n;
9340 }
9341
9342 @@ -65,110 +68,198 @@ int copy_to_user(void __user *dst, const
9343 {
9344 might_fault();
9345
9346 - return _copy_to_user(dst, src, size);
9347 + if (access_ok(VERIFY_WRITE, dst, size))
9348 + size = __copy_to_user(dst, src, size);
9349 + return size;
9350 }
9351
9352 static __always_inline __must_check
9353 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
9354 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9355 {
9356 - int ret = 0;
9357 + int sz = __compiletime_object_size(dst);
9358 + unsigned ret = 0;
9359
9360 might_fault();
9361 - if (!__builtin_constant_p(size))
9362 - return copy_user_generic(dst, (__force void *)src, size);
9363 +
9364 + pax_track_stack();
9365 +
9366 + if ((int)size < 0)
9367 + return size;
9368 +
9369 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9370 + if (!__access_ok(VERIFY_READ, src, size))
9371 + return size;
9372 +#endif
9373 +
9374 + if (unlikely(sz != -1 && sz < size)) {
9375 +#ifdef CONFIG_DEBUG_VM
9376 + WARN(1, "Buffer overflow detected!\n");
9377 +#endif
9378 + return size;
9379 + }
9380 +
9381 + if (!__builtin_constant_p(size)) {
9382 + check_object_size(dst, size, false);
9383 +
9384 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9385 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9386 + src += PAX_USER_SHADOW_BASE;
9387 +#endif
9388 +
9389 + return copy_user_generic(dst, (__force const void *)src, size);
9390 + }
9391 switch (size) {
9392 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9393 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9394 ret, "b", "b", "=q", 1);
9395 return ret;
9396 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9397 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9398 ret, "w", "w", "=r", 2);
9399 return ret;
9400 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9401 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9402 ret, "l", "k", "=r", 4);
9403 return ret;
9404 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9405 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9406 ret, "q", "", "=r", 8);
9407 return ret;
9408 case 10:
9409 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9410 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9411 ret, "q", "", "=r", 10);
9412 if (unlikely(ret))
9413 return ret;
9414 __get_user_asm(*(u16 *)(8 + (char *)dst),
9415 - (u16 __user *)(8 + (char __user *)src),
9416 + (const u16 __user *)(8 + (const char __user *)src),
9417 ret, "w", "w", "=r", 2);
9418 return ret;
9419 case 16:
9420 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9421 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9422 ret, "q", "", "=r", 16);
9423 if (unlikely(ret))
9424 return ret;
9425 __get_user_asm(*(u64 *)(8 + (char *)dst),
9426 - (u64 __user *)(8 + (char __user *)src),
9427 + (const u64 __user *)(8 + (const char __user *)src),
9428 ret, "q", "", "=r", 8);
9429 return ret;
9430 default:
9431 - return copy_user_generic(dst, (__force void *)src, size);
9432 +
9433 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9434 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9435 + src += PAX_USER_SHADOW_BASE;
9436 +#endif
9437 +
9438 + return copy_user_generic(dst, (__force const void *)src, size);
9439 }
9440 }
9441
9442 static __always_inline __must_check
9443 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
9444 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9445 {
9446 - int ret = 0;
9447 + int sz = __compiletime_object_size(src);
9448 + unsigned ret = 0;
9449
9450 might_fault();
9451 - if (!__builtin_constant_p(size))
9452 +
9453 + pax_track_stack();
9454 +
9455 + if ((int)size < 0)
9456 + return size;
9457 +
9458 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9459 + if (!__access_ok(VERIFY_WRITE, dst, size))
9460 + return size;
9461 +#endif
9462 +
9463 + if (unlikely(sz != -1 && sz < size)) {
9464 +#ifdef CONFIG_DEBUG_VM
9465 + WARN(1, "Buffer overflow detected!\n");
9466 +#endif
9467 + return size;
9468 + }
9469 +
9470 + if (!__builtin_constant_p(size)) {
9471 + check_object_size(src, size, true);
9472 +
9473 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9474 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9475 + dst += PAX_USER_SHADOW_BASE;
9476 +#endif
9477 +
9478 return copy_user_generic((__force void *)dst, src, size);
9479 + }
9480 switch (size) {
9481 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9482 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9483 ret, "b", "b", "iq", 1);
9484 return ret;
9485 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9486 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9487 ret, "w", "w", "ir", 2);
9488 return ret;
9489 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9490 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9491 ret, "l", "k", "ir", 4);
9492 return ret;
9493 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9494 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9495 ret, "q", "", "er", 8);
9496 return ret;
9497 case 10:
9498 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9499 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9500 ret, "q", "", "er", 10);
9501 if (unlikely(ret))
9502 return ret;
9503 asm("":::"memory");
9504 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9505 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9506 ret, "w", "w", "ir", 2);
9507 return ret;
9508 case 16:
9509 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9510 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9511 ret, "q", "", "er", 16);
9512 if (unlikely(ret))
9513 return ret;
9514 asm("":::"memory");
9515 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9516 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9517 ret, "q", "", "er", 8);
9518 return ret;
9519 default:
9520 +
9521 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9522 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9523 + dst += PAX_USER_SHADOW_BASE;
9524 +#endif
9525 +
9526 return copy_user_generic((__force void *)dst, src, size);
9527 }
9528 }
9529
9530 static __always_inline __must_check
9531 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9532 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9533 {
9534 - int ret = 0;
9535 + unsigned ret = 0;
9536
9537 might_fault();
9538 - if (!__builtin_constant_p(size))
9539 +
9540 + if ((int)size < 0)
9541 + return size;
9542 +
9543 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9544 + if (!__access_ok(VERIFY_READ, src, size))
9545 + return size;
9546 + if (!__access_ok(VERIFY_WRITE, dst, size))
9547 + return size;
9548 +#endif
9549 +
9550 + if (!__builtin_constant_p(size)) {
9551 +
9552 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9553 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9554 + src += PAX_USER_SHADOW_BASE;
9555 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9556 + dst += PAX_USER_SHADOW_BASE;
9557 +#endif
9558 +
9559 return copy_user_generic((__force void *)dst,
9560 - (__force void *)src, size);
9561 + (__force const void *)src, size);
9562 + }
9563 switch (size) {
9564 case 1: {
9565 u8 tmp;
9566 - __get_user_asm(tmp, (u8 __user *)src,
9567 + __get_user_asm(tmp, (const u8 __user *)src,
9568 ret, "b", "b", "=q", 1);
9569 if (likely(!ret))
9570 __put_user_asm(tmp, (u8 __user *)dst,
9571 @@ -177,7 +268,7 @@ int __copy_in_user(void __user *dst, con
9572 }
9573 case 2: {
9574 u16 tmp;
9575 - __get_user_asm(tmp, (u16 __user *)src,
9576 + __get_user_asm(tmp, (const u16 __user *)src,
9577 ret, "w", "w", "=r", 2);
9578 if (likely(!ret))
9579 __put_user_asm(tmp, (u16 __user *)dst,
9580 @@ -187,7 +278,7 @@ int __copy_in_user(void __user *dst, con
9581
9582 case 4: {
9583 u32 tmp;
9584 - __get_user_asm(tmp, (u32 __user *)src,
9585 + __get_user_asm(tmp, (const u32 __user *)src,
9586 ret, "l", "k", "=r", 4);
9587 if (likely(!ret))
9588 __put_user_asm(tmp, (u32 __user *)dst,
9589 @@ -196,7 +287,7 @@ int __copy_in_user(void __user *dst, con
9590 }
9591 case 8: {
9592 u64 tmp;
9593 - __get_user_asm(tmp, (u64 __user *)src,
9594 + __get_user_asm(tmp, (const u64 __user *)src,
9595 ret, "q", "", "=r", 8);
9596 if (likely(!ret))
9597 __put_user_asm(tmp, (u64 __user *)dst,
9598 @@ -204,8 +295,16 @@ int __copy_in_user(void __user *dst, con
9599 return ret;
9600 }
9601 default:
9602 +
9603 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9604 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9605 + src += PAX_USER_SHADOW_BASE;
9606 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9607 + dst += PAX_USER_SHADOW_BASE;
9608 +#endif
9609 +
9610 return copy_user_generic((__force void *)dst,
9611 - (__force void *)src, size);
9612 + (__force const void *)src, size);
9613 }
9614 }
9615
9616 @@ -222,33 +321,72 @@ __must_check unsigned long __clear_user(
9617 static __must_check __always_inline int
9618 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9619 {
9620 + pax_track_stack();
9621 +
9622 + if ((int)size < 0)
9623 + return size;
9624 +
9625 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9626 + if (!__access_ok(VERIFY_READ, src, size))
9627 + return size;
9628 +
9629 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9630 + src += PAX_USER_SHADOW_BASE;
9631 +#endif
9632 +
9633 return copy_user_generic(dst, (__force const void *)src, size);
9634 }
9635
9636 -static __must_check __always_inline int
9637 +static __must_check __always_inline unsigned long
9638 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9639 {
9640 + if ((int)size < 0)
9641 + return size;
9642 +
9643 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9644 + if (!__access_ok(VERIFY_WRITE, dst, size))
9645 + return size;
9646 +
9647 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9648 + dst += PAX_USER_SHADOW_BASE;
9649 +#endif
9650 +
9651 return copy_user_generic((__force void *)dst, src, size);
9652 }
9653
9654 -extern long __copy_user_nocache(void *dst, const void __user *src,
9655 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9656 unsigned size, int zerorest);
9657
9658 -static inline int
9659 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9660 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9661 {
9662 might_sleep();
9663 +
9664 + if ((int)size < 0)
9665 + return size;
9666 +
9667 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9668 + if (!__access_ok(VERIFY_READ, src, size))
9669 + return size;
9670 +#endif
9671 +
9672 return __copy_user_nocache(dst, src, size, 1);
9673 }
9674
9675 -static inline int
9676 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9677 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9678 unsigned size)
9679 {
9680 + if ((int)size < 0)
9681 + return size;
9682 +
9683 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9684 + if (!__access_ok(VERIFY_READ, src, size))
9685 + return size;
9686 +#endif
9687 +
9688 return __copy_user_nocache(dst, src, size, 0);
9689 }
9690
9691 -unsigned long
9692 +extern unsigned long
9693 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9694
9695 #endif /* _ASM_X86_UACCESS_64_H */
9696 diff -urNp linux-2.6.39.4/arch/x86/include/asm/uaccess.h linux-2.6.39.4/arch/x86/include/asm/uaccess.h
9697 --- linux-2.6.39.4/arch/x86/include/asm/uaccess.h 2011-06-03 00:04:13.000000000 -0400
9698 +++ linux-2.6.39.4/arch/x86/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
9699 @@ -8,12 +8,15 @@
9700 #include <linux/thread_info.h>
9701 #include <linux/prefetch.h>
9702 #include <linux/string.h>
9703 +#include <linux/sched.h>
9704 #include <asm/asm.h>
9705 #include <asm/page.h>
9706
9707 #define VERIFY_READ 0
9708 #define VERIFY_WRITE 1
9709
9710 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
9711 +
9712 /*
9713 * The fs value determines whether argument validity checking should be
9714 * performed or not. If get_fs() == USER_DS, checking is performed, with
9715 @@ -29,7 +32,12 @@
9716
9717 #define get_ds() (KERNEL_DS)
9718 #define get_fs() (current_thread_info()->addr_limit)
9719 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9720 +void __set_fs(mm_segment_t x);
9721 +void set_fs(mm_segment_t x);
9722 +#else
9723 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9724 +#endif
9725
9726 #define segment_eq(a, b) ((a).seg == (b).seg)
9727
9728 @@ -77,7 +85,33 @@
9729 * checks that the pointer is in the user space range - after calling
9730 * this function, memory access functions may still return -EFAULT.
9731 */
9732 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9733 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9734 +#define access_ok(type, addr, size) \
9735 +({ \
9736 + long __size = size; \
9737 + unsigned long __addr = (unsigned long)addr; \
9738 + unsigned long __addr_ao = __addr & PAGE_MASK; \
9739 + unsigned long __end_ao = __addr + __size - 1; \
9740 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9741 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9742 + while(__addr_ao <= __end_ao) { \
9743 + char __c_ao; \
9744 + __addr_ao += PAGE_SIZE; \
9745 + if (__size > PAGE_SIZE) \
9746 + cond_resched(); \
9747 + if (__get_user(__c_ao, (char __user *)__addr)) \
9748 + break; \
9749 + if (type != VERIFY_WRITE) { \
9750 + __addr = __addr_ao; \
9751 + continue; \
9752 + } \
9753 + if (__put_user(__c_ao, (char __user *)__addr)) \
9754 + break; \
9755 + __addr = __addr_ao; \
9756 + } \
9757 + } \
9758 + __ret_ao; \
9759 +})
9760
9761 /*
9762 * The exception table consists of pairs of addresses: the first is the
9763 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
9764 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9765 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9766
9767 -
9768 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9769 +#define __copyuser_seg "gs;"
9770 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9771 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9772 +#else
9773 +#define __copyuser_seg
9774 +#define __COPYUSER_SET_ES
9775 +#define __COPYUSER_RESTORE_ES
9776 +#endif
9777
9778 #ifdef CONFIG_X86_32
9779 #define __put_user_asm_u64(x, addr, err, errret) \
9780 - asm volatile("1: movl %%eax,0(%2)\n" \
9781 - "2: movl %%edx,4(%2)\n" \
9782 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
9783 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
9784 "3:\n" \
9785 ".section .fixup,\"ax\"\n" \
9786 "4: movl %3,%0\n" \
9787 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
9788 : "A" (x), "r" (addr), "i" (errret), "0" (err))
9789
9790 #define __put_user_asm_ex_u64(x, addr) \
9791 - asm volatile("1: movl %%eax,0(%1)\n" \
9792 - "2: movl %%edx,4(%1)\n" \
9793 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
9794 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
9795 "3:\n" \
9796 _ASM_EXTABLE(1b, 2b - 1b) \
9797 _ASM_EXTABLE(2b, 3b - 2b) \
9798 @@ -374,7 +416,7 @@ do { \
9799 } while (0)
9800
9801 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9802 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
9803 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
9804 "2:\n" \
9805 ".section .fixup,\"ax\"\n" \
9806 "3: mov %3,%0\n" \
9807 @@ -382,7 +424,7 @@ do { \
9808 " jmp 2b\n" \
9809 ".previous\n" \
9810 _ASM_EXTABLE(1b, 3b) \
9811 - : "=r" (err), ltype(x) \
9812 + : "=r" (err), ltype (x) \
9813 : "m" (__m(addr)), "i" (errret), "0" (err))
9814
9815 #define __get_user_size_ex(x, ptr, size) \
9816 @@ -407,7 +449,7 @@ do { \
9817 } while (0)
9818
9819 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
9820 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
9821 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
9822 "2:\n" \
9823 _ASM_EXTABLE(1b, 2b - 1b) \
9824 : ltype(x) : "m" (__m(addr)))
9825 @@ -424,13 +466,24 @@ do { \
9826 int __gu_err; \
9827 unsigned long __gu_val; \
9828 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
9829 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
9830 + (x) = (__typeof__(*(ptr)))__gu_val; \
9831 __gu_err; \
9832 })
9833
9834 /* FIXME: this hack is definitely wrong -AK */
9835 struct __large_struct { unsigned long buf[100]; };
9836 -#define __m(x) (*(struct __large_struct __user *)(x))
9837 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9838 +#define ____m(x) \
9839 +({ \
9840 + unsigned long ____x = (unsigned long)(x); \
9841 + if (____x < PAX_USER_SHADOW_BASE) \
9842 + ____x += PAX_USER_SHADOW_BASE; \
9843 + (void __user *)____x; \
9844 +})
9845 +#else
9846 +#define ____m(x) (x)
9847 +#endif
9848 +#define __m(x) (*(struct __large_struct __user *)____m(x))
9849
9850 /*
9851 * Tell gcc we read from memory instead of writing: this is because
9852 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
9853 * aliasing issues.
9854 */
9855 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9856 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
9857 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
9858 "2:\n" \
9859 ".section .fixup,\"ax\"\n" \
9860 "3: mov %3,%0\n" \
9861 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
9862 ".previous\n" \
9863 _ASM_EXTABLE(1b, 3b) \
9864 : "=r"(err) \
9865 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
9866 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
9867
9868 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
9869 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
9870 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
9871 "2:\n" \
9872 _ASM_EXTABLE(1b, 2b - 1b) \
9873 : : ltype(x), "m" (__m(addr)))
9874 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
9875 * On error, the variable @x is set to zero.
9876 */
9877
9878 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9879 +#define __get_user(x, ptr) get_user((x), (ptr))
9880 +#else
9881 #define __get_user(x, ptr) \
9882 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
9883 +#endif
9884
9885 /**
9886 * __put_user: - Write a simple value into user space, with less checking.
9887 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
9888 * Returns zero on success, or -EFAULT on error.
9889 */
9890
9891 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9892 +#define __put_user(x, ptr) put_user((x), (ptr))
9893 +#else
9894 #define __put_user(x, ptr) \
9895 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
9896 +#endif
9897
9898 #define __get_user_unaligned __get_user
9899 #define __put_user_unaligned __put_user
9900 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
9901 #define get_user_ex(x, ptr) do { \
9902 unsigned long __gue_val; \
9903 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
9904 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
9905 + (x) = (__typeof__(*(ptr)))__gue_val; \
9906 } while (0)
9907
9908 #ifdef CONFIG_X86_WP_WORKS_OK
9909 @@ -567,6 +628,7 @@ extern struct movsl_mask {
9910
9911 #define ARCH_HAS_NOCACHE_UACCESS 1
9912
9913 +#define ARCH_HAS_SORT_EXTABLE
9914 #ifdef CONFIG_X86_32
9915 # include "uaccess_32.h"
9916 #else
9917 diff -urNp linux-2.6.39.4/arch/x86/include/asm/vgtod.h linux-2.6.39.4/arch/x86/include/asm/vgtod.h
9918 --- linux-2.6.39.4/arch/x86/include/asm/vgtod.h 2011-05-19 00:06:34.000000000 -0400
9919 +++ linux-2.6.39.4/arch/x86/include/asm/vgtod.h 2011-08-05 19:44:33.000000000 -0400
9920 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
9921 int sysctl_enabled;
9922 struct timezone sys_tz;
9923 struct { /* extract of a clocksource struct */
9924 + char name[8];
9925 cycle_t (*vread)(void);
9926 cycle_t cycle_last;
9927 cycle_t mask;
9928 diff -urNp linux-2.6.39.4/arch/x86/include/asm/vsyscall.h linux-2.6.39.4/arch/x86/include/asm/vsyscall.h
9929 --- linux-2.6.39.4/arch/x86/include/asm/vsyscall.h 2011-05-19 00:06:34.000000000 -0400
9930 +++ linux-2.6.39.4/arch/x86/include/asm/vsyscall.h 2011-08-05 19:44:33.000000000 -0400
9931 @@ -15,9 +15,10 @@ enum vsyscall_num {
9932
9933 #ifdef __KERNEL__
9934 #include <linux/seqlock.h>
9935 +#include <linux/getcpu.h>
9936 +#include <linux/time.h>
9937
9938 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
9939 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
9940
9941 /* Definitions for CONFIG_GENERIC_TIME definitions */
9942 #define __section_vsyscall_gtod_data __attribute__ \
9943 @@ -31,7 +32,6 @@ enum vsyscall_num {
9944 #define VGETCPU_LSL 2
9945
9946 extern int __vgetcpu_mode;
9947 -extern volatile unsigned long __jiffies;
9948
9949 /* kernel space (writeable) */
9950 extern int vgetcpu_mode;
9951 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
9952
9953 extern void map_vsyscall(void);
9954
9955 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
9956 +extern time_t vtime(time_t *t);
9957 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
9958 #endif /* __KERNEL__ */
9959
9960 #endif /* _ASM_X86_VSYSCALL_H */
9961 diff -urNp linux-2.6.39.4/arch/x86/include/asm/x86_init.h linux-2.6.39.4/arch/x86/include/asm/x86_init.h
9962 --- linux-2.6.39.4/arch/x86/include/asm/x86_init.h 2011-05-19 00:06:34.000000000 -0400
9963 +++ linux-2.6.39.4/arch/x86/include/asm/x86_init.h 2011-08-05 20:34:06.000000000 -0400
9964 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
9965 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
9966 void (*find_smp_config)(void);
9967 void (*get_smp_config)(unsigned int early);
9968 -};
9969 +} __no_const;
9970
9971 /**
9972 * struct x86_init_resources - platform specific resource related ops
9973 @@ -42,7 +42,7 @@ struct x86_init_resources {
9974 void (*probe_roms)(void);
9975 void (*reserve_resources)(void);
9976 char *(*memory_setup)(void);
9977 -};
9978 +} __no_const;
9979
9980 /**
9981 * struct x86_init_irqs - platform specific interrupt setup
9982 @@ -55,7 +55,7 @@ struct x86_init_irqs {
9983 void (*pre_vector_init)(void);
9984 void (*intr_init)(void);
9985 void (*trap_init)(void);
9986 -};
9987 +} __no_const;
9988
9989 /**
9990 * struct x86_init_oem - oem platform specific customizing functions
9991 @@ -65,7 +65,7 @@ struct x86_init_irqs {
9992 struct x86_init_oem {
9993 void (*arch_setup)(void);
9994 void (*banner)(void);
9995 -};
9996 +} __no_const;
9997
9998 /**
9999 * struct x86_init_mapping - platform specific initial kernel pagetable setup
10000 @@ -76,7 +76,7 @@ struct x86_init_oem {
10001 */
10002 struct x86_init_mapping {
10003 void (*pagetable_reserve)(u64 start, u64 end);
10004 -};
10005 +} __no_const;
10006
10007 /**
10008 * struct x86_init_paging - platform specific paging functions
10009 @@ -86,7 +86,7 @@ struct x86_init_mapping {
10010 struct x86_init_paging {
10011 void (*pagetable_setup_start)(pgd_t *base);
10012 void (*pagetable_setup_done)(pgd_t *base);
10013 -};
10014 +} __no_const;
10015
10016 /**
10017 * struct x86_init_timers - platform specific timer setup
10018 @@ -101,7 +101,7 @@ struct x86_init_timers {
10019 void (*tsc_pre_init)(void);
10020 void (*timer_init)(void);
10021 void (*wallclock_init)(void);
10022 -};
10023 +} __no_const;
10024
10025 /**
10026 * struct x86_init_iommu - platform specific iommu setup
10027 @@ -109,7 +109,7 @@ struct x86_init_timers {
10028 */
10029 struct x86_init_iommu {
10030 int (*iommu_init)(void);
10031 -};
10032 +} __no_const;
10033
10034 /**
10035 * struct x86_init_pci - platform specific pci init functions
10036 @@ -123,7 +123,7 @@ struct x86_init_pci {
10037 int (*init)(void);
10038 void (*init_irq)(void);
10039 void (*fixup_irqs)(void);
10040 -};
10041 +} __no_const;
10042
10043 /**
10044 * struct x86_init_ops - functions for platform specific setup
10045 @@ -139,7 +139,7 @@ struct x86_init_ops {
10046 struct x86_init_timers timers;
10047 struct x86_init_iommu iommu;
10048 struct x86_init_pci pci;
10049 -};
10050 +} __no_const;
10051
10052 /**
10053 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10054 @@ -147,7 +147,7 @@ struct x86_init_ops {
10055 */
10056 struct x86_cpuinit_ops {
10057 void (*setup_percpu_clockev)(void);
10058 -};
10059 +} __no_const;
10060
10061 /**
10062 * struct x86_platform_ops - platform specific runtime functions
10063 @@ -166,7 +166,7 @@ struct x86_platform_ops {
10064 bool (*is_untracked_pat_range)(u64 start, u64 end);
10065 void (*nmi_init)(void);
10066 int (*i8042_detect)(void);
10067 -};
10068 +} __no_const;
10069
10070 struct pci_dev;
10071
10072 @@ -174,7 +174,7 @@ struct x86_msi_ops {
10073 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10074 void (*teardown_msi_irq)(unsigned int irq);
10075 void (*teardown_msi_irqs)(struct pci_dev *dev);
10076 -};
10077 +} __no_const;
10078
10079 extern struct x86_init_ops x86_init;
10080 extern struct x86_cpuinit_ops x86_cpuinit;
10081 diff -urNp linux-2.6.39.4/arch/x86/include/asm/xsave.h linux-2.6.39.4/arch/x86/include/asm/xsave.h
10082 --- linux-2.6.39.4/arch/x86/include/asm/xsave.h 2011-05-19 00:06:34.000000000 -0400
10083 +++ linux-2.6.39.4/arch/x86/include/asm/xsave.h 2011-08-05 19:44:33.000000000 -0400
10084 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10085 {
10086 int err;
10087
10088 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10089 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10090 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10091 +#endif
10092 +
10093 /*
10094 * Clear the xsave header first, so that reserved fields are
10095 * initialized to zero.
10096 @@ -100,6 +105,11 @@ static inline int xrestore_user(struct x
10097 u32 lmask = mask;
10098 u32 hmask = mask >> 32;
10099
10100 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10101 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10102 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10103 +#endif
10104 +
10105 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10106 "2:\n"
10107 ".section .fixup,\"ax\"\n"
10108 diff -urNp linux-2.6.39.4/arch/x86/Kconfig linux-2.6.39.4/arch/x86/Kconfig
10109 --- linux-2.6.39.4/arch/x86/Kconfig 2011-05-19 00:06:34.000000000 -0400
10110 +++ linux-2.6.39.4/arch/x86/Kconfig 2011-08-05 19:44:33.000000000 -0400
10111 @@ -224,7 +224,7 @@ config X86_HT
10112
10113 config X86_32_LAZY_GS
10114 def_bool y
10115 - depends on X86_32 && !CC_STACKPROTECTOR
10116 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10117
10118 config ARCH_HWEIGHT_CFLAGS
10119 string
10120 @@ -1022,7 +1022,7 @@ choice
10121
10122 config NOHIGHMEM
10123 bool "off"
10124 - depends on !X86_NUMAQ
10125 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10126 ---help---
10127 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10128 However, the address space of 32-bit x86 processors is only 4
10129 @@ -1059,7 +1059,7 @@ config NOHIGHMEM
10130
10131 config HIGHMEM4G
10132 bool "4GB"
10133 - depends on !X86_NUMAQ
10134 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10135 ---help---
10136 Select this if you have a 32-bit processor and between 1 and 4
10137 gigabytes of physical RAM.
10138 @@ -1113,7 +1113,7 @@ config PAGE_OFFSET
10139 hex
10140 default 0xB0000000 if VMSPLIT_3G_OPT
10141 default 0x80000000 if VMSPLIT_2G
10142 - default 0x78000000 if VMSPLIT_2G_OPT
10143 + default 0x70000000 if VMSPLIT_2G_OPT
10144 default 0x40000000 if VMSPLIT_1G
10145 default 0xC0000000
10146 depends on X86_32
10147 @@ -1457,7 +1457,7 @@ config ARCH_USES_PG_UNCACHED
10148
10149 config EFI
10150 bool "EFI runtime service support"
10151 - depends on ACPI
10152 + depends on ACPI && !PAX_KERNEXEC
10153 ---help---
10154 This enables the kernel to use EFI runtime services that are
10155 available (such as the EFI variable services).
10156 @@ -1487,6 +1487,7 @@ config SECCOMP
10157
10158 config CC_STACKPROTECTOR
10159 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10160 + depends on X86_64 || !PAX_MEMORY_UDEREF
10161 ---help---
10162 This option turns on the -fstack-protector GCC feature. This
10163 feature puts, at the beginning of functions, a canary value on
10164 @@ -1544,6 +1545,7 @@ config KEXEC_JUMP
10165 config PHYSICAL_START
10166 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10167 default "0x1000000"
10168 + range 0x400000 0x40000000
10169 ---help---
10170 This gives the physical address where the kernel is loaded.
10171
10172 @@ -1607,6 +1609,7 @@ config X86_NEED_RELOCS
10173 config PHYSICAL_ALIGN
10174 hex "Alignment value to which kernel should be aligned" if X86_32
10175 default "0x1000000"
10176 + range 0x400000 0x1000000 if PAX_KERNEXEC
10177 range 0x2000 0x1000000
10178 ---help---
10179 This value puts the alignment restrictions on physical address
10180 @@ -1638,9 +1641,10 @@ config HOTPLUG_CPU
10181 Say N if you want to disable CPU hotplug.
10182
10183 config COMPAT_VDSO
10184 - def_bool y
10185 + def_bool n
10186 prompt "Compat VDSO support"
10187 depends on X86_32 || IA32_EMULATION
10188 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10189 ---help---
10190 Map the 32-bit VDSO to the predictable old-style address too.
10191
10192 diff -urNp linux-2.6.39.4/arch/x86/Kconfig.cpu linux-2.6.39.4/arch/x86/Kconfig.cpu
10193 --- linux-2.6.39.4/arch/x86/Kconfig.cpu 2011-05-19 00:06:34.000000000 -0400
10194 +++ linux-2.6.39.4/arch/x86/Kconfig.cpu 2011-08-05 19:44:33.000000000 -0400
10195 @@ -334,7 +334,7 @@ config X86_PPRO_FENCE
10196
10197 config X86_F00F_BUG
10198 def_bool y
10199 - depends on M586MMX || M586TSC || M586 || M486 || M386
10200 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10201
10202 config X86_INVD_BUG
10203 def_bool y
10204 @@ -358,7 +358,7 @@ config X86_POPAD_OK
10205
10206 config X86_ALIGNMENT_16
10207 def_bool y
10208 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10209 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10210
10211 config X86_INTEL_USERCOPY
10212 def_bool y
10213 @@ -404,7 +404,7 @@ config X86_CMPXCHG64
10214 # generates cmov.
10215 config X86_CMOV
10216 def_bool y
10217 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10218 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10219
10220 config X86_MINIMUM_CPU_FAMILY
10221 int
10222 diff -urNp linux-2.6.39.4/arch/x86/Kconfig.debug linux-2.6.39.4/arch/x86/Kconfig.debug
10223 --- linux-2.6.39.4/arch/x86/Kconfig.debug 2011-05-19 00:06:34.000000000 -0400
10224 +++ linux-2.6.39.4/arch/x86/Kconfig.debug 2011-08-05 19:44:33.000000000 -0400
10225 @@ -101,7 +101,7 @@ config X86_PTDUMP
10226 config DEBUG_RODATA
10227 bool "Write protect kernel read-only data structures"
10228 default y
10229 - depends on DEBUG_KERNEL
10230 + depends on DEBUG_KERNEL && BROKEN
10231 ---help---
10232 Mark the kernel read-only data as write-protected in the pagetables,
10233 in order to catch accidental (and incorrect) writes to such const
10234 @@ -119,7 +119,7 @@ config DEBUG_RODATA_TEST
10235
10236 config DEBUG_SET_MODULE_RONX
10237 bool "Set loadable kernel module data as NX and text as RO"
10238 - depends on MODULES
10239 + depends on MODULES && BROKEN
10240 ---help---
10241 This option helps catch unintended modifications to loadable
10242 kernel module's text and read-only data. It also prevents execution
10243 diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile
10244 --- linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile 2011-05-19 00:06:34.000000000 -0400
10245 +++ linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile 2011-08-05 20:34:06.000000000 -0400
10246 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10247 $(call cc-option, -fno-stack-protector) \
10248 $(call cc-option, -mpreferred-stack-boundary=2)
10249 KBUILD_CFLAGS += $(call cc-option, -m32)
10250 +ifdef CONSTIFY_PLUGIN
10251 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10252 +endif
10253 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10254 GCOV_PROFILE := n
10255
10256 diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S
10257 --- linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-09 09:18:51.000000000 -0400
10258 +++ linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-05 19:44:33.000000000 -0400
10259 @@ -108,6 +108,9 @@ wakeup_code:
10260 /* Do any other stuff... */
10261
10262 #ifndef CONFIG_64BIT
10263 + /* Recheck NX bit overrides (64bit path does this in trampoline */
10264 + call verify_cpu
10265 +
10266 /* This could also be done in C code... */
10267 movl pmode_cr3, %eax
10268 movl %eax, %cr3
10269 @@ -131,6 +134,7 @@ wakeup_code:
10270 movl pmode_cr0, %eax
10271 movl %eax, %cr0
10272 jmp pmode_return
10273 +# include "../../verify_cpu.S"
10274 #else
10275 pushw $0
10276 pushw trampoline_segment
10277 diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c
10278 --- linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c 2011-07-09 09:18:51.000000000 -0400
10279 +++ linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c 2011-08-05 19:44:33.000000000 -0400
10280 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10281 header->trampoline_segment = trampoline_address() >> 4;
10282 #ifdef CONFIG_SMP
10283 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10284 +
10285 + pax_open_kernel();
10286 early_gdt_descr.address =
10287 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10288 + pax_close_kernel();
10289 +
10290 initial_gs = per_cpu_offset(smp_processor_id());
10291 #endif
10292 initial_code = (unsigned long)wakeup_long64;
10293 diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S
10294 --- linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S 2011-05-19 00:06:34.000000000 -0400
10295 +++ linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S 2011-08-05 19:44:33.000000000 -0400
10296 @@ -30,13 +30,11 @@ wakeup_pmode_return:
10297 # and restore the stack ... but you need gdt for this to work
10298 movl saved_context_esp, %esp
10299
10300 - movl %cs:saved_magic, %eax
10301 - cmpl $0x12345678, %eax
10302 + cmpl $0x12345678, saved_magic
10303 jne bogus_magic
10304
10305 # jump to place where we left off
10306 - movl saved_eip, %eax
10307 - jmp *%eax
10308 + jmp *(saved_eip)
10309
10310 bogus_magic:
10311 jmp bogus_magic
10312 diff -urNp linux-2.6.39.4/arch/x86/kernel/alternative.c linux-2.6.39.4/arch/x86/kernel/alternative.c
10313 --- linux-2.6.39.4/arch/x86/kernel/alternative.c 2011-05-19 00:06:34.000000000 -0400
10314 +++ linux-2.6.39.4/arch/x86/kernel/alternative.c 2011-08-05 19:44:33.000000000 -0400
10315 @@ -248,7 +248,7 @@ static void alternatives_smp_lock(const
10316 if (!*poff || ptr < text || ptr >= text_end)
10317 continue;
10318 /* turn DS segment override prefix into lock prefix */
10319 - if (*ptr == 0x3e)
10320 + if (*ktla_ktva(ptr) == 0x3e)
10321 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10322 };
10323 mutex_unlock(&text_mutex);
10324 @@ -269,7 +269,7 @@ static void alternatives_smp_unlock(cons
10325 if (!*poff || ptr < text || ptr >= text_end)
10326 continue;
10327 /* turn lock prefix into DS segment override prefix */
10328 - if (*ptr == 0xf0)
10329 + if (*ktla_ktva(ptr) == 0xf0)
10330 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10331 };
10332 mutex_unlock(&text_mutex);
10333 @@ -438,7 +438,7 @@ void __init_or_module apply_paravirt(str
10334
10335 BUG_ON(p->len > MAX_PATCH_LEN);
10336 /* prep the buffer with the original instructions */
10337 - memcpy(insnbuf, p->instr, p->len);
10338 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10339 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10340 (unsigned long)p->instr, p->len);
10341
10342 @@ -506,7 +506,7 @@ void __init alternative_instructions(voi
10343 if (smp_alt_once)
10344 free_init_pages("SMP alternatives",
10345 (unsigned long)__smp_locks,
10346 - (unsigned long)__smp_locks_end);
10347 + PAGE_ALIGN((unsigned long)__smp_locks_end));
10348
10349 restart_nmi();
10350 }
10351 @@ -523,13 +523,17 @@ void __init alternative_instructions(voi
10352 * instructions. And on the local CPU you need to be protected again NMI or MCE
10353 * handlers seeing an inconsistent instruction while you patch.
10354 */
10355 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
10356 +void *__kprobes text_poke_early(void *addr, const void *opcode,
10357 size_t len)
10358 {
10359 unsigned long flags;
10360 local_irq_save(flags);
10361 - memcpy(addr, opcode, len);
10362 +
10363 + pax_open_kernel();
10364 + memcpy(ktla_ktva(addr), opcode, len);
10365 sync_core();
10366 + pax_close_kernel();
10367 +
10368 local_irq_restore(flags);
10369 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10370 that causes hangs on some VIA CPUs. */
10371 @@ -551,36 +555,22 @@ void *__init_or_module text_poke_early(v
10372 */
10373 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10374 {
10375 - unsigned long flags;
10376 - char *vaddr;
10377 + unsigned char *vaddr = ktla_ktva(addr);
10378 struct page *pages[2];
10379 - int i;
10380 + size_t i;
10381
10382 if (!core_kernel_text((unsigned long)addr)) {
10383 - pages[0] = vmalloc_to_page(addr);
10384 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10385 + pages[0] = vmalloc_to_page(vaddr);
10386 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10387 } else {
10388 - pages[0] = virt_to_page(addr);
10389 + pages[0] = virt_to_page(vaddr);
10390 WARN_ON(!PageReserved(pages[0]));
10391 - pages[1] = virt_to_page(addr + PAGE_SIZE);
10392 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10393 }
10394 BUG_ON(!pages[0]);
10395 - local_irq_save(flags);
10396 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10397 - if (pages[1])
10398 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10399 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10400 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10401 - clear_fixmap(FIX_TEXT_POKE0);
10402 - if (pages[1])
10403 - clear_fixmap(FIX_TEXT_POKE1);
10404 - local_flush_tlb();
10405 - sync_core();
10406 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
10407 - that causes hangs on some VIA CPUs. */
10408 + text_poke_early(addr, opcode, len);
10409 for (i = 0; i < len; i++)
10410 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10411 - local_irq_restore(flags);
10412 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10413 return addr;
10414 }
10415
10416 @@ -682,9 +672,9 @@ void __kprobes text_poke_smp_batch(struc
10417 #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
10418
10419 #ifdef CONFIG_X86_64
10420 -unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
10421 +unsigned char ideal_nop5[5] __read_only = { 0x66, 0x66, 0x66, 0x66, 0x90 };
10422 #else
10423 -unsigned char ideal_nop5[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
10424 +unsigned char ideal_nop5[5] __read_only = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
10425 #endif
10426
10427 void __init arch_init_ideal_nop5(void)
10428 diff -urNp linux-2.6.39.4/arch/x86/kernel/apic/apic.c linux-2.6.39.4/arch/x86/kernel/apic/apic.c
10429 --- linux-2.6.39.4/arch/x86/kernel/apic/apic.c 2011-05-19 00:06:34.000000000 -0400
10430 +++ linux-2.6.39.4/arch/x86/kernel/apic/apic.c 2011-08-17 20:01:50.000000000 -0400
10431 @@ -173,7 +173,7 @@ int first_system_vector = 0xfe;
10432 /*
10433 * Debug level, exported for io_apic.c
10434 */
10435 -unsigned int apic_verbosity;
10436 +int apic_verbosity;
10437
10438 int pic_mode;
10439
10440 @@ -1821,7 +1821,7 @@ void smp_error_interrupt(struct pt_regs
10441 apic_write(APIC_ESR, 0);
10442 v1 = apic_read(APIC_ESR);
10443 ack_APIC_irq();
10444 - atomic_inc(&irq_err_count);
10445 + atomic_inc_unchecked(&irq_err_count);
10446
10447 /*
10448 * Here is what the APIC error bits mean:
10449 @@ -2204,6 +2204,8 @@ static int __cpuinit apic_cluster_num(vo
10450 u16 *bios_cpu_apicid;
10451 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10452
10453 + pax_track_stack();
10454 +
10455 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10456 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10457
10458 diff -urNp linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c
10459 --- linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c 2011-06-03 00:04:13.000000000 -0400
10460 +++ linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c 2011-08-05 19:44:33.000000000 -0400
10461 @@ -623,7 +623,7 @@ struct IO_APIC_route_entry **alloc_ioapi
10462 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
10463 GFP_ATOMIC);
10464 if (!ioapic_entries)
10465 - return 0;
10466 + return NULL;
10467
10468 for (apic = 0; apic < nr_ioapics; apic++) {
10469 ioapic_entries[apic] =
10470 @@ -640,7 +640,7 @@ nomem:
10471 kfree(ioapic_entries[apic]);
10472 kfree(ioapic_entries);
10473
10474 - return 0;
10475 + return NULL;
10476 }
10477
10478 /*
10479 @@ -1040,7 +1040,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10480 }
10481 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10482
10483 -void lock_vector_lock(void)
10484 +void lock_vector_lock(void) __acquires(vector_lock)
10485 {
10486 /* Used to the online set of cpus does not change
10487 * during assign_irq_vector.
10488 @@ -1048,7 +1048,7 @@ void lock_vector_lock(void)
10489 raw_spin_lock(&vector_lock);
10490 }
10491
10492 -void unlock_vector_lock(void)
10493 +void unlock_vector_lock(void) __releases(vector_lock)
10494 {
10495 raw_spin_unlock(&vector_lock);
10496 }
10497 @@ -2379,7 +2379,7 @@ static void ack_apic_edge(struct irq_dat
10498 ack_APIC_irq();
10499 }
10500
10501 -atomic_t irq_mis_count;
10502 +atomic_unchecked_t irq_mis_count;
10503
10504 /*
10505 * IO-APIC versions below 0x20 don't support EOI register.
10506 @@ -2487,7 +2487,7 @@ static void ack_apic_level(struct irq_da
10507 * at the cpu.
10508 */
10509 if (!(v & (1 << (i & 0x1f)))) {
10510 - atomic_inc(&irq_mis_count);
10511 + atomic_inc_unchecked(&irq_mis_count);
10512
10513 eoi_ioapic_irq(irq, cfg);
10514 }
10515 diff -urNp linux-2.6.39.4/arch/x86/kernel/apm_32.c linux-2.6.39.4/arch/x86/kernel/apm_32.c
10516 --- linux-2.6.39.4/arch/x86/kernel/apm_32.c 2011-05-19 00:06:34.000000000 -0400
10517 +++ linux-2.6.39.4/arch/x86/kernel/apm_32.c 2011-08-05 19:44:33.000000000 -0400
10518 @@ -412,7 +412,7 @@ static DEFINE_MUTEX(apm_mutex);
10519 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10520 * even though they are called in protected mode.
10521 */
10522 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10523 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10524 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10525
10526 static const char driver_version[] = "1.16ac"; /* no spaces */
10527 @@ -590,7 +590,10 @@ static long __apm_bios_call(void *_call)
10528 BUG_ON(cpu != 0);
10529 gdt = get_cpu_gdt_table(cpu);
10530 save_desc_40 = gdt[0x40 / 8];
10531 +
10532 + pax_open_kernel();
10533 gdt[0x40 / 8] = bad_bios_desc;
10534 + pax_close_kernel();
10535
10536 apm_irq_save(flags);
10537 APM_DO_SAVE_SEGS;
10538 @@ -599,7 +602,11 @@ static long __apm_bios_call(void *_call)
10539 &call->esi);
10540 APM_DO_RESTORE_SEGS;
10541 apm_irq_restore(flags);
10542 +
10543 + pax_open_kernel();
10544 gdt[0x40 / 8] = save_desc_40;
10545 + pax_close_kernel();
10546 +
10547 put_cpu();
10548
10549 return call->eax & 0xff;
10550 @@ -666,7 +673,10 @@ static long __apm_bios_call_simple(void
10551 BUG_ON(cpu != 0);
10552 gdt = get_cpu_gdt_table(cpu);
10553 save_desc_40 = gdt[0x40 / 8];
10554 +
10555 + pax_open_kernel();
10556 gdt[0x40 / 8] = bad_bios_desc;
10557 + pax_close_kernel();
10558
10559 apm_irq_save(flags);
10560 APM_DO_SAVE_SEGS;
10561 @@ -674,7 +684,11 @@ static long __apm_bios_call_simple(void
10562 &call->eax);
10563 APM_DO_RESTORE_SEGS;
10564 apm_irq_restore(flags);
10565 +
10566 + pax_open_kernel();
10567 gdt[0x40 / 8] = save_desc_40;
10568 + pax_close_kernel();
10569 +
10570 put_cpu();
10571 return error;
10572 }
10573 @@ -2351,12 +2365,15 @@ static int __init apm_init(void)
10574 * code to that CPU.
10575 */
10576 gdt = get_cpu_gdt_table(0);
10577 +
10578 + pax_open_kernel();
10579 set_desc_base(&gdt[APM_CS >> 3],
10580 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10581 set_desc_base(&gdt[APM_CS_16 >> 3],
10582 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10583 set_desc_base(&gdt[APM_DS >> 3],
10584 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10585 + pax_close_kernel();
10586
10587 proc_create("apm", 0, NULL, &apm_file_ops);
10588
10589 diff -urNp linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c
10590 --- linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c 2011-05-19 00:06:34.000000000 -0400
10591 +++ linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c 2011-08-05 19:44:33.000000000 -0400
10592 @@ -69,6 +69,7 @@ int main(void)
10593 BLANK();
10594 #undef ENTRY
10595
10596 + DEFINE(TSS_size, sizeof(struct tss_struct));
10597 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10598 BLANK();
10599
10600 diff -urNp linux-2.6.39.4/arch/x86/kernel/asm-offsets.c linux-2.6.39.4/arch/x86/kernel/asm-offsets.c
10601 --- linux-2.6.39.4/arch/x86/kernel/asm-offsets.c 2011-05-19 00:06:34.000000000 -0400
10602 +++ linux-2.6.39.4/arch/x86/kernel/asm-offsets.c 2011-08-05 19:44:33.000000000 -0400
10603 @@ -33,6 +33,8 @@ void common(void) {
10604 OFFSET(TI_status, thread_info, status);
10605 OFFSET(TI_addr_limit, thread_info, addr_limit);
10606 OFFSET(TI_preempt_count, thread_info, preempt_count);
10607 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10608 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10609
10610 BLANK();
10611 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10612 @@ -53,8 +55,26 @@ void common(void) {
10613 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10614 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10615 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10616 +
10617 +#ifdef CONFIG_PAX_KERNEXEC
10618 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10619 +#endif
10620 +
10621 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10622 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10623 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10624 +#ifdef CONFIG_X86_64
10625 + OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
10626 +#endif
10627 #endif
10628
10629 +#endif
10630 +
10631 + BLANK();
10632 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10633 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10634 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10635 +
10636 #ifdef CONFIG_XEN
10637 BLANK();
10638 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10639 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/amd.c linux-2.6.39.4/arch/x86/kernel/cpu/amd.c
10640 --- linux-2.6.39.4/arch/x86/kernel/cpu/amd.c 2011-06-03 00:04:13.000000000 -0400
10641 +++ linux-2.6.39.4/arch/x86/kernel/cpu/amd.c 2011-08-05 19:44:33.000000000 -0400
10642 @@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10643 unsigned int size)
10644 {
10645 /* AMD errata T13 (order #21922) */
10646 - if ((c->x86 == 6)) {
10647 + if (c->x86 == 6) {
10648 /* Duron Rev A0 */
10649 if (c->x86_model == 3 && c->x86_mask == 0)
10650 size = 64;
10651 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/common.c linux-2.6.39.4/arch/x86/kernel/cpu/common.c
10652 --- linux-2.6.39.4/arch/x86/kernel/cpu/common.c 2011-06-03 00:04:13.000000000 -0400
10653 +++ linux-2.6.39.4/arch/x86/kernel/cpu/common.c 2011-08-05 19:44:33.000000000 -0400
10654 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10655
10656 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10657
10658 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10659 -#ifdef CONFIG_X86_64
10660 - /*
10661 - * We need valid kernel segments for data and code in long mode too
10662 - * IRET will check the segment types kkeil 2000/10/28
10663 - * Also sysret mandates a special GDT layout
10664 - *
10665 - * TLS descriptors are currently at a different place compared to i386.
10666 - * Hopefully nobody expects them at a fixed place (Wine?)
10667 - */
10668 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10669 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10670 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10671 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10672 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10673 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10674 -#else
10675 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10676 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10677 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10678 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10679 - /*
10680 - * Segments used for calling PnP BIOS have byte granularity.
10681 - * They code segments and data segments have fixed 64k limits,
10682 - * the transfer segment sizes are set at run time.
10683 - */
10684 - /* 32-bit code */
10685 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10686 - /* 16-bit code */
10687 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10688 - /* 16-bit data */
10689 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10690 - /* 16-bit data */
10691 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10692 - /* 16-bit data */
10693 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10694 - /*
10695 - * The APM segments have byte granularity and their bases
10696 - * are set at run time. All have 64k limits.
10697 - */
10698 - /* 32-bit code */
10699 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10700 - /* 16-bit code */
10701 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10702 - /* data */
10703 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10704 -
10705 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10706 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10707 - GDT_STACK_CANARY_INIT
10708 -#endif
10709 -} };
10710 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10711 -
10712 static int __init x86_xsave_setup(char *s)
10713 {
10714 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10715 @@ -352,7 +298,7 @@ void switch_to_new_gdt(int cpu)
10716 {
10717 struct desc_ptr gdt_descr;
10718
10719 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10720 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10721 gdt_descr.size = GDT_SIZE - 1;
10722 load_gdt(&gdt_descr);
10723 /* Reload the per-cpu base */
10724 @@ -824,6 +770,10 @@ static void __cpuinit identify_cpu(struc
10725 /* Filter out anything that depends on CPUID levels we don't have */
10726 filter_cpuid_features(c, true);
10727
10728 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10729 + setup_clear_cpu_cap(X86_FEATURE_SEP);
10730 +#endif
10731 +
10732 /* If the model name is still unset, do table lookup. */
10733 if (!c->x86_model_id[0]) {
10734 const char *p;
10735 @@ -1003,6 +953,9 @@ static __init int setup_disablecpuid(cha
10736 }
10737 __setup("clearcpuid=", setup_disablecpuid);
10738
10739 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10740 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
10741 +
10742 #ifdef CONFIG_X86_64
10743 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10744
10745 @@ -1018,7 +971,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10746 EXPORT_PER_CPU_SYMBOL(current_task);
10747
10748 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10749 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10750 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10751 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10752
10753 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10754 @@ -1083,7 +1036,7 @@ struct pt_regs * __cpuinit idle_regs(str
10755 {
10756 memset(regs, 0, sizeof(struct pt_regs));
10757 regs->fs = __KERNEL_PERCPU;
10758 - regs->gs = __KERNEL_STACK_CANARY;
10759 + savesegment(gs, regs->gs);
10760
10761 return regs;
10762 }
10763 @@ -1138,7 +1091,7 @@ void __cpuinit cpu_init(void)
10764 int i;
10765
10766 cpu = stack_smp_processor_id();
10767 - t = &per_cpu(init_tss, cpu);
10768 + t = init_tss + cpu;
10769 oist = &per_cpu(orig_ist, cpu);
10770
10771 #ifdef CONFIG_NUMA
10772 @@ -1164,7 +1117,7 @@ void __cpuinit cpu_init(void)
10773 switch_to_new_gdt(cpu);
10774 loadsegment(fs, 0);
10775
10776 - load_idt((const struct desc_ptr *)&idt_descr);
10777 + load_idt(&idt_descr);
10778
10779 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10780 syscall_init();
10781 @@ -1173,7 +1126,6 @@ void __cpuinit cpu_init(void)
10782 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10783 barrier();
10784
10785 - x86_configure_nx();
10786 if (cpu != 0)
10787 enable_x2apic();
10788
10789 @@ -1227,7 +1179,7 @@ void __cpuinit cpu_init(void)
10790 {
10791 int cpu = smp_processor_id();
10792 struct task_struct *curr = current;
10793 - struct tss_struct *t = &per_cpu(init_tss, cpu);
10794 + struct tss_struct *t = init_tss + cpu;
10795 struct thread_struct *thread = &curr->thread;
10796
10797 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10798 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/intel.c linux-2.6.39.4/arch/x86/kernel/cpu/intel.c
10799 --- linux-2.6.39.4/arch/x86/kernel/cpu/intel.c 2011-05-19 00:06:34.000000000 -0400
10800 +++ linux-2.6.39.4/arch/x86/kernel/cpu/intel.c 2011-08-05 19:44:33.000000000 -0400
10801 @@ -161,7 +161,7 @@ static void __cpuinit trap_init_f00f_bug
10802 * Update the IDT descriptor and reload the IDT so that
10803 * it uses the read-only mapped virtual address.
10804 */
10805 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10806 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10807 load_idt(&idt_descr);
10808 }
10809 #endif
10810 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/Makefile linux-2.6.39.4/arch/x86/kernel/cpu/Makefile
10811 --- linux-2.6.39.4/arch/x86/kernel/cpu/Makefile 2011-05-19 00:06:34.000000000 -0400
10812 +++ linux-2.6.39.4/arch/x86/kernel/cpu/Makefile 2011-08-05 19:44:33.000000000 -0400
10813 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10814 CFLAGS_REMOVE_perf_event.o = -pg
10815 endif
10816
10817 -# Make sure load_percpu_segment has no stackprotector
10818 -nostackp := $(call cc-option, -fno-stack-protector)
10819 -CFLAGS_common.o := $(nostackp)
10820 -
10821 obj-y := intel_cacheinfo.o scattered.o topology.o
10822 obj-y += proc.o capflags.o powerflags.o common.o
10823 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
10824 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c
10825 --- linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-19 00:06:34.000000000 -0400
10826 +++ linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-05 19:44:33.000000000 -0400
10827 @@ -46,6 +46,7 @@
10828 #include <asm/ipi.h>
10829 #include <asm/mce.h>
10830 #include <asm/msr.h>
10831 +#include <asm/local.h>
10832
10833 #include "mce-internal.h"
10834
10835 @@ -220,7 +221,7 @@ static void print_mce(struct mce *m)
10836 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10837 m->cs, m->ip);
10838
10839 - if (m->cs == __KERNEL_CS)
10840 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10841 print_symbol("{%s}", m->ip);
10842 pr_cont("\n");
10843 }
10844 @@ -244,10 +245,10 @@ static void print_mce(struct mce *m)
10845
10846 #define PANIC_TIMEOUT 5 /* 5 seconds */
10847
10848 -static atomic_t mce_paniced;
10849 +static atomic_unchecked_t mce_paniced;
10850
10851 static int fake_panic;
10852 -static atomic_t mce_fake_paniced;
10853 +static atomic_unchecked_t mce_fake_paniced;
10854
10855 /* Panic in progress. Enable interrupts and wait for final IPI */
10856 static void wait_for_panic(void)
10857 @@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
10858 /*
10859 * Make sure only one CPU runs in machine check panic
10860 */
10861 - if (atomic_inc_return(&mce_paniced) > 1)
10862 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
10863 wait_for_panic();
10864 barrier();
10865
10866 @@ -279,7 +280,7 @@ static void mce_panic(char *msg, struct
10867 console_verbose();
10868 } else {
10869 /* Don't log too much for fake panic */
10870 - if (atomic_inc_return(&mce_fake_paniced) > 1)
10871 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
10872 return;
10873 }
10874 /* First print corrected ones that are still unlogged */
10875 @@ -647,7 +648,7 @@ static int mce_timed_out(u64 *t)
10876 * might have been modified by someone else.
10877 */
10878 rmb();
10879 - if (atomic_read(&mce_paniced))
10880 + if (atomic_read_unchecked(&mce_paniced))
10881 wait_for_panic();
10882 if (!monarch_timeout)
10883 goto out;
10884 @@ -1461,14 +1462,14 @@ void __cpuinit mcheck_cpu_init(struct cp
10885 */
10886
10887 static DEFINE_SPINLOCK(mce_state_lock);
10888 -static int open_count; /* #times opened */
10889 +static local_t open_count; /* #times opened */
10890 static int open_exclu; /* already open exclusive? */
10891
10892 static int mce_open(struct inode *inode, struct file *file)
10893 {
10894 spin_lock(&mce_state_lock);
10895
10896 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
10897 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
10898 spin_unlock(&mce_state_lock);
10899
10900 return -EBUSY;
10901 @@ -1476,7 +1477,7 @@ static int mce_open(struct inode *inode,
10902
10903 if (file->f_flags & O_EXCL)
10904 open_exclu = 1;
10905 - open_count++;
10906 + local_inc(&open_count);
10907
10908 spin_unlock(&mce_state_lock);
10909
10910 @@ -1487,7 +1488,7 @@ static int mce_release(struct inode *ino
10911 {
10912 spin_lock(&mce_state_lock);
10913
10914 - open_count--;
10915 + local_dec(&open_count);
10916 open_exclu = 0;
10917
10918 spin_unlock(&mce_state_lock);
10919 @@ -2174,7 +2175,7 @@ struct dentry *mce_get_debugfs_dir(void)
10920 static void mce_reset(void)
10921 {
10922 cpu_missing = 0;
10923 - atomic_set(&mce_fake_paniced, 0);
10924 + atomic_set_unchecked(&mce_fake_paniced, 0);
10925 atomic_set(&mce_executing, 0);
10926 atomic_set(&mce_callin, 0);
10927 atomic_set(&global_nwo, 0);
10928 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c
10929 --- linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-05-19 00:06:34.000000000 -0400
10930 +++ linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-05 20:34:06.000000000 -0400
10931 @@ -215,7 +215,9 @@ static int inject_init(void)
10932 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
10933 return -ENOMEM;
10934 printk(KERN_INFO "Machine check injector initialized\n");
10935 - mce_chrdev_ops.write = mce_write;
10936 + pax_open_kernel();
10937 + *(void **)&mce_chrdev_ops.write = mce_write;
10938 + pax_close_kernel();
10939 register_die_notifier(&mce_raise_nb);
10940 return 0;
10941 }
10942 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c
10943 --- linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c 2011-05-19 00:06:34.000000000 -0400
10944 +++ linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c 2011-08-05 19:44:33.000000000 -0400
10945 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
10946 u64 size_or_mask, size_and_mask;
10947 static bool mtrr_aps_delayed_init;
10948
10949 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
10950 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
10951
10952 const struct mtrr_ops *mtrr_if;
10953
10954 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h
10955 --- linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-05-19 00:06:34.000000000 -0400
10956 +++ linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-05 20:34:06.000000000 -0400
10957 @@ -12,8 +12,8 @@
10958 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
10959
10960 struct mtrr_ops {
10961 - u32 vendor;
10962 - u32 use_intel_if;
10963 + const u32 vendor;
10964 + const u32 use_intel_if;
10965 void (*set)(unsigned int reg, unsigned long base,
10966 unsigned long size, mtrr_type type);
10967 void (*set_all)(void);
10968 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c
10969 --- linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c 2011-05-19 00:06:34.000000000 -0400
10970 +++ linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c 2011-08-05 19:44:33.000000000 -0400
10971 @@ -774,6 +774,8 @@ static int x86_schedule_events(struct cp
10972 int i, j, w, wmax, num = 0;
10973 struct hw_perf_event *hwc;
10974
10975 + pax_track_stack();
10976 +
10977 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
10978
10979 for (i = 0; i < n; i++) {
10980 @@ -1878,7 +1880,7 @@ perf_callchain_user(struct perf_callchai
10981 break;
10982
10983 perf_callchain_store(entry, frame.return_address);
10984 - fp = frame.next_frame;
10985 + fp = (__force const void __user *)frame.next_frame;
10986 }
10987 }
10988
10989 diff -urNp linux-2.6.39.4/arch/x86/kernel/crash.c linux-2.6.39.4/arch/x86/kernel/crash.c
10990 --- linux-2.6.39.4/arch/x86/kernel/crash.c 2011-05-19 00:06:34.000000000 -0400
10991 +++ linux-2.6.39.4/arch/x86/kernel/crash.c 2011-08-05 19:44:33.000000000 -0400
10992 @@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
10993 regs = args->regs;
10994
10995 #ifdef CONFIG_X86_32
10996 - if (!user_mode_vm(regs)) {
10997 + if (!user_mode(regs)) {
10998 crash_fixup_ss_esp(&fixed_regs, regs);
10999 regs = &fixed_regs;
11000 }
11001 diff -urNp linux-2.6.39.4/arch/x86/kernel/doublefault_32.c linux-2.6.39.4/arch/x86/kernel/doublefault_32.c
11002 --- linux-2.6.39.4/arch/x86/kernel/doublefault_32.c 2011-05-19 00:06:34.000000000 -0400
11003 +++ linux-2.6.39.4/arch/x86/kernel/doublefault_32.c 2011-08-05 19:44:33.000000000 -0400
11004 @@ -11,7 +11,7 @@
11005
11006 #define DOUBLEFAULT_STACKSIZE (1024)
11007 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
11008 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
11009 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
11010
11011 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
11012
11013 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
11014 unsigned long gdt, tss;
11015
11016 store_gdt(&gdt_desc);
11017 - gdt = gdt_desc.address;
11018 + gdt = (unsigned long)gdt_desc.address;
11019
11020 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
11021
11022 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
11023 /* 0x2 bit is always set */
11024 .flags = X86_EFLAGS_SF | 0x2,
11025 .sp = STACK_START,
11026 - .es = __USER_DS,
11027 + .es = __KERNEL_DS,
11028 .cs = __KERNEL_CS,
11029 .ss = __KERNEL_DS,
11030 - .ds = __USER_DS,
11031 + .ds = __KERNEL_DS,
11032 .fs = __KERNEL_PERCPU,
11033
11034 .__cr3 = __pa_nodebug(swapper_pg_dir),
11035 diff -urNp linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c
11036 --- linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c 2011-05-19 00:06:34.000000000 -0400
11037 +++ linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c 2011-08-05 19:44:33.000000000 -0400
11038 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
11039 bp = stack_frame(task, regs);
11040
11041 for (;;) {
11042 - struct thread_info *context;
11043 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11044
11045 - context = (struct thread_info *)
11046 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
11047 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
11048 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11049
11050 - stack = (unsigned long *)context->previous_esp;
11051 - if (!stack)
11052 + if (stack_start == task_stack_page(task))
11053 break;
11054 + stack = *(unsigned long **)stack_start;
11055 if (ops->stack(data, "IRQ") < 0)
11056 break;
11057 touch_nmi_watchdog();
11058 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
11059 * When in-kernel, we also print out the stack and code at the
11060 * time of the fault..
11061 */
11062 - if (!user_mode_vm(regs)) {
11063 + if (!user_mode(regs)) {
11064 unsigned int code_prologue = code_bytes * 43 / 64;
11065 unsigned int code_len = code_bytes;
11066 unsigned char c;
11067 u8 *ip;
11068 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
11069
11070 printk(KERN_EMERG "Stack:\n");
11071 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
11072
11073 printk(KERN_EMERG "Code: ");
11074
11075 - ip = (u8 *)regs->ip - code_prologue;
11076 + ip = (u8 *)regs->ip - code_prologue + cs_base;
11077 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11078 /* try starting at IP */
11079 - ip = (u8 *)regs->ip;
11080 + ip = (u8 *)regs->ip + cs_base;
11081 code_len = code_len - code_prologue + 1;
11082 }
11083 for (i = 0; i < code_len; i++, ip++) {
11084 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11085 printk(" Bad EIP value.");
11086 break;
11087 }
11088 - if (ip == (u8 *)regs->ip)
11089 + if (ip == (u8 *)regs->ip + cs_base)
11090 printk("<%02x> ", c);
11091 else
11092 printk("%02x ", c);
11093 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11094 {
11095 unsigned short ud2;
11096
11097 + ip = ktla_ktva(ip);
11098 if (ip < PAGE_OFFSET)
11099 return 0;
11100 if (probe_kernel_address((unsigned short *)ip, ud2))
11101 diff -urNp linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c
11102 --- linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c 2011-05-19 00:06:34.000000000 -0400
11103 +++ linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c 2011-08-05 19:44:33.000000000 -0400
11104 @@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11105 unsigned long *irq_stack_end =
11106 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11107 unsigned used = 0;
11108 - struct thread_info *tinfo;
11109 int graph = 0;
11110 unsigned long dummy;
11111 + void *stack_start;
11112
11113 if (!task)
11114 task = current;
11115 @@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11116 * current stack address. If the stacks consist of nested
11117 * exceptions
11118 */
11119 - tinfo = task_thread_info(task);
11120 for (;;) {
11121 char *id;
11122 unsigned long *estack_end;
11123 +
11124 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11125 &used, &id);
11126
11127 @@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11128 if (ops->stack(data, id) < 0)
11129 break;
11130
11131 - bp = ops->walk_stack(tinfo, stack, bp, ops,
11132 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11133 data, estack_end, &graph);
11134 ops->stack(data, "<EOE>");
11135 /*
11136 @@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11137 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11138 if (ops->stack(data, "IRQ") < 0)
11139 break;
11140 - bp = ops->walk_stack(tinfo, stack, bp,
11141 + bp = ops->walk_stack(task, irq_stack, stack, bp,
11142 ops, data, irq_stack_end, &graph);
11143 /*
11144 * We link to the next stack (which would be
11145 @@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11146 /*
11147 * This handles the process stack:
11148 */
11149 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11150 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11151 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11152 put_cpu();
11153 }
11154 EXPORT_SYMBOL(dump_trace);
11155 diff -urNp linux-2.6.39.4/arch/x86/kernel/dumpstack.c linux-2.6.39.4/arch/x86/kernel/dumpstack.c
11156 --- linux-2.6.39.4/arch/x86/kernel/dumpstack.c 2011-05-19 00:06:34.000000000 -0400
11157 +++ linux-2.6.39.4/arch/x86/kernel/dumpstack.c 2011-08-05 19:44:33.000000000 -0400
11158 @@ -2,6 +2,9 @@
11159 * Copyright (C) 1991, 1992 Linus Torvalds
11160 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11161 */
11162 +#ifdef CONFIG_GRKERNSEC_HIDESYM
11163 +#define __INCLUDED_BY_HIDESYM 1
11164 +#endif
11165 #include <linux/kallsyms.h>
11166 #include <linux/kprobes.h>
11167 #include <linux/uaccess.h>
11168 @@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11169 static void
11170 print_ftrace_graph_addr(unsigned long addr, void *data,
11171 const struct stacktrace_ops *ops,
11172 - struct thread_info *tinfo, int *graph)
11173 + struct task_struct *task, int *graph)
11174 {
11175 - struct task_struct *task = tinfo->task;
11176 unsigned long ret_addr;
11177 int index = task->curr_ret_stack;
11178
11179 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11180 static inline void
11181 print_ftrace_graph_addr(unsigned long addr, void *data,
11182 const struct stacktrace_ops *ops,
11183 - struct thread_info *tinfo, int *graph)
11184 + struct task_struct *task, int *graph)
11185 { }
11186 #endif
11187
11188 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11189 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11190 */
11191
11192 -static inline int valid_stack_ptr(struct thread_info *tinfo,
11193 - void *p, unsigned int size, void *end)
11194 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11195 {
11196 - void *t = tinfo;
11197 if (end) {
11198 if (p < end && p >= (end-THREAD_SIZE))
11199 return 1;
11200 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11201 }
11202
11203 unsigned long
11204 -print_context_stack(struct thread_info *tinfo,
11205 +print_context_stack(struct task_struct *task, void *stack_start,
11206 unsigned long *stack, unsigned long bp,
11207 const struct stacktrace_ops *ops, void *data,
11208 unsigned long *end, int *graph)
11209 {
11210 struct stack_frame *frame = (struct stack_frame *)bp;
11211
11212 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11213 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11214 unsigned long addr;
11215
11216 addr = *stack;
11217 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11218 } else {
11219 ops->address(data, addr, 0);
11220 }
11221 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11222 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11223 }
11224 stack++;
11225 }
11226 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11227 EXPORT_SYMBOL_GPL(print_context_stack);
11228
11229 unsigned long
11230 -print_context_stack_bp(struct thread_info *tinfo,
11231 +print_context_stack_bp(struct task_struct *task, void *stack_start,
11232 unsigned long *stack, unsigned long bp,
11233 const struct stacktrace_ops *ops, void *data,
11234 unsigned long *end, int *graph)
11235 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11236 struct stack_frame *frame = (struct stack_frame *)bp;
11237 unsigned long *ret_addr = &frame->return_address;
11238
11239 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11240 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11241 unsigned long addr = *ret_addr;
11242
11243 if (!__kernel_text_address(addr))
11244 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11245 ops->address(data, addr, 1);
11246 frame = frame->next_frame;
11247 ret_addr = &frame->return_address;
11248 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11249 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11250 }
11251
11252 return (unsigned long)frame;
11253 @@ -202,7 +202,7 @@ void dump_stack(void)
11254
11255 bp = stack_frame(current, NULL);
11256 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11257 - current->pid, current->comm, print_tainted(),
11258 + task_pid_nr(current), current->comm, print_tainted(),
11259 init_utsname()->release,
11260 (int)strcspn(init_utsname()->version, " "),
11261 init_utsname()->version);
11262 @@ -238,6 +238,8 @@ unsigned __kprobes long oops_begin(void)
11263 }
11264 EXPORT_SYMBOL_GPL(oops_begin);
11265
11266 +extern void gr_handle_kernel_exploit(void);
11267 +
11268 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11269 {
11270 if (regs && kexec_should_crash(current))
11271 @@ -259,7 +261,10 @@ void __kprobes oops_end(unsigned long fl
11272 panic("Fatal exception in interrupt");
11273 if (panic_on_oops)
11274 panic("Fatal exception");
11275 - do_exit(signr);
11276 +
11277 + gr_handle_kernel_exploit();
11278 +
11279 + do_group_exit(signr);
11280 }
11281
11282 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11283 @@ -286,7 +291,7 @@ int __kprobes __die(const char *str, str
11284
11285 show_registers(regs);
11286 #ifdef CONFIG_X86_32
11287 - if (user_mode_vm(regs)) {
11288 + if (user_mode(regs)) {
11289 sp = regs->sp;
11290 ss = regs->ss & 0xffff;
11291 } else {
11292 @@ -314,7 +319,7 @@ void die(const char *str, struct pt_regs
11293 unsigned long flags = oops_begin();
11294 int sig = SIGSEGV;
11295
11296 - if (!user_mode_vm(regs))
11297 + if (!user_mode(regs))
11298 report_bug(regs->ip, regs);
11299
11300 if (__die(str, regs, err))
11301 diff -urNp linux-2.6.39.4/arch/x86/kernel/early_printk.c linux-2.6.39.4/arch/x86/kernel/early_printk.c
11302 --- linux-2.6.39.4/arch/x86/kernel/early_printk.c 2011-05-19 00:06:34.000000000 -0400
11303 +++ linux-2.6.39.4/arch/x86/kernel/early_printk.c 2011-08-05 19:44:33.000000000 -0400
11304 @@ -7,6 +7,7 @@
11305 #include <linux/pci_regs.h>
11306 #include <linux/pci_ids.h>
11307 #include <linux/errno.h>
11308 +#include <linux/sched.h>
11309 #include <asm/io.h>
11310 #include <asm/processor.h>
11311 #include <asm/fcntl.h>
11312 @@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11313 int n;
11314 va_list ap;
11315
11316 + pax_track_stack();
11317 +
11318 va_start(ap, fmt);
11319 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11320 early_console->write(early_console, buf, n);
11321 diff -urNp linux-2.6.39.4/arch/x86/kernel/entry_32.S linux-2.6.39.4/arch/x86/kernel/entry_32.S
11322 --- linux-2.6.39.4/arch/x86/kernel/entry_32.S 2011-05-19 00:06:34.000000000 -0400
11323 +++ linux-2.6.39.4/arch/x86/kernel/entry_32.S 2011-08-05 19:44:33.000000000 -0400
11324 @@ -185,13 +185,146 @@
11325 /*CFI_REL_OFFSET gs, PT_GS*/
11326 .endm
11327 .macro SET_KERNEL_GS reg
11328 +
11329 +#ifdef CONFIG_CC_STACKPROTECTOR
11330 movl $(__KERNEL_STACK_CANARY), \reg
11331 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11332 + movl $(__USER_DS), \reg
11333 +#else
11334 + xorl \reg, \reg
11335 +#endif
11336 +
11337 movl \reg, %gs
11338 .endm
11339
11340 #endif /* CONFIG_X86_32_LAZY_GS */
11341
11342 -.macro SAVE_ALL
11343 +.macro pax_enter_kernel
11344 +#ifdef CONFIG_PAX_KERNEXEC
11345 + call pax_enter_kernel
11346 +#endif
11347 +.endm
11348 +
11349 +.macro pax_exit_kernel
11350 +#ifdef CONFIG_PAX_KERNEXEC
11351 + call pax_exit_kernel
11352 +#endif
11353 +.endm
11354 +
11355 +#ifdef CONFIG_PAX_KERNEXEC
11356 +ENTRY(pax_enter_kernel)
11357 +#ifdef CONFIG_PARAVIRT
11358 + pushl %eax
11359 + pushl %ecx
11360 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11361 + mov %eax, %esi
11362 +#else
11363 + mov %cr0, %esi
11364 +#endif
11365 + bts $16, %esi
11366 + jnc 1f
11367 + mov %cs, %esi
11368 + cmp $__KERNEL_CS, %esi
11369 + jz 3f
11370 + ljmp $__KERNEL_CS, $3f
11371 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11372 +2:
11373 +#ifdef CONFIG_PARAVIRT
11374 + mov %esi, %eax
11375 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11376 +#else
11377 + mov %esi, %cr0
11378 +#endif
11379 +3:
11380 +#ifdef CONFIG_PARAVIRT
11381 + popl %ecx
11382 + popl %eax
11383 +#endif
11384 + ret
11385 +ENDPROC(pax_enter_kernel)
11386 +
11387 +ENTRY(pax_exit_kernel)
11388 +#ifdef CONFIG_PARAVIRT
11389 + pushl %eax
11390 + pushl %ecx
11391 +#endif
11392 + mov %cs, %esi
11393 + cmp $__KERNEXEC_KERNEL_CS, %esi
11394 + jnz 2f
11395 +#ifdef CONFIG_PARAVIRT
11396 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11397 + mov %eax, %esi
11398 +#else
11399 + mov %cr0, %esi
11400 +#endif
11401 + btr $16, %esi
11402 + ljmp $__KERNEL_CS, $1f
11403 +1:
11404 +#ifdef CONFIG_PARAVIRT
11405 + mov %esi, %eax
11406 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11407 +#else
11408 + mov %esi, %cr0
11409 +#endif
11410 +2:
11411 +#ifdef CONFIG_PARAVIRT
11412 + popl %ecx
11413 + popl %eax
11414 +#endif
11415 + ret
11416 +ENDPROC(pax_exit_kernel)
11417 +#endif
11418 +
11419 +.macro pax_erase_kstack
11420 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11421 + call pax_erase_kstack
11422 +#endif
11423 +.endm
11424 +
11425 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11426 +/*
11427 + * ebp: thread_info
11428 + * ecx, edx: can be clobbered
11429 + */
11430 +ENTRY(pax_erase_kstack)
11431 + pushl %edi
11432 + pushl %eax
11433 +
11434 + mov TI_lowest_stack(%ebp), %edi
11435 + mov $-0xBEEF, %eax
11436 + std
11437 +
11438 +1: mov %edi, %ecx
11439 + and $THREAD_SIZE_asm - 1, %ecx
11440 + shr $2, %ecx
11441 + repne scasl
11442 + jecxz 2f
11443 +
11444 + cmp $2*16, %ecx
11445 + jc 2f
11446 +
11447 + mov $2*16, %ecx
11448 + repe scasl
11449 + jecxz 2f
11450 + jne 1b
11451 +
11452 +2: cld
11453 + mov %esp, %ecx
11454 + sub %edi, %ecx
11455 + shr $2, %ecx
11456 + rep stosl
11457 +
11458 + mov TI_task_thread_sp0(%ebp), %edi
11459 + sub $128, %edi
11460 + mov %edi, TI_lowest_stack(%ebp)
11461 +
11462 + popl %eax
11463 + popl %edi
11464 + ret
11465 +ENDPROC(pax_erase_kstack)
11466 +#endif
11467 +
11468 +.macro __SAVE_ALL _DS
11469 cld
11470 PUSH_GS
11471 pushl_cfi %fs
11472 @@ -214,7 +347,7 @@
11473 CFI_REL_OFFSET ecx, 0
11474 pushl_cfi %ebx
11475 CFI_REL_OFFSET ebx, 0
11476 - movl $(__USER_DS), %edx
11477 + movl $\_DS, %edx
11478 movl %edx, %ds
11479 movl %edx, %es
11480 movl $(__KERNEL_PERCPU), %edx
11481 @@ -222,6 +355,15 @@
11482 SET_KERNEL_GS %edx
11483 .endm
11484
11485 +.macro SAVE_ALL
11486 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11487 + __SAVE_ALL __KERNEL_DS
11488 + pax_enter_kernel
11489 +#else
11490 + __SAVE_ALL __USER_DS
11491 +#endif
11492 +.endm
11493 +
11494 .macro RESTORE_INT_REGS
11495 popl_cfi %ebx
11496 CFI_RESTORE ebx
11497 @@ -332,7 +474,15 @@ check_userspace:
11498 movb PT_CS(%esp), %al
11499 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11500 cmpl $USER_RPL, %eax
11501 +
11502 +#ifdef CONFIG_PAX_KERNEXEC
11503 + jae resume_userspace
11504 +
11505 + PAX_EXIT_KERNEL
11506 + jmp resume_kernel
11507 +#else
11508 jb resume_kernel # not returning to v8086 or userspace
11509 +#endif
11510
11511 ENTRY(resume_userspace)
11512 LOCKDEP_SYS_EXIT
11513 @@ -344,7 +494,7 @@ ENTRY(resume_userspace)
11514 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11515 # int/exception return?
11516 jne work_pending
11517 - jmp restore_all
11518 + jmp restore_all_pax
11519 END(ret_from_exception)
11520
11521 #ifdef CONFIG_PREEMPT
11522 @@ -394,23 +544,34 @@ sysenter_past_esp:
11523 /*CFI_REL_OFFSET cs, 0*/
11524 /*
11525 * Push current_thread_info()->sysenter_return to the stack.
11526 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11527 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
11528 */
11529 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11530 + pushl_cfi $0
11531 CFI_REL_OFFSET eip, 0
11532
11533 pushl_cfi %eax
11534 SAVE_ALL
11535 + GET_THREAD_INFO(%ebp)
11536 + movl TI_sysenter_return(%ebp),%ebp
11537 + movl %ebp,PT_EIP(%esp)
11538 ENABLE_INTERRUPTS(CLBR_NONE)
11539
11540 /*
11541 * Load the potential sixth argument from user stack.
11542 * Careful about security.
11543 */
11544 + movl PT_OLDESP(%esp),%ebp
11545 +
11546 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11547 + mov PT_OLDSS(%esp),%ds
11548 +1: movl %ds:(%ebp),%ebp
11549 + push %ss
11550 + pop %ds
11551 +#else
11552 cmpl $__PAGE_OFFSET-3,%ebp
11553 jae syscall_fault
11554 1: movl (%ebp),%ebp
11555 +#endif
11556 +
11557 movl %ebp,PT_EBP(%esp)
11558 .section __ex_table,"a"
11559 .align 4
11560 @@ -433,12 +594,23 @@ sysenter_do_call:
11561 testl $_TIF_ALLWORK_MASK, %ecx
11562 jne sysexit_audit
11563 sysenter_exit:
11564 +
11565 +#ifdef CONFIG_PAX_RANDKSTACK
11566 + pushl_cfi %eax
11567 + call pax_randomize_kstack
11568 + popl_cfi %eax
11569 +#endif
11570 +
11571 + pax_erase_kstack
11572 +
11573 /* if something modifies registers it must also disable sysexit */
11574 movl PT_EIP(%esp), %edx
11575 movl PT_OLDESP(%esp), %ecx
11576 xorl %ebp,%ebp
11577 TRACE_IRQS_ON
11578 1: mov PT_FS(%esp), %fs
11579 +2: mov PT_DS(%esp), %ds
11580 +3: mov PT_ES(%esp), %es
11581 PTGS_TO_GS
11582 ENABLE_INTERRUPTS_SYSEXIT
11583
11584 @@ -455,6 +627,9 @@ sysenter_audit:
11585 movl %eax,%edx /* 2nd arg: syscall number */
11586 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11587 call audit_syscall_entry
11588 +
11589 + pax_erase_kstack
11590 +
11591 pushl_cfi %ebx
11592 movl PT_EAX(%esp),%eax /* reload syscall number */
11593 jmp sysenter_do_call
11594 @@ -481,11 +656,17 @@ sysexit_audit:
11595
11596 CFI_ENDPROC
11597 .pushsection .fixup,"ax"
11598 -2: movl $0,PT_FS(%esp)
11599 +4: movl $0,PT_FS(%esp)
11600 + jmp 1b
11601 +5: movl $0,PT_DS(%esp)
11602 + jmp 1b
11603 +6: movl $0,PT_ES(%esp)
11604 jmp 1b
11605 .section __ex_table,"a"
11606 .align 4
11607 - .long 1b,2b
11608 + .long 1b,4b
11609 + .long 2b,5b
11610 + .long 3b,6b
11611 .popsection
11612 PTGS_TO_GS_EX
11613 ENDPROC(ia32_sysenter_target)
11614 @@ -518,6 +699,14 @@ syscall_exit:
11615 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11616 jne syscall_exit_work
11617
11618 +restore_all_pax:
11619 +
11620 +#ifdef CONFIG_PAX_RANDKSTACK
11621 + call pax_randomize_kstack
11622 +#endif
11623 +
11624 + pax_erase_kstack
11625 +
11626 restore_all:
11627 TRACE_IRQS_IRET
11628 restore_all_notrace:
11629 @@ -577,14 +766,21 @@ ldt_ss:
11630 * compensating for the offset by changing to the ESPFIX segment with
11631 * a base address that matches for the difference.
11632 */
11633 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11634 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11635 mov %esp, %edx /* load kernel esp */
11636 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11637 mov %dx, %ax /* eax: new kernel esp */
11638 sub %eax, %edx /* offset (low word is 0) */
11639 +#ifdef CONFIG_SMP
11640 + movl PER_CPU_VAR(cpu_number), %ebx
11641 + shll $PAGE_SHIFT_asm, %ebx
11642 + addl $cpu_gdt_table, %ebx
11643 +#else
11644 + movl $cpu_gdt_table, %ebx
11645 +#endif
11646 shr $16, %edx
11647 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11648 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11649 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11650 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11651 pushl_cfi $__ESPFIX_SS
11652 pushl_cfi %eax /* new kernel esp */
11653 /* Disable interrupts, but do not irqtrace this section: we
11654 @@ -613,29 +809,23 @@ work_resched:
11655 movl TI_flags(%ebp), %ecx
11656 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11657 # than syscall tracing?
11658 - jz restore_all
11659 + jz restore_all_pax
11660 testb $_TIF_NEED_RESCHED, %cl
11661 jnz work_resched
11662
11663 work_notifysig: # deal with pending signals and
11664 # notify-resume requests
11665 + movl %esp, %eax
11666 #ifdef CONFIG_VM86
11667 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11668 - movl %esp, %eax
11669 - jne work_notifysig_v86 # returning to kernel-space or
11670 + jz 1f # returning to kernel-space or
11671 # vm86-space
11672 - xorl %edx, %edx
11673 - call do_notify_resume
11674 - jmp resume_userspace_sig
11675
11676 - ALIGN
11677 -work_notifysig_v86:
11678 pushl_cfi %ecx # save ti_flags for do_notify_resume
11679 call save_v86_state # %eax contains pt_regs pointer
11680 popl_cfi %ecx
11681 movl %eax, %esp
11682 -#else
11683 - movl %esp, %eax
11684 +1:
11685 #endif
11686 xorl %edx, %edx
11687 call do_notify_resume
11688 @@ -648,6 +838,9 @@ syscall_trace_entry:
11689 movl $-ENOSYS,PT_EAX(%esp)
11690 movl %esp, %eax
11691 call syscall_trace_enter
11692 +
11693 + pax_erase_kstack
11694 +
11695 /* What it returned is what we'll actually use. */
11696 cmpl $(nr_syscalls), %eax
11697 jnae syscall_call
11698 @@ -670,6 +863,10 @@ END(syscall_exit_work)
11699
11700 RING0_INT_FRAME # can't unwind into user space anyway
11701 syscall_fault:
11702 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11703 + push %ss
11704 + pop %ds
11705 +#endif
11706 GET_THREAD_INFO(%ebp)
11707 movl $-EFAULT,PT_EAX(%esp)
11708 jmp resume_userspace
11709 @@ -752,6 +949,36 @@ ptregs_clone:
11710 CFI_ENDPROC
11711 ENDPROC(ptregs_clone)
11712
11713 + ALIGN;
11714 +ENTRY(kernel_execve)
11715 + CFI_STARTPROC
11716 + pushl_cfi %ebp
11717 + sub $PT_OLDSS+4,%esp
11718 + pushl_cfi %edi
11719 + pushl_cfi %ecx
11720 + pushl_cfi %eax
11721 + lea 3*4(%esp),%edi
11722 + mov $PT_OLDSS/4+1,%ecx
11723 + xorl %eax,%eax
11724 + rep stosl
11725 + popl_cfi %eax
11726 + popl_cfi %ecx
11727 + popl_cfi %edi
11728 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11729 + pushl_cfi %esp
11730 + call sys_execve
11731 + add $4,%esp
11732 + CFI_ADJUST_CFA_OFFSET -4
11733 + GET_THREAD_INFO(%ebp)
11734 + test %eax,%eax
11735 + jz syscall_exit
11736 + add $PT_OLDSS+4,%esp
11737 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11738 + popl_cfi %ebp
11739 + ret
11740 + CFI_ENDPROC
11741 +ENDPROC(kernel_execve)
11742 +
11743 .macro FIXUP_ESPFIX_STACK
11744 /*
11745 * Switch back for ESPFIX stack to the normal zerobased stack
11746 @@ -761,8 +988,15 @@ ENDPROC(ptregs_clone)
11747 * normal stack and adjusts ESP with the matching offset.
11748 */
11749 /* fixup the stack */
11750 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
11751 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
11752 +#ifdef CONFIG_SMP
11753 + movl PER_CPU_VAR(cpu_number), %ebx
11754 + shll $PAGE_SHIFT_asm, %ebx
11755 + addl $cpu_gdt_table, %ebx
11756 +#else
11757 + movl $cpu_gdt_table, %ebx
11758 +#endif
11759 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
11760 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
11761 shl $16, %eax
11762 addl %esp, %eax /* the adjusted stack pointer */
11763 pushl_cfi $__KERNEL_DS
11764 @@ -1213,7 +1447,6 @@ return_to_handler:
11765 jmp *%ecx
11766 #endif
11767
11768 -.section .rodata,"a"
11769 #include "syscall_table_32.S"
11770
11771 syscall_table_size=(.-sys_call_table)
11772 @@ -1259,9 +1492,12 @@ error_code:
11773 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11774 REG_TO_PTGS %ecx
11775 SET_KERNEL_GS %ecx
11776 - movl $(__USER_DS), %ecx
11777 + movl $(__KERNEL_DS), %ecx
11778 movl %ecx, %ds
11779 movl %ecx, %es
11780 +
11781 + pax_enter_kernel
11782 +
11783 TRACE_IRQS_OFF
11784 movl %esp,%eax # pt_regs pointer
11785 call *%edi
11786 @@ -1346,6 +1582,9 @@ nmi_stack_correct:
11787 xorl %edx,%edx # zero error code
11788 movl %esp,%eax # pt_regs pointer
11789 call do_nmi
11790 +
11791 + pax_exit_kernel
11792 +
11793 jmp restore_all_notrace
11794 CFI_ENDPROC
11795
11796 @@ -1382,6 +1621,9 @@ nmi_espfix_stack:
11797 FIXUP_ESPFIX_STACK # %eax == %esp
11798 xorl %edx,%edx # zero error code
11799 call do_nmi
11800 +
11801 + pax_exit_kernel
11802 +
11803 RESTORE_REGS
11804 lss 12+4(%esp), %esp # back to espfix stack
11805 CFI_ADJUST_CFA_OFFSET -24
11806 diff -urNp linux-2.6.39.4/arch/x86/kernel/entry_64.S linux-2.6.39.4/arch/x86/kernel/entry_64.S
11807 --- linux-2.6.39.4/arch/x86/kernel/entry_64.S 2011-05-19 00:06:34.000000000 -0400
11808 +++ linux-2.6.39.4/arch/x86/kernel/entry_64.S 2011-08-05 19:44:33.000000000 -0400
11809 @@ -53,6 +53,7 @@
11810 #include <asm/paravirt.h>
11811 #include <asm/ftrace.h>
11812 #include <asm/percpu.h>
11813 +#include <asm/pgtable.h>
11814
11815 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11816 #include <linux/elf-em.h>
11817 @@ -176,6 +177,259 @@ ENTRY(native_usergs_sysret64)
11818 ENDPROC(native_usergs_sysret64)
11819 #endif /* CONFIG_PARAVIRT */
11820
11821 + .macro ljmpq sel, off
11822 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
11823 + .byte 0x48; ljmp *1234f(%rip)
11824 + .pushsection .rodata
11825 + .align 16
11826 + 1234: .quad \off; .word \sel
11827 + .popsection
11828 +#else
11829 + pushq $\sel
11830 + pushq $\off
11831 + lretq
11832 +#endif
11833 + .endm
11834 +
11835 + .macro pax_enter_kernel
11836 +#ifdef CONFIG_PAX_KERNEXEC
11837 + call pax_enter_kernel
11838 +#endif
11839 + .endm
11840 +
11841 + .macro pax_exit_kernel
11842 +#ifdef CONFIG_PAX_KERNEXEC
11843 + call pax_exit_kernel
11844 +#endif
11845 + .endm
11846 +
11847 +#ifdef CONFIG_PAX_KERNEXEC
11848 +ENTRY(pax_enter_kernel)
11849 + pushq %rdi
11850 +
11851 +#ifdef CONFIG_PARAVIRT
11852 + PV_SAVE_REGS(CLBR_RDI)
11853 +#endif
11854 +
11855 + GET_CR0_INTO_RDI
11856 + bts $16,%rdi
11857 + jnc 1f
11858 + mov %cs,%edi
11859 + cmp $__KERNEL_CS,%edi
11860 + jz 3f
11861 + ljmpq __KERNEL_CS,3f
11862 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
11863 +2: SET_RDI_INTO_CR0
11864 +3:
11865 +
11866 +#ifdef CONFIG_PARAVIRT
11867 + PV_RESTORE_REGS(CLBR_RDI)
11868 +#endif
11869 +
11870 + popq %rdi
11871 + retq
11872 +ENDPROC(pax_enter_kernel)
11873 +
11874 +ENTRY(pax_exit_kernel)
11875 + pushq %rdi
11876 +
11877 +#ifdef CONFIG_PARAVIRT
11878 + PV_SAVE_REGS(CLBR_RDI)
11879 +#endif
11880 +
11881 + mov %cs,%rdi
11882 + cmp $__KERNEXEC_KERNEL_CS,%edi
11883 + jnz 2f
11884 + GET_CR0_INTO_RDI
11885 + btr $16,%rdi
11886 + ljmpq __KERNEL_CS,1f
11887 +1: SET_RDI_INTO_CR0
11888 +2:
11889 +
11890 +#ifdef CONFIG_PARAVIRT
11891 + PV_RESTORE_REGS(CLBR_RDI);
11892 +#endif
11893 +
11894 + popq %rdi
11895 + retq
11896 +ENDPROC(pax_exit_kernel)
11897 +#endif
11898 +
11899 + .macro pax_enter_kernel_user
11900 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11901 + call pax_enter_kernel_user
11902 +#endif
11903 + .endm
11904 +
11905 + .macro pax_exit_kernel_user
11906 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11907 + call pax_exit_kernel_user
11908 +#endif
11909 +#ifdef CONFIG_PAX_RANDKSTACK
11910 + push %rax
11911 + call pax_randomize_kstack
11912 + pop %rax
11913 +#endif
11914 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11915 + call pax_erase_kstack
11916 +#endif
11917 + .endm
11918 +
11919 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11920 +ENTRY(pax_enter_kernel_user)
11921 + pushq %rdi
11922 + pushq %rbx
11923 +
11924 +#ifdef CONFIG_PARAVIRT
11925 + PV_SAVE_REGS(CLBR_RDI)
11926 +#endif
11927 +
11928 + GET_CR3_INTO_RDI
11929 + mov %rdi,%rbx
11930 + add $__START_KERNEL_map,%rbx
11931 + sub phys_base(%rip),%rbx
11932 +
11933 +#ifdef CONFIG_PARAVIRT
11934 + pushq %rdi
11935 + cmpl $0, pv_info+PARAVIRT_enabled
11936 + jz 1f
11937 + i = 0
11938 + .rept USER_PGD_PTRS
11939 + mov i*8(%rbx),%rsi
11940 + mov $0,%sil
11941 + lea i*8(%rbx),%rdi
11942 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
11943 + i = i + 1
11944 + .endr
11945 + jmp 2f
11946 +1:
11947 +#endif
11948 +
11949 + i = 0
11950 + .rept USER_PGD_PTRS
11951 + movb $0,i*8(%rbx)
11952 + i = i + 1
11953 + .endr
11954 +
11955 +#ifdef CONFIG_PARAVIRT
11956 +2: popq %rdi
11957 +#endif
11958 + SET_RDI_INTO_CR3
11959 +
11960 +#ifdef CONFIG_PAX_KERNEXEC
11961 + GET_CR0_INTO_RDI
11962 + bts $16,%rdi
11963 + SET_RDI_INTO_CR0
11964 +#endif
11965 +
11966 +#ifdef CONFIG_PARAVIRT
11967 + PV_RESTORE_REGS(CLBR_RDI)
11968 +#endif
11969 +
11970 + popq %rbx
11971 + popq %rdi
11972 + retq
11973 +ENDPROC(pax_enter_kernel_user)
11974 +
11975 +ENTRY(pax_exit_kernel_user)
11976 + push %rdi
11977 +
11978 +#ifdef CONFIG_PARAVIRT
11979 + pushq %rbx
11980 + PV_SAVE_REGS(CLBR_RDI)
11981 +#endif
11982 +
11983 +#ifdef CONFIG_PAX_KERNEXEC
11984 + GET_CR0_INTO_RDI
11985 + btr $16,%rdi
11986 + SET_RDI_INTO_CR0
11987 +#endif
11988 +
11989 + GET_CR3_INTO_RDI
11990 + add $__START_KERNEL_map,%rdi
11991 + sub phys_base(%rip),%rdi
11992 +
11993 +#ifdef CONFIG_PARAVIRT
11994 + cmpl $0, pv_info+PARAVIRT_enabled
11995 + jz 1f
11996 + mov %rdi,%rbx
11997 + i = 0
11998 + .rept USER_PGD_PTRS
11999 + mov i*8(%rbx),%rsi
12000 + mov $0x67,%sil
12001 + lea i*8(%rbx),%rdi
12002 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
12003 + i = i + 1
12004 + .endr
12005 + jmp 2f
12006 +1:
12007 +#endif
12008 +
12009 + i = 0
12010 + .rept USER_PGD_PTRS
12011 + movb $0x67,i*8(%rdi)
12012 + i = i + 1
12013 + .endr
12014 +
12015 +#ifdef CONFIG_PARAVIRT
12016 +2: PV_RESTORE_REGS(CLBR_RDI)
12017 + popq %rbx
12018 +#endif
12019 +
12020 + popq %rdi
12021 + retq
12022 +ENDPROC(pax_exit_kernel_user)
12023 +#endif
12024 +
12025 + .macro pax_erase_kstack
12026 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12027 + call pax_erase_kstack
12028 +#endif
12029 + .endm
12030 +
12031 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12032 +/*
12033 + * r10: thread_info
12034 + * rcx, rdx: can be clobbered
12035 + */
12036 +ENTRY(pax_erase_kstack)
12037 + pushq %rdi
12038 + pushq %rax
12039 +
12040 + GET_THREAD_INFO(%r10)
12041 + mov TI_lowest_stack(%r10), %rdi
12042 + mov $-0xBEEF, %rax
12043 + std
12044 +
12045 +1: mov %edi, %ecx
12046 + and $THREAD_SIZE_asm - 1, %ecx
12047 + shr $3, %ecx
12048 + repne scasq
12049 + jecxz 2f
12050 +
12051 + cmp $2*8, %ecx
12052 + jc 2f
12053 +
12054 + mov $2*8, %ecx
12055 + repe scasq
12056 + jecxz 2f
12057 + jne 1b
12058 +
12059 +2: cld
12060 + mov %esp, %ecx
12061 + sub %edi, %ecx
12062 + shr $3, %ecx
12063 + rep stosq
12064 +
12065 + mov TI_task_thread_sp0(%r10), %rdi
12066 + sub $256, %rdi
12067 + mov %rdi, TI_lowest_stack(%r10)
12068 +
12069 + popq %rax
12070 + popq %rdi
12071 + ret
12072 +ENDPROC(pax_erase_kstack)
12073 +#endif
12074
12075 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12076 #ifdef CONFIG_TRACE_IRQFLAGS
12077 @@ -318,7 +572,7 @@ ENTRY(save_args)
12078 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12079 movq_cfi rbp, 8 /* push %rbp */
12080 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12081 - testl $3, CS(%rdi)
12082 + testb $3, CS(%rdi)
12083 je 1f
12084 SWAPGS
12085 /*
12086 @@ -409,7 +663,7 @@ ENTRY(ret_from_fork)
12087
12088 RESTORE_REST
12089
12090 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12091 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12092 je int_ret_from_sys_call
12093
12094 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12095 @@ -455,7 +709,7 @@ END(ret_from_fork)
12096 ENTRY(system_call)
12097 CFI_STARTPROC simple
12098 CFI_SIGNAL_FRAME
12099 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12100 + CFI_DEF_CFA rsp,0
12101 CFI_REGISTER rip,rcx
12102 /*CFI_REGISTER rflags,r11*/
12103 SWAPGS_UNSAFE_STACK
12104 @@ -468,12 +722,13 @@ ENTRY(system_call_after_swapgs)
12105
12106 movq %rsp,PER_CPU_VAR(old_rsp)
12107 movq PER_CPU_VAR(kernel_stack),%rsp
12108 + pax_enter_kernel_user
12109 /*
12110 * No need to follow this irqs off/on section - it's straight
12111 * and short:
12112 */
12113 ENABLE_INTERRUPTS(CLBR_NONE)
12114 - SAVE_ARGS 8,1
12115 + SAVE_ARGS 8*6,1
12116 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12117 movq %rcx,RIP-ARGOFFSET(%rsp)
12118 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12119 @@ -502,6 +757,7 @@ sysret_check:
12120 andl %edi,%edx
12121 jnz sysret_careful
12122 CFI_REMEMBER_STATE
12123 + pax_exit_kernel_user
12124 /*
12125 * sysretq will re-enable interrupts:
12126 */
12127 @@ -560,6 +816,9 @@ auditsys:
12128 movq %rax,%rsi /* 2nd arg: syscall number */
12129 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12130 call audit_syscall_entry
12131 +
12132 + pax_erase_kstack
12133 +
12134 LOAD_ARGS 0 /* reload call-clobbered registers */
12135 jmp system_call_fastpath
12136
12137 @@ -590,6 +849,9 @@ tracesys:
12138 FIXUP_TOP_OF_STACK %rdi
12139 movq %rsp,%rdi
12140 call syscall_trace_enter
12141 +
12142 + pax_erase_kstack
12143 +
12144 /*
12145 * Reload arg registers from stack in case ptrace changed them.
12146 * We don't reload %rax because syscall_trace_enter() returned
12147 @@ -611,7 +873,7 @@ tracesys:
12148 GLOBAL(int_ret_from_sys_call)
12149 DISABLE_INTERRUPTS(CLBR_NONE)
12150 TRACE_IRQS_OFF
12151 - testl $3,CS-ARGOFFSET(%rsp)
12152 + testb $3,CS-ARGOFFSET(%rsp)
12153 je retint_restore_args
12154 movl $_TIF_ALLWORK_MASK,%edi
12155 /* edi: mask to check */
12156 @@ -793,6 +1055,16 @@ END(interrupt)
12157 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12158 call save_args
12159 PARTIAL_FRAME 0
12160 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12161 + testb $3, CS(%rdi)
12162 + jnz 1f
12163 + pax_enter_kernel
12164 + jmp 2f
12165 +1: pax_enter_kernel_user
12166 +2:
12167 +#else
12168 + pax_enter_kernel
12169 +#endif
12170 call \func
12171 .endm
12172
12173 @@ -825,7 +1097,7 @@ ret_from_intr:
12174 CFI_ADJUST_CFA_OFFSET -8
12175 exit_intr:
12176 GET_THREAD_INFO(%rcx)
12177 - testl $3,CS-ARGOFFSET(%rsp)
12178 + testb $3,CS-ARGOFFSET(%rsp)
12179 je retint_kernel
12180
12181 /* Interrupt came from user space */
12182 @@ -847,12 +1119,14 @@ retint_swapgs: /* return to user-space
12183 * The iretq could re-enable interrupts:
12184 */
12185 DISABLE_INTERRUPTS(CLBR_ANY)
12186 + pax_exit_kernel_user
12187 TRACE_IRQS_IRETQ
12188 SWAPGS
12189 jmp restore_args
12190
12191 retint_restore_args: /* return to kernel space */
12192 DISABLE_INTERRUPTS(CLBR_ANY)
12193 + pax_exit_kernel
12194 /*
12195 * The iretq could re-enable interrupts:
12196 */
12197 @@ -1027,6 +1301,16 @@ ENTRY(\sym)
12198 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12199 call error_entry
12200 DEFAULT_FRAME 0
12201 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12202 + testb $3, CS(%rsp)
12203 + jnz 1f
12204 + pax_enter_kernel
12205 + jmp 2f
12206 +1: pax_enter_kernel_user
12207 +2:
12208 +#else
12209 + pax_enter_kernel
12210 +#endif
12211 movq %rsp,%rdi /* pt_regs pointer */
12212 xorl %esi,%esi /* no error code */
12213 call \do_sym
12214 @@ -1044,6 +1328,16 @@ ENTRY(\sym)
12215 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12216 call save_paranoid
12217 TRACE_IRQS_OFF
12218 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12219 + testb $3, CS(%rsp)
12220 + jnz 1f
12221 + pax_enter_kernel
12222 + jmp 2f
12223 +1: pax_enter_kernel_user
12224 +2:
12225 +#else
12226 + pax_enter_kernel
12227 +#endif
12228 movq %rsp,%rdi /* pt_regs pointer */
12229 xorl %esi,%esi /* no error code */
12230 call \do_sym
12231 @@ -1052,7 +1346,7 @@ ENTRY(\sym)
12232 END(\sym)
12233 .endm
12234
12235 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12236 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12237 .macro paranoidzeroentry_ist sym do_sym ist
12238 ENTRY(\sym)
12239 INTR_FRAME
12240 @@ -1062,8 +1356,24 @@ ENTRY(\sym)
12241 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12242 call save_paranoid
12243 TRACE_IRQS_OFF
12244 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12245 + testb $3, CS(%rsp)
12246 + jnz 1f
12247 + pax_enter_kernel
12248 + jmp 2f
12249 +1: pax_enter_kernel_user
12250 +2:
12251 +#else
12252 + pax_enter_kernel
12253 +#endif
12254 movq %rsp,%rdi /* pt_regs pointer */
12255 xorl %esi,%esi /* no error code */
12256 +#ifdef CONFIG_SMP
12257 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12258 + lea init_tss(%r12), %r12
12259 +#else
12260 + lea init_tss(%rip), %r12
12261 +#endif
12262 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12263 call \do_sym
12264 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12265 @@ -1080,6 +1390,16 @@ ENTRY(\sym)
12266 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12267 call error_entry
12268 DEFAULT_FRAME 0
12269 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12270 + testb $3, CS(%rsp)
12271 + jnz 1f
12272 + pax_enter_kernel
12273 + jmp 2f
12274 +1: pax_enter_kernel_user
12275 +2:
12276 +#else
12277 + pax_enter_kernel
12278 +#endif
12279 movq %rsp,%rdi /* pt_regs pointer */
12280 movq ORIG_RAX(%rsp),%rsi /* get error code */
12281 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12282 @@ -1099,6 +1419,16 @@ ENTRY(\sym)
12283 call save_paranoid
12284 DEFAULT_FRAME 0
12285 TRACE_IRQS_OFF
12286 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12287 + testb $3, CS(%rsp)
12288 + jnz 1f
12289 + pax_enter_kernel
12290 + jmp 2f
12291 +1: pax_enter_kernel_user
12292 +2:
12293 +#else
12294 + pax_enter_kernel
12295 +#endif
12296 movq %rsp,%rdi /* pt_regs pointer */
12297 movq ORIG_RAX(%rsp),%rsi /* get error code */
12298 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12299 @@ -1361,14 +1691,27 @@ ENTRY(paranoid_exit)
12300 TRACE_IRQS_OFF
12301 testl %ebx,%ebx /* swapgs needed? */
12302 jnz paranoid_restore
12303 - testl $3,CS(%rsp)
12304 + testb $3,CS(%rsp)
12305 jnz paranoid_userspace
12306 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12307 + pax_exit_kernel
12308 + TRACE_IRQS_IRETQ 0
12309 + SWAPGS_UNSAFE_STACK
12310 + RESTORE_ALL 8
12311 + jmp irq_return
12312 +#endif
12313 paranoid_swapgs:
12314 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12315 + pax_exit_kernel_user
12316 +#else
12317 + pax_exit_kernel
12318 +#endif
12319 TRACE_IRQS_IRETQ 0
12320 SWAPGS_UNSAFE_STACK
12321 RESTORE_ALL 8
12322 jmp irq_return
12323 paranoid_restore:
12324 + pax_exit_kernel
12325 TRACE_IRQS_IRETQ 0
12326 RESTORE_ALL 8
12327 jmp irq_return
12328 @@ -1426,7 +1769,7 @@ ENTRY(error_entry)
12329 movq_cfi r14, R14+8
12330 movq_cfi r15, R15+8
12331 xorl %ebx,%ebx
12332 - testl $3,CS+8(%rsp)
12333 + testb $3,CS+8(%rsp)
12334 je error_kernelspace
12335 error_swapgs:
12336 SWAPGS
12337 @@ -1490,6 +1833,16 @@ ENTRY(nmi)
12338 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12339 call save_paranoid
12340 DEFAULT_FRAME 0
12341 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12342 + testb $3, CS(%rsp)
12343 + jnz 1f
12344 + pax_enter_kernel
12345 + jmp 2f
12346 +1: pax_enter_kernel_user
12347 +2:
12348 +#else
12349 + pax_enter_kernel
12350 +#endif
12351 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
12352 movq %rsp,%rdi
12353 movq $-1,%rsi
12354 @@ -1500,11 +1853,25 @@ ENTRY(nmi)
12355 DISABLE_INTERRUPTS(CLBR_NONE)
12356 testl %ebx,%ebx /* swapgs needed? */
12357 jnz nmi_restore
12358 - testl $3,CS(%rsp)
12359 + testb $3,CS(%rsp)
12360 jnz nmi_userspace
12361 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12362 + pax_exit_kernel
12363 + SWAPGS_UNSAFE_STACK
12364 + RESTORE_ALL 8
12365 + jmp irq_return
12366 +#endif
12367 nmi_swapgs:
12368 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12369 + pax_exit_kernel_user
12370 +#else
12371 + pax_exit_kernel
12372 +#endif
12373 SWAPGS_UNSAFE_STACK
12374 + RESTORE_ALL 8
12375 + jmp irq_return
12376 nmi_restore:
12377 + pax_exit_kernel
12378 RESTORE_ALL 8
12379 jmp irq_return
12380 nmi_userspace:
12381 diff -urNp linux-2.6.39.4/arch/x86/kernel/ftrace.c linux-2.6.39.4/arch/x86/kernel/ftrace.c
12382 --- linux-2.6.39.4/arch/x86/kernel/ftrace.c 2011-05-19 00:06:34.000000000 -0400
12383 +++ linux-2.6.39.4/arch/x86/kernel/ftrace.c 2011-08-05 19:44:33.000000000 -0400
12384 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
12385 static void *mod_code_newcode; /* holds the text to write to the IP */
12386
12387 static unsigned nmi_wait_count;
12388 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
12389 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
12390
12391 int ftrace_arch_read_dyn_info(char *buf, int size)
12392 {
12393 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
12394
12395 r = snprintf(buf, size, "%u %u",
12396 nmi_wait_count,
12397 - atomic_read(&nmi_update_count));
12398 + atomic_read_unchecked(&nmi_update_count));
12399 return r;
12400 }
12401
12402 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
12403
12404 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
12405 smp_rmb();
12406 + pax_open_kernel();
12407 ftrace_mod_code();
12408 - atomic_inc(&nmi_update_count);
12409 + pax_close_kernel();
12410 + atomic_inc_unchecked(&nmi_update_count);
12411 }
12412 /* Must have previous changes seen before executions */
12413 smp_mb();
12414 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
12415 {
12416 unsigned char replaced[MCOUNT_INSN_SIZE];
12417
12418 + ip = ktla_ktva(ip);
12419 +
12420 /*
12421 * Note: Due to modules and __init, code can
12422 * disappear and change, we need to protect against faulting
12423 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
12424 unsigned char old[MCOUNT_INSN_SIZE], *new;
12425 int ret;
12426
12427 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
12428 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
12429 new = ftrace_call_replace(ip, (unsigned long)func);
12430 ret = ftrace_modify_code(ip, old, new);
12431
12432 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
12433 {
12434 unsigned char code[MCOUNT_INSN_SIZE];
12435
12436 + ip = ktla_ktva(ip);
12437 +
12438 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
12439 return -EFAULT;
12440
12441 diff -urNp linux-2.6.39.4/arch/x86/kernel/head32.c linux-2.6.39.4/arch/x86/kernel/head32.c
12442 --- linux-2.6.39.4/arch/x86/kernel/head32.c 2011-05-19 00:06:34.000000000 -0400
12443 +++ linux-2.6.39.4/arch/x86/kernel/head32.c 2011-08-05 19:44:33.000000000 -0400
12444 @@ -19,6 +19,7 @@
12445 #include <asm/io_apic.h>
12446 #include <asm/bios_ebda.h>
12447 #include <asm/tlbflush.h>
12448 +#include <asm/boot.h>
12449
12450 static void __init i386_default_early_setup(void)
12451 {
12452 @@ -34,7 +35,7 @@ void __init i386_start_kernel(void)
12453 {
12454 memblock_init();
12455
12456 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12457 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12458
12459 #ifdef CONFIG_BLK_DEV_INITRD
12460 /* Reserve INITRD */
12461 diff -urNp linux-2.6.39.4/arch/x86/kernel/head_32.S linux-2.6.39.4/arch/x86/kernel/head_32.S
12462 --- linux-2.6.39.4/arch/x86/kernel/head_32.S 2011-05-19 00:06:34.000000000 -0400
12463 +++ linux-2.6.39.4/arch/x86/kernel/head_32.S 2011-08-05 19:44:33.000000000 -0400
12464 @@ -25,6 +25,12 @@
12465 /* Physical address */
12466 #define pa(X) ((X) - __PAGE_OFFSET)
12467
12468 +#ifdef CONFIG_PAX_KERNEXEC
12469 +#define ta(X) (X)
12470 +#else
12471 +#define ta(X) ((X) - __PAGE_OFFSET)
12472 +#endif
12473 +
12474 /*
12475 * References to members of the new_cpu_data structure.
12476 */
12477 @@ -54,11 +60,7 @@
12478 * and small than max_low_pfn, otherwise will waste some page table entries
12479 */
12480
12481 -#if PTRS_PER_PMD > 1
12482 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
12483 -#else
12484 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
12485 -#endif
12486 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
12487
12488 /* Number of possible pages in the lowmem region */
12489 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
12490 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
12491 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12492
12493 /*
12494 + * Real beginning of normal "text" segment
12495 + */
12496 +ENTRY(stext)
12497 +ENTRY(_stext)
12498 +
12499 +/*
12500 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
12501 * %esi points to the real-mode code as a 32-bit pointer.
12502 * CS and DS must be 4 GB flat segments, but we don't depend on
12503 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12504 * can.
12505 */
12506 __HEAD
12507 +
12508 +#ifdef CONFIG_PAX_KERNEXEC
12509 + jmp startup_32
12510 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
12511 +.fill PAGE_SIZE-5,1,0xcc
12512 +#endif
12513 +
12514 ENTRY(startup_32)
12515 movl pa(stack_start),%ecx
12516
12517 @@ -105,6 +120,57 @@ ENTRY(startup_32)
12518 2:
12519 leal -__PAGE_OFFSET(%ecx),%esp
12520
12521 +#ifdef CONFIG_SMP
12522 + movl $pa(cpu_gdt_table),%edi
12523 + movl $__per_cpu_load,%eax
12524 + movw %ax,__KERNEL_PERCPU + 2(%edi)
12525 + rorl $16,%eax
12526 + movb %al,__KERNEL_PERCPU + 4(%edi)
12527 + movb %ah,__KERNEL_PERCPU + 7(%edi)
12528 + movl $__per_cpu_end - 1,%eax
12529 + subl $__per_cpu_start,%eax
12530 + movw %ax,__KERNEL_PERCPU + 0(%edi)
12531 +#endif
12532 +
12533 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12534 + movl $NR_CPUS,%ecx
12535 + movl $pa(cpu_gdt_table),%edi
12536 +1:
12537 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
12538 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
12539 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
12540 + addl $PAGE_SIZE_asm,%edi
12541 + loop 1b
12542 +#endif
12543 +
12544 +#ifdef CONFIG_PAX_KERNEXEC
12545 + movl $pa(boot_gdt),%edi
12546 + movl $__LOAD_PHYSICAL_ADDR,%eax
12547 + movw %ax,__BOOT_CS + 2(%edi)
12548 + rorl $16,%eax
12549 + movb %al,__BOOT_CS + 4(%edi)
12550 + movb %ah,__BOOT_CS + 7(%edi)
12551 + rorl $16,%eax
12552 +
12553 + ljmp $(__BOOT_CS),$1f
12554 +1:
12555 +
12556 + movl $NR_CPUS,%ecx
12557 + movl $pa(cpu_gdt_table),%edi
12558 + addl $__PAGE_OFFSET,%eax
12559 +1:
12560 + movw %ax,__KERNEL_CS + 2(%edi)
12561 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
12562 + rorl $16,%eax
12563 + movb %al,__KERNEL_CS + 4(%edi)
12564 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
12565 + movb %ah,__KERNEL_CS + 7(%edi)
12566 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
12567 + rorl $16,%eax
12568 + addl $PAGE_SIZE_asm,%edi
12569 + loop 1b
12570 +#endif
12571 +
12572 /*
12573 * Clear BSS first so that there are no surprises...
12574 */
12575 @@ -195,8 +261,11 @@ ENTRY(startup_32)
12576 movl %eax, pa(max_pfn_mapped)
12577
12578 /* Do early initialization of the fixmap area */
12579 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12580 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
12581 +#ifdef CONFIG_COMPAT_VDSO
12582 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
12583 +#else
12584 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
12585 +#endif
12586 #else /* Not PAE */
12587
12588 page_pde_offset = (__PAGE_OFFSET >> 20);
12589 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12590 movl %eax, pa(max_pfn_mapped)
12591
12592 /* Do early initialization of the fixmap area */
12593 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12594 - movl %eax,pa(initial_page_table+0xffc)
12595 +#ifdef CONFIG_COMPAT_VDSO
12596 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
12597 +#else
12598 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
12599 +#endif
12600 #endif
12601
12602 #ifdef CONFIG_PARAVIRT
12603 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12604 cmpl $num_subarch_entries, %eax
12605 jae bad_subarch
12606
12607 - movl pa(subarch_entries)(,%eax,4), %eax
12608 - subl $__PAGE_OFFSET, %eax
12609 - jmp *%eax
12610 + jmp *pa(subarch_entries)(,%eax,4)
12611
12612 bad_subarch:
12613 WEAK(lguest_entry)
12614 @@ -255,10 +325,10 @@ WEAK(xen_entry)
12615 __INITDATA
12616
12617 subarch_entries:
12618 - .long default_entry /* normal x86/PC */
12619 - .long lguest_entry /* lguest hypervisor */
12620 - .long xen_entry /* Xen hypervisor */
12621 - .long default_entry /* Moorestown MID */
12622 + .long ta(default_entry) /* normal x86/PC */
12623 + .long ta(lguest_entry) /* lguest hypervisor */
12624 + .long ta(xen_entry) /* Xen hypervisor */
12625 + .long ta(default_entry) /* Moorestown MID */
12626 num_subarch_entries = (. - subarch_entries) / 4
12627 .previous
12628 #else
12629 @@ -312,6 +382,7 @@ default_entry:
12630 orl %edx,%eax
12631 movl %eax,%cr4
12632
12633 +#ifdef CONFIG_X86_PAE
12634 testb $X86_CR4_PAE, %al # check if PAE is enabled
12635 jz 6f
12636
12637 @@ -340,6 +411,9 @@ default_entry:
12638 /* Make changes effective */
12639 wrmsr
12640
12641 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12642 +#endif
12643 +
12644 6:
12645
12646 /*
12647 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
12648 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
12649 movl %eax,%ss # after changing gdt.
12650
12651 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
12652 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
12653 movl %eax,%ds
12654 movl %eax,%es
12655
12656 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
12657 */
12658 cmpb $0,ready
12659 jne 1f
12660 - movl $gdt_page,%eax
12661 + movl $cpu_gdt_table,%eax
12662 movl $stack_canary,%ecx
12663 +#ifdef CONFIG_SMP
12664 + addl $__per_cpu_load,%ecx
12665 +#endif
12666 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12667 shrl $16, %ecx
12668 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12669 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
12670 1:
12671 -#endif
12672 movl $(__KERNEL_STACK_CANARY),%eax
12673 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12674 + movl $(__USER_DS),%eax
12675 +#else
12676 + xorl %eax,%eax
12677 +#endif
12678 movl %eax,%gs
12679
12680 xorl %eax,%eax # Clear LDT
12681 @@ -558,22 +639,22 @@ early_page_fault:
12682 jmp early_fault
12683
12684 early_fault:
12685 - cld
12686 #ifdef CONFIG_PRINTK
12687 + cmpl $1,%ss:early_recursion_flag
12688 + je hlt_loop
12689 + incl %ss:early_recursion_flag
12690 + cld
12691 pusha
12692 movl $(__KERNEL_DS),%eax
12693 movl %eax,%ds
12694 movl %eax,%es
12695 - cmpl $2,early_recursion_flag
12696 - je hlt_loop
12697 - incl early_recursion_flag
12698 movl %cr2,%eax
12699 pushl %eax
12700 pushl %edx /* trapno */
12701 pushl $fault_msg
12702 call printk
12703 +; call dump_stack
12704 #endif
12705 - call dump_stack
12706 hlt_loop:
12707 hlt
12708 jmp hlt_loop
12709 @@ -581,8 +662,11 @@ hlt_loop:
12710 /* This is the default interrupt "handler" :-) */
12711 ALIGN
12712 ignore_int:
12713 - cld
12714 #ifdef CONFIG_PRINTK
12715 + cmpl $2,%ss:early_recursion_flag
12716 + je hlt_loop
12717 + incl %ss:early_recursion_flag
12718 + cld
12719 pushl %eax
12720 pushl %ecx
12721 pushl %edx
12722 @@ -591,9 +675,6 @@ ignore_int:
12723 movl $(__KERNEL_DS),%eax
12724 movl %eax,%ds
12725 movl %eax,%es
12726 - cmpl $2,early_recursion_flag
12727 - je hlt_loop
12728 - incl early_recursion_flag
12729 pushl 16(%esp)
12730 pushl 24(%esp)
12731 pushl 32(%esp)
12732 @@ -622,29 +703,43 @@ ENTRY(initial_code)
12733 /*
12734 * BSS section
12735 */
12736 -__PAGE_ALIGNED_BSS
12737 - .align PAGE_SIZE
12738 #ifdef CONFIG_X86_PAE
12739 +.section .initial_pg_pmd,"a",@progbits
12740 initial_pg_pmd:
12741 .fill 1024*KPMDS,4,0
12742 #else
12743 +.section .initial_page_table,"a",@progbits
12744 ENTRY(initial_page_table)
12745 .fill 1024,4,0
12746 #endif
12747 +.section .initial_pg_fixmap,"a",@progbits
12748 initial_pg_fixmap:
12749 .fill 1024,4,0
12750 +.section .empty_zero_page,"a",@progbits
12751 ENTRY(empty_zero_page)
12752 .fill 4096,1,0
12753 +.section .swapper_pg_dir,"a",@progbits
12754 ENTRY(swapper_pg_dir)
12755 +#ifdef CONFIG_X86_PAE
12756 + .fill 4,8,0
12757 +#else
12758 .fill 1024,4,0
12759 +#endif
12760 +
12761 +/*
12762 + * The IDT has to be page-aligned to simplify the Pentium
12763 + * F0 0F bug workaround.. We have a special link segment
12764 + * for this.
12765 + */
12766 +.section .idt,"a",@progbits
12767 +ENTRY(idt_table)
12768 + .fill 256,8,0
12769
12770 /*
12771 * This starts the data section.
12772 */
12773 #ifdef CONFIG_X86_PAE
12774 -__PAGE_ALIGNED_DATA
12775 - /* Page-aligned for the benefit of paravirt? */
12776 - .align PAGE_SIZE
12777 +.section .initial_page_table,"a",@progbits
12778 ENTRY(initial_page_table)
12779 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
12780 # if KPMDS == 3
12781 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
12782 # error "Kernel PMDs should be 1, 2 or 3"
12783 # endif
12784 .align PAGE_SIZE /* needs to be page-sized too */
12785 +
12786 +#ifdef CONFIG_PAX_PER_CPU_PGD
12787 +ENTRY(cpu_pgd)
12788 + .rept NR_CPUS
12789 + .fill 4,8,0
12790 + .endr
12791 +#endif
12792 +
12793 #endif
12794
12795 .data
12796 .balign 4
12797 ENTRY(stack_start)
12798 - .long init_thread_union+THREAD_SIZE
12799 + .long init_thread_union+THREAD_SIZE-8
12800 +
12801 +ready: .byte 0
12802
12803 +.section .rodata,"a",@progbits
12804 early_recursion_flag:
12805 .long 0
12806
12807 -ready: .byte 0
12808 -
12809 int_msg:
12810 .asciz "Unknown interrupt or fault at: %p %p %p\n"
12811
12812 @@ -707,7 +811,7 @@ fault_msg:
12813 .word 0 # 32 bit align gdt_desc.address
12814 boot_gdt_descr:
12815 .word __BOOT_DS+7
12816 - .long boot_gdt - __PAGE_OFFSET
12817 + .long pa(boot_gdt)
12818
12819 .word 0 # 32-bit align idt_desc.address
12820 idt_descr:
12821 @@ -718,7 +822,7 @@ idt_descr:
12822 .word 0 # 32 bit align gdt_desc.address
12823 ENTRY(early_gdt_descr)
12824 .word GDT_ENTRIES*8-1
12825 - .long gdt_page /* Overwritten for secondary CPUs */
12826 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
12827
12828 /*
12829 * The boot_gdt must mirror the equivalent in setup.S and is
12830 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
12831 .align L1_CACHE_BYTES
12832 ENTRY(boot_gdt)
12833 .fill GDT_ENTRY_BOOT_CS,8,0
12834 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
12835 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
12836 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
12837 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
12838 +
12839 + .align PAGE_SIZE_asm
12840 +ENTRY(cpu_gdt_table)
12841 + .rept NR_CPUS
12842 + .quad 0x0000000000000000 /* NULL descriptor */
12843 + .quad 0x0000000000000000 /* 0x0b reserved */
12844 + .quad 0x0000000000000000 /* 0x13 reserved */
12845 + .quad 0x0000000000000000 /* 0x1b reserved */
12846 +
12847 +#ifdef CONFIG_PAX_KERNEXEC
12848 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
12849 +#else
12850 + .quad 0x0000000000000000 /* 0x20 unused */
12851 +#endif
12852 +
12853 + .quad 0x0000000000000000 /* 0x28 unused */
12854 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
12855 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
12856 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
12857 + .quad 0x0000000000000000 /* 0x4b reserved */
12858 + .quad 0x0000000000000000 /* 0x53 reserved */
12859 + .quad 0x0000000000000000 /* 0x5b reserved */
12860 +
12861 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
12862 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
12863 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
12864 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
12865 +
12866 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
12867 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
12868 +
12869 + /*
12870 + * Segments used for calling PnP BIOS have byte granularity.
12871 + * The code segments and data segments have fixed 64k limits,
12872 + * the transfer segment sizes are set at run time.
12873 + */
12874 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
12875 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
12876 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
12877 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
12878 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
12879 +
12880 + /*
12881 + * The APM segments have byte granularity and their bases
12882 + * are set at run time. All have 64k limits.
12883 + */
12884 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
12885 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
12886 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
12887 +
12888 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
12889 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
12890 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
12891 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
12892 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
12893 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
12894 +
12895 + /* Be sure this is zeroed to avoid false validations in Xen */
12896 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
12897 + .endr
12898 diff -urNp linux-2.6.39.4/arch/x86/kernel/head_64.S linux-2.6.39.4/arch/x86/kernel/head_64.S
12899 --- linux-2.6.39.4/arch/x86/kernel/head_64.S 2011-05-19 00:06:34.000000000 -0400
12900 +++ linux-2.6.39.4/arch/x86/kernel/head_64.S 2011-08-05 19:44:33.000000000 -0400
12901 @@ -19,6 +19,7 @@
12902 #include <asm/cache.h>
12903 #include <asm/processor-flags.h>
12904 #include <asm/percpu.h>
12905 +#include <asm/cpufeature.h>
12906
12907 #ifdef CONFIG_PARAVIRT
12908 #include <asm/asm-offsets.h>
12909 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
12910 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
12911 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
12912 L3_START_KERNEL = pud_index(__START_KERNEL_map)
12913 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
12914 +L3_VMALLOC_START = pud_index(VMALLOC_START)
12915 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
12916 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
12917
12918 .text
12919 __HEAD
12920 @@ -85,35 +90,22 @@ startup_64:
12921 */
12922 addq %rbp, init_level4_pgt + 0(%rip)
12923 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
12924 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
12925 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
12926 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
12927
12928 addq %rbp, level3_ident_pgt + 0(%rip)
12929 +#ifndef CONFIG_XEN
12930 + addq %rbp, level3_ident_pgt + 8(%rip)
12931 +#endif
12932
12933 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
12934 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
12935 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
12936
12937 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12938 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
12939 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
12940
12941 - /* Add an Identity mapping if I am above 1G */
12942 - leaq _text(%rip), %rdi
12943 - andq $PMD_PAGE_MASK, %rdi
12944 -
12945 - movq %rdi, %rax
12946 - shrq $PUD_SHIFT, %rax
12947 - andq $(PTRS_PER_PUD - 1), %rax
12948 - jz ident_complete
12949 -
12950 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
12951 - leaq level3_ident_pgt(%rip), %rbx
12952 - movq %rdx, 0(%rbx, %rax, 8)
12953 -
12954 - movq %rdi, %rax
12955 - shrq $PMD_SHIFT, %rax
12956 - andq $(PTRS_PER_PMD - 1), %rax
12957 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
12958 - leaq level2_spare_pgt(%rip), %rbx
12959 - movq %rdx, 0(%rbx, %rax, 8)
12960 -ident_complete:
12961 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12962 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
12963
12964 /*
12965 * Fixup the kernel text+data virtual addresses. Note that
12966 @@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
12967 * after the boot processor executes this code.
12968 */
12969
12970 - /* Enable PAE mode and PGE */
12971 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
12972 + /* Enable PAE mode and PSE/PGE */
12973 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
12974 movq %rax, %cr4
12975
12976 /* Setup early boot stage 4 level pagetables. */
12977 @@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
12978 movl $MSR_EFER, %ecx
12979 rdmsr
12980 btsl $_EFER_SCE, %eax /* Enable System Call */
12981 - btl $20,%edi /* No Execute supported? */
12982 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
12983 jnc 1f
12984 btsl $_EFER_NX, %eax
12985 + leaq init_level4_pgt(%rip), %rdi
12986 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
12987 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
12988 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
12989 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
12990 1: wrmsr /* Make changes effective */
12991
12992 /* Setup cr0 */
12993 @@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
12994 bad_address:
12995 jmp bad_address
12996
12997 - .section ".init.text","ax"
12998 + __INIT
12999 #ifdef CONFIG_EARLY_PRINTK
13000 .globl early_idt_handlers
13001 early_idt_handlers:
13002 @@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
13003 #endif /* EARLY_PRINTK */
13004 1: hlt
13005 jmp 1b
13006 + .previous
13007
13008 #ifdef CONFIG_EARLY_PRINTK
13009 + __INITDATA
13010 early_recursion_flag:
13011 .long 0
13012 + .previous
13013
13014 + .section .rodata,"a",@progbits
13015 early_idt_msg:
13016 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
13017 early_idt_ripmsg:
13018 .asciz "RIP %s\n"
13019 -#endif /* CONFIG_EARLY_PRINTK */
13020 .previous
13021 +#endif /* CONFIG_EARLY_PRINTK */
13022
13023 + .section .rodata,"a",@progbits
13024 #define NEXT_PAGE(name) \
13025 .balign PAGE_SIZE; \
13026 ENTRY(name)
13027 @@ -338,7 +340,6 @@ ENTRY(name)
13028 i = i + 1 ; \
13029 .endr
13030
13031 - .data
13032 /*
13033 * This default setting generates an ident mapping at address 0x100000
13034 * and a mapping for the kernel that precisely maps virtual address
13035 @@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
13036 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13037 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
13038 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13039 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
13040 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
13041 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
13042 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13043 .org init_level4_pgt + L4_START_KERNEL*8, 0
13044 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
13045 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
13046
13047 +#ifdef CONFIG_PAX_PER_CPU_PGD
13048 +NEXT_PAGE(cpu_pgd)
13049 + .rept NR_CPUS
13050 + .fill 512,8,0
13051 + .endr
13052 +#endif
13053 +
13054 NEXT_PAGE(level3_ident_pgt)
13055 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13056 +#ifdef CONFIG_XEN
13057 .fill 511,8,0
13058 +#else
13059 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
13060 + .fill 510,8,0
13061 +#endif
13062 +
13063 +NEXT_PAGE(level3_vmalloc_pgt)
13064 + .fill 512,8,0
13065 +
13066 +NEXT_PAGE(level3_vmemmap_pgt)
13067 + .fill L3_VMEMMAP_START,8,0
13068 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13069
13070 NEXT_PAGE(level3_kernel_pgt)
13071 .fill L3_START_KERNEL,8,0
13072 @@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13073 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13074 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13075
13076 +NEXT_PAGE(level2_vmemmap_pgt)
13077 + .fill 512,8,0
13078 +
13079 NEXT_PAGE(level2_fixmap_pgt)
13080 - .fill 506,8,0
13081 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13082 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13083 - .fill 5,8,0
13084 + .fill 507,8,0
13085 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13086 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13087 + .fill 4,8,0
13088
13089 -NEXT_PAGE(level1_fixmap_pgt)
13090 +NEXT_PAGE(level1_vsyscall_pgt)
13091 .fill 512,8,0
13092
13093 -NEXT_PAGE(level2_ident_pgt)
13094 - /* Since I easily can, map the first 1G.
13095 + /* Since I easily can, map the first 2G.
13096 * Don't set NX because code runs from these pages.
13097 */
13098 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13099 +NEXT_PAGE(level2_ident_pgt)
13100 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13101
13102 NEXT_PAGE(level2_kernel_pgt)
13103 /*
13104 @@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13105 * If you want to increase this then increase MODULES_VADDR
13106 * too.)
13107 */
13108 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13109 - KERNEL_IMAGE_SIZE/PMD_SIZE)
13110 -
13111 -NEXT_PAGE(level2_spare_pgt)
13112 - .fill 512, 8, 0
13113 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13114
13115 #undef PMDS
13116 #undef NEXT_PAGE
13117
13118 - .data
13119 + .align PAGE_SIZE
13120 +ENTRY(cpu_gdt_table)
13121 + .rept NR_CPUS
13122 + .quad 0x0000000000000000 /* NULL descriptor */
13123 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13124 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
13125 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
13126 + .quad 0x00cffb000000ffff /* __USER32_CS */
13127 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13128 + .quad 0x00affb000000ffff /* __USER_CS */
13129 +
13130 +#ifdef CONFIG_PAX_KERNEXEC
13131 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13132 +#else
13133 + .quad 0x0 /* unused */
13134 +#endif
13135 +
13136 + .quad 0,0 /* TSS */
13137 + .quad 0,0 /* LDT */
13138 + .quad 0,0,0 /* three TLS descriptors */
13139 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
13140 + /* asm/segment.h:GDT_ENTRIES must match this */
13141 +
13142 + /* zero the remaining page */
13143 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13144 + .endr
13145 +
13146 .align 16
13147 .globl early_gdt_descr
13148 early_gdt_descr:
13149 .word GDT_ENTRIES*8-1
13150 early_gdt_descr_base:
13151 - .quad INIT_PER_CPU_VAR(gdt_page)
13152 + .quad cpu_gdt_table
13153
13154 ENTRY(phys_base)
13155 /* This must match the first entry in level2_kernel_pgt */
13156 .quad 0x0000000000000000
13157
13158 #include "../../x86/xen/xen-head.S"
13159 -
13160 - .section .bss, "aw", @nobits
13161 +
13162 + .section .rodata,"a",@progbits
13163 .align L1_CACHE_BYTES
13164 ENTRY(idt_table)
13165 - .skip IDT_ENTRIES * 16
13166 + .fill 512,8,0
13167
13168 __PAGE_ALIGNED_BSS
13169 .align PAGE_SIZE
13170 diff -urNp linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c
13171 --- linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c 2011-05-19 00:06:34.000000000 -0400
13172 +++ linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c 2011-08-05 19:44:33.000000000 -0400
13173 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13174 EXPORT_SYMBOL(cmpxchg8b_emu);
13175 #endif
13176
13177 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
13178 +
13179 /* Networking helper routines. */
13180 EXPORT_SYMBOL(csum_partial_copy_generic);
13181 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13182 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13183
13184 EXPORT_SYMBOL(__get_user_1);
13185 EXPORT_SYMBOL(__get_user_2);
13186 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13187
13188 EXPORT_SYMBOL(csum_partial);
13189 EXPORT_SYMBOL(empty_zero_page);
13190 +
13191 +#ifdef CONFIG_PAX_KERNEXEC
13192 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13193 +#endif
13194 diff -urNp linux-2.6.39.4/arch/x86/kernel/i8259.c linux-2.6.39.4/arch/x86/kernel/i8259.c
13195 --- linux-2.6.39.4/arch/x86/kernel/i8259.c 2011-05-19 00:06:34.000000000 -0400
13196 +++ linux-2.6.39.4/arch/x86/kernel/i8259.c 2011-08-05 19:44:33.000000000 -0400
13197 @@ -210,7 +210,7 @@ spurious_8259A_irq:
13198 "spurious 8259A interrupt: IRQ%d.\n", irq);
13199 spurious_irq_mask |= irqmask;
13200 }
13201 - atomic_inc(&irq_err_count);
13202 + atomic_inc_unchecked(&irq_err_count);
13203 /*
13204 * Theoretically we do not have to handle this IRQ,
13205 * but in Linux this does not cause problems and is
13206 diff -urNp linux-2.6.39.4/arch/x86/kernel/init_task.c linux-2.6.39.4/arch/x86/kernel/init_task.c
13207 --- linux-2.6.39.4/arch/x86/kernel/init_task.c 2011-05-19 00:06:34.000000000 -0400
13208 +++ linux-2.6.39.4/arch/x86/kernel/init_task.c 2011-08-05 19:44:33.000000000 -0400
13209 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13210 * way process stacks are handled. This is done by having a special
13211 * "init_task" linker map entry..
13212 */
13213 -union thread_union init_thread_union __init_task_data =
13214 - { INIT_THREAD_INFO(init_task) };
13215 +union thread_union init_thread_union __init_task_data;
13216
13217 /*
13218 * Initial task structure.
13219 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13220 * section. Since TSS's are completely CPU-local, we want them
13221 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13222 */
13223 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13224 -
13225 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13226 +EXPORT_SYMBOL(init_tss);
13227 diff -urNp linux-2.6.39.4/arch/x86/kernel/ioport.c linux-2.6.39.4/arch/x86/kernel/ioport.c
13228 --- linux-2.6.39.4/arch/x86/kernel/ioport.c 2011-05-19 00:06:34.000000000 -0400
13229 +++ linux-2.6.39.4/arch/x86/kernel/ioport.c 2011-08-05 19:44:33.000000000 -0400
13230 @@ -6,6 +6,7 @@
13231 #include <linux/sched.h>
13232 #include <linux/kernel.h>
13233 #include <linux/capability.h>
13234 +#include <linux/security.h>
13235 #include <linux/errno.h>
13236 #include <linux/types.h>
13237 #include <linux/ioport.h>
13238 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13239
13240 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13241 return -EINVAL;
13242 +#ifdef CONFIG_GRKERNSEC_IO
13243 + if (turn_on && grsec_disable_privio) {
13244 + gr_handle_ioperm();
13245 + return -EPERM;
13246 + }
13247 +#endif
13248 if (turn_on && !capable(CAP_SYS_RAWIO))
13249 return -EPERM;
13250
13251 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13252 * because the ->io_bitmap_max value must match the bitmap
13253 * contents:
13254 */
13255 - tss = &per_cpu(init_tss, get_cpu());
13256 + tss = init_tss + get_cpu();
13257
13258 if (turn_on)
13259 bitmap_clear(t->io_bitmap_ptr, from, num);
13260 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13261 return -EINVAL;
13262 /* Trying to gain more privileges? */
13263 if (level > old) {
13264 +#ifdef CONFIG_GRKERNSEC_IO
13265 + if (grsec_disable_privio) {
13266 + gr_handle_iopl();
13267 + return -EPERM;
13268 + }
13269 +#endif
13270 if (!capable(CAP_SYS_RAWIO))
13271 return -EPERM;
13272 }
13273 diff -urNp linux-2.6.39.4/arch/x86/kernel/irq_32.c linux-2.6.39.4/arch/x86/kernel/irq_32.c
13274 --- linux-2.6.39.4/arch/x86/kernel/irq_32.c 2011-05-19 00:06:34.000000000 -0400
13275 +++ linux-2.6.39.4/arch/x86/kernel/irq_32.c 2011-08-05 19:44:33.000000000 -0400
13276 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13277 __asm__ __volatile__("andl %%esp,%0" :
13278 "=r" (sp) : "0" (THREAD_SIZE - 1));
13279
13280 - return sp < (sizeof(struct thread_info) + STACK_WARN);
13281 + return sp < STACK_WARN;
13282 }
13283
13284 static void print_stack_overflow(void)
13285 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13286 * per-CPU IRQ handling contexts (thread information and stack)
13287 */
13288 union irq_ctx {
13289 - struct thread_info tinfo;
13290 - u32 stack[THREAD_SIZE/sizeof(u32)];
13291 + unsigned long previous_esp;
13292 + u32 stack[THREAD_SIZE/sizeof(u32)];
13293 } __attribute__((aligned(THREAD_SIZE)));
13294
13295 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13296 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13297 static inline int
13298 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13299 {
13300 - union irq_ctx *curctx, *irqctx;
13301 + union irq_ctx *irqctx;
13302 u32 *isp, arg1, arg2;
13303
13304 - curctx = (union irq_ctx *) current_thread_info();
13305 irqctx = __this_cpu_read(hardirq_ctx);
13306
13307 /*
13308 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
13309 * handler) we can't do that and just have to keep using the
13310 * current stack (which is the irq stack already after all)
13311 */
13312 - if (unlikely(curctx == irqctx))
13313 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
13314 return 0;
13315
13316 /* build the stack frame on the IRQ stack */
13317 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13318 - irqctx->tinfo.task = curctx->tinfo.task;
13319 - irqctx->tinfo.previous_esp = current_stack_pointer;
13320 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13321 + irqctx->previous_esp = current_stack_pointer;
13322
13323 - /*
13324 - * Copy the softirq bits in preempt_count so that the
13325 - * softirq checks work in the hardirq context.
13326 - */
13327 - irqctx->tinfo.preempt_count =
13328 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
13329 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
13330 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13331 + __set_fs(MAKE_MM_SEG(0));
13332 +#endif
13333
13334 if (unlikely(overflow))
13335 call_on_stack(print_stack_overflow, isp);
13336 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
13337 : "0" (irq), "1" (desc), "2" (isp),
13338 "D" (desc->handle_irq)
13339 : "memory", "cc", "ecx");
13340 +
13341 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13342 + __set_fs(current_thread_info()->addr_limit);
13343 +#endif
13344 +
13345 return 1;
13346 }
13347
13348 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
13349 */
13350 void __cpuinit irq_ctx_init(int cpu)
13351 {
13352 - union irq_ctx *irqctx;
13353 -
13354 if (per_cpu(hardirq_ctx, cpu))
13355 return;
13356
13357 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13358 - THREAD_FLAGS,
13359 - THREAD_ORDER));
13360 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13361 - irqctx->tinfo.cpu = cpu;
13362 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
13363 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13364 -
13365 - per_cpu(hardirq_ctx, cpu) = irqctx;
13366 -
13367 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13368 - THREAD_FLAGS,
13369 - THREAD_ORDER));
13370 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13371 - irqctx->tinfo.cpu = cpu;
13372 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13373 -
13374 - per_cpu(softirq_ctx, cpu) = irqctx;
13375 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13376 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13377
13378 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
13379 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
13380 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
13381 asmlinkage void do_softirq(void)
13382 {
13383 unsigned long flags;
13384 - struct thread_info *curctx;
13385 union irq_ctx *irqctx;
13386 u32 *isp;
13387
13388 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
13389 local_irq_save(flags);
13390
13391 if (local_softirq_pending()) {
13392 - curctx = current_thread_info();
13393 irqctx = __this_cpu_read(softirq_ctx);
13394 - irqctx->tinfo.task = curctx->task;
13395 - irqctx->tinfo.previous_esp = current_stack_pointer;
13396 + irqctx->previous_esp = current_stack_pointer;
13397
13398 /* build the stack frame on the softirq stack */
13399 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13400 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13401 +
13402 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13403 + __set_fs(MAKE_MM_SEG(0));
13404 +#endif
13405
13406 call_on_stack(__do_softirq, isp);
13407 +
13408 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13409 + __set_fs(current_thread_info()->addr_limit);
13410 +#endif
13411 +
13412 /*
13413 * Shouldn't happen, we returned above if in_interrupt():
13414 */
13415 diff -urNp linux-2.6.39.4/arch/x86/kernel/irq.c linux-2.6.39.4/arch/x86/kernel/irq.c
13416 --- linux-2.6.39.4/arch/x86/kernel/irq.c 2011-05-19 00:06:34.000000000 -0400
13417 +++ linux-2.6.39.4/arch/x86/kernel/irq.c 2011-08-05 19:44:33.000000000 -0400
13418 @@ -17,7 +17,7 @@
13419 #include <asm/mce.h>
13420 #include <asm/hw_irq.h>
13421
13422 -atomic_t irq_err_count;
13423 +atomic_unchecked_t irq_err_count;
13424
13425 /* Function pointer for generic interrupt vector handling */
13426 void (*x86_platform_ipi_callback)(void) = NULL;
13427 @@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13428 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13429 seq_printf(p, " Machine check polls\n");
13430 #endif
13431 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13432 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13433 #if defined(CONFIG_X86_IO_APIC)
13434 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13435 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13436 #endif
13437 return 0;
13438 }
13439 @@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13440
13441 u64 arch_irq_stat(void)
13442 {
13443 - u64 sum = atomic_read(&irq_err_count);
13444 + u64 sum = atomic_read_unchecked(&irq_err_count);
13445
13446 #ifdef CONFIG_X86_IO_APIC
13447 - sum += atomic_read(&irq_mis_count);
13448 + sum += atomic_read_unchecked(&irq_mis_count);
13449 #endif
13450 return sum;
13451 }
13452 diff -urNp linux-2.6.39.4/arch/x86/kernel/kgdb.c linux-2.6.39.4/arch/x86/kernel/kgdb.c
13453 --- linux-2.6.39.4/arch/x86/kernel/kgdb.c 2011-05-19 00:06:34.000000000 -0400
13454 +++ linux-2.6.39.4/arch/x86/kernel/kgdb.c 2011-08-05 20:34:06.000000000 -0400
13455 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
13456 #ifdef CONFIG_X86_32
13457 switch (regno) {
13458 case GDB_SS:
13459 - if (!user_mode_vm(regs))
13460 + if (!user_mode(regs))
13461 *(unsigned long *)mem = __KERNEL_DS;
13462 break;
13463 case GDB_SP:
13464 - if (!user_mode_vm(regs))
13465 + if (!user_mode(regs))
13466 *(unsigned long *)mem = kernel_stack_pointer(regs);
13467 break;
13468 case GDB_GS:
13469 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
13470 case 'k':
13471 /* clear the trace bit */
13472 linux_regs->flags &= ~X86_EFLAGS_TF;
13473 - atomic_set(&kgdb_cpu_doing_single_step, -1);
13474 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
13475
13476 /* set the trace bit if we're stepping */
13477 if (remcomInBuffer[0] == 's') {
13478 linux_regs->flags |= X86_EFLAGS_TF;
13479 - atomic_set(&kgdb_cpu_doing_single_step,
13480 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
13481 raw_smp_processor_id());
13482 }
13483
13484 @@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
13485 return NOTIFY_DONE;
13486
13487 case DIE_DEBUG:
13488 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
13489 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
13490 if (user_mode(regs))
13491 return single_step_cont(regs, args);
13492 break;
13493 diff -urNp linux-2.6.39.4/arch/x86/kernel/kprobes.c linux-2.6.39.4/arch/x86/kernel/kprobes.c
13494 --- linux-2.6.39.4/arch/x86/kernel/kprobes.c 2011-05-19 00:06:34.000000000 -0400
13495 +++ linux-2.6.39.4/arch/x86/kernel/kprobes.c 2011-08-05 19:44:33.000000000 -0400
13496 @@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
13497 } __attribute__((packed)) *insn;
13498
13499 insn = (struct __arch_relative_insn *)from;
13500 +
13501 + pax_open_kernel();
13502 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
13503 insn->op = op;
13504 + pax_close_kernel();
13505 }
13506
13507 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
13508 @@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
13509 kprobe_opcode_t opcode;
13510 kprobe_opcode_t *orig_opcodes = opcodes;
13511
13512 - if (search_exception_tables((unsigned long)opcodes))
13513 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
13514 return 0; /* Page fault may occur on this address. */
13515
13516 retry:
13517 @@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
13518 }
13519 }
13520 insn_get_length(&insn);
13521 + pax_open_kernel();
13522 memcpy(dest, insn.kaddr, insn.length);
13523 + pax_close_kernel();
13524
13525 #ifdef CONFIG_X86_64
13526 if (insn_rip_relative(&insn)) {
13527 @@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
13528 (u8 *) dest;
13529 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
13530 disp = (u8 *) dest + insn_offset_displacement(&insn);
13531 + pax_open_kernel();
13532 *(s32 *) disp = (s32) newdisp;
13533 + pax_close_kernel();
13534 }
13535 #endif
13536 return insn.length;
13537 @@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
13538 */
13539 __copy_instruction(p->ainsn.insn, p->addr, 0);
13540
13541 - if (can_boost(p->addr))
13542 + if (can_boost(ktla_ktva(p->addr)))
13543 p->ainsn.boostable = 0;
13544 else
13545 p->ainsn.boostable = -1;
13546
13547 - p->opcode = *p->addr;
13548 + p->opcode = *(ktla_ktva(p->addr));
13549 }
13550
13551 int __kprobes arch_prepare_kprobe(struct kprobe *p)
13552 @@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
13553 * nor set current_kprobe, because it doesn't use single
13554 * stepping.
13555 */
13556 - regs->ip = (unsigned long)p->ainsn.insn;
13557 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13558 preempt_enable_no_resched();
13559 return;
13560 }
13561 @@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
13562 if (p->opcode == BREAKPOINT_INSTRUCTION)
13563 regs->ip = (unsigned long)p->addr;
13564 else
13565 - regs->ip = (unsigned long)p->ainsn.insn;
13566 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13567 }
13568
13569 /*
13570 @@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
13571 setup_singlestep(p, regs, kcb, 0);
13572 return 1;
13573 }
13574 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
13575 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
13576 /*
13577 * The breakpoint instruction was removed right
13578 * after we hit it. Another cpu has removed
13579 @@ -817,7 +824,7 @@ static void __kprobes resume_execution(s
13580 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
13581 {
13582 unsigned long *tos = stack_addr(regs);
13583 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
13584 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
13585 unsigned long orig_ip = (unsigned long)p->addr;
13586 kprobe_opcode_t *insn = p->ainsn.insn;
13587
13588 @@ -999,7 +1006,7 @@ int __kprobes kprobe_exceptions_notify(s
13589 struct die_args *args = data;
13590 int ret = NOTIFY_DONE;
13591
13592 - if (args->regs && user_mode_vm(args->regs))
13593 + if (args->regs && user_mode(args->regs))
13594 return ret;
13595
13596 switch (val) {
13597 @@ -1381,7 +1388,7 @@ int __kprobes arch_prepare_optimized_kpr
13598 * Verify if the address gap is in 2GB range, because this uses
13599 * a relative jump.
13600 */
13601 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
13602 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
13603 if (abs(rel) > 0x7fffffff)
13604 return -ERANGE;
13605
13606 @@ -1402,11 +1409,11 @@ int __kprobes arch_prepare_optimized_kpr
13607 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
13608
13609 /* Set probe function call */
13610 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
13611 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
13612
13613 /* Set returning jmp instruction at the tail of out-of-line buffer */
13614 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
13615 - (u8 *)op->kp.addr + op->optinsn.size);
13616 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
13617
13618 flush_icache_range((unsigned long) buf,
13619 (unsigned long) buf + TMPL_END_IDX +
13620 @@ -1428,7 +1435,7 @@ static void __kprobes setup_optimize_kpr
13621 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
13622
13623 /* Backup instructions which will be replaced by jump address */
13624 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
13625 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
13626 RELATIVE_ADDR_SIZE);
13627
13628 insn_buf[0] = RELATIVEJUMP_OPCODE;
13629 diff -urNp linux-2.6.39.4/arch/x86/kernel/ldt.c linux-2.6.39.4/arch/x86/kernel/ldt.c
13630 --- linux-2.6.39.4/arch/x86/kernel/ldt.c 2011-05-19 00:06:34.000000000 -0400
13631 +++ linux-2.6.39.4/arch/x86/kernel/ldt.c 2011-08-05 19:44:33.000000000 -0400
13632 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
13633 if (reload) {
13634 #ifdef CONFIG_SMP
13635 preempt_disable();
13636 - load_LDT(pc);
13637 + load_LDT_nolock(pc);
13638 if (!cpumask_equal(mm_cpumask(current->mm),
13639 cpumask_of(smp_processor_id())))
13640 smp_call_function(flush_ldt, current->mm, 1);
13641 preempt_enable();
13642 #else
13643 - load_LDT(pc);
13644 + load_LDT_nolock(pc);
13645 #endif
13646 }
13647 if (oldsize) {
13648 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
13649 return err;
13650
13651 for (i = 0; i < old->size; i++)
13652 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
13653 + write_ldt_entry(new->ldt, i, old->ldt + i);
13654 return 0;
13655 }
13656
13657 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct
13658 retval = copy_ldt(&mm->context, &old_mm->context);
13659 mutex_unlock(&old_mm->context.lock);
13660 }
13661 +
13662 + if (tsk == current) {
13663 + mm->context.vdso = 0;
13664 +
13665 +#ifdef CONFIG_X86_32
13666 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13667 + mm->context.user_cs_base = 0UL;
13668 + mm->context.user_cs_limit = ~0UL;
13669 +
13670 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13671 + cpus_clear(mm->context.cpu_user_cs_mask);
13672 +#endif
13673 +
13674 +#endif
13675 +#endif
13676 +
13677 + }
13678 +
13679 return retval;
13680 }
13681
13682 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
13683 }
13684 }
13685
13686 +#ifdef CONFIG_PAX_SEGMEXEC
13687 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
13688 + error = -EINVAL;
13689 + goto out_unlock;
13690 + }
13691 +#endif
13692 +
13693 fill_ldt(&ldt, &ldt_info);
13694 if (oldmode)
13695 ldt.avl = 0;
13696 diff -urNp linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c
13697 --- linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c 2011-05-19 00:06:34.000000000 -0400
13698 +++ linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c 2011-08-05 19:44:33.000000000 -0400
13699 @@ -27,7 +27,7 @@
13700 #include <asm/cacheflush.h>
13701 #include <asm/debugreg.h>
13702
13703 -static void set_idt(void *newidt, __u16 limit)
13704 +static void set_idt(struct desc_struct *newidt, __u16 limit)
13705 {
13706 struct desc_ptr curidt;
13707
13708 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
13709 }
13710
13711
13712 -static void set_gdt(void *newgdt, __u16 limit)
13713 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
13714 {
13715 struct desc_ptr curgdt;
13716
13717 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
13718 }
13719
13720 control_page = page_address(image->control_code_page);
13721 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
13722 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
13723
13724 relocate_kernel_ptr = control_page;
13725 page_list[PA_CONTROL_PAGE] = __pa(control_page);
13726 diff -urNp linux-2.6.39.4/arch/x86/kernel/microcode_intel.c linux-2.6.39.4/arch/x86/kernel/microcode_intel.c
13727 --- linux-2.6.39.4/arch/x86/kernel/microcode_intel.c 2011-05-19 00:06:34.000000000 -0400
13728 +++ linux-2.6.39.4/arch/x86/kernel/microcode_intel.c 2011-08-05 20:34:06.000000000 -0400
13729 @@ -440,13 +440,13 @@ static enum ucode_state request_microcod
13730
13731 static int get_ucode_user(void *to, const void *from, size_t n)
13732 {
13733 - return copy_from_user(to, from, n);
13734 + return copy_from_user(to, (__force const void __user *)from, n);
13735 }
13736
13737 static enum ucode_state
13738 request_microcode_user(int cpu, const void __user *buf, size_t size)
13739 {
13740 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
13741 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
13742 }
13743
13744 static void microcode_fini_cpu(int cpu)
13745 diff -urNp linux-2.6.39.4/arch/x86/kernel/module.c linux-2.6.39.4/arch/x86/kernel/module.c
13746 --- linux-2.6.39.4/arch/x86/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
13747 +++ linux-2.6.39.4/arch/x86/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
13748 @@ -35,21 +35,66 @@
13749 #define DEBUGP(fmt...)
13750 #endif
13751
13752 -void *module_alloc(unsigned long size)
13753 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
13754 {
13755 if (PAGE_ALIGN(size) > MODULES_LEN)
13756 return NULL;
13757 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
13758 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
13759 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
13760 -1, __builtin_return_address(0));
13761 }
13762
13763 +void *module_alloc(unsigned long size)
13764 +{
13765 +
13766 +#ifdef CONFIG_PAX_KERNEXEC
13767 + return __module_alloc(size, PAGE_KERNEL);
13768 +#else
13769 + return __module_alloc(size, PAGE_KERNEL_EXEC);
13770 +#endif
13771 +
13772 +}
13773 +
13774 /* Free memory returned from module_alloc */
13775 void module_free(struct module *mod, void *module_region)
13776 {
13777 vfree(module_region);
13778 }
13779
13780 +#ifdef CONFIG_PAX_KERNEXEC
13781 +#ifdef CONFIG_X86_32
13782 +void *module_alloc_exec(unsigned long size)
13783 +{
13784 + struct vm_struct *area;
13785 +
13786 + if (size == 0)
13787 + return NULL;
13788 +
13789 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
13790 + return area ? area->addr : NULL;
13791 +}
13792 +EXPORT_SYMBOL(module_alloc_exec);
13793 +
13794 +void module_free_exec(struct module *mod, void *module_region)
13795 +{
13796 + vunmap(module_region);
13797 +}
13798 +EXPORT_SYMBOL(module_free_exec);
13799 +#else
13800 +void module_free_exec(struct module *mod, void *module_region)
13801 +{
13802 + module_free(mod, module_region);
13803 +}
13804 +EXPORT_SYMBOL(module_free_exec);
13805 +
13806 +void *module_alloc_exec(unsigned long size)
13807 +{
13808 + return __module_alloc(size, PAGE_KERNEL_RX);
13809 +}
13810 +EXPORT_SYMBOL(module_alloc_exec);
13811 +#endif
13812 +#endif
13813 +
13814 /* We don't need anything special. */
13815 int module_frob_arch_sections(Elf_Ehdr *hdr,
13816 Elf_Shdr *sechdrs,
13817 @@ -69,14 +114,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13818 unsigned int i;
13819 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
13820 Elf32_Sym *sym;
13821 - uint32_t *location;
13822 + uint32_t *plocation, location;
13823
13824 DEBUGP("Applying relocate section %u to %u\n", relsec,
13825 sechdrs[relsec].sh_info);
13826 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
13827 /* This is where to make the change */
13828 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
13829 - + rel[i].r_offset;
13830 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
13831 + location = (uint32_t)plocation;
13832 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
13833 + plocation = ktla_ktva((void *)plocation);
13834 /* This is the symbol it is referring to. Note that all
13835 undefined symbols have been resolved. */
13836 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
13837 @@ -85,11 +132,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13838 switch (ELF32_R_TYPE(rel[i].r_info)) {
13839 case R_386_32:
13840 /* We add the value into the location given */
13841 - *location += sym->st_value;
13842 + pax_open_kernel();
13843 + *plocation += sym->st_value;
13844 + pax_close_kernel();
13845 break;
13846 case R_386_PC32:
13847 /* Add the value, subtract its postition */
13848 - *location += sym->st_value - (uint32_t)location;
13849 + pax_open_kernel();
13850 + *plocation += sym->st_value - location;
13851 + pax_close_kernel();
13852 break;
13853 default:
13854 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
13855 @@ -145,21 +196,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
13856 case R_X86_64_NONE:
13857 break;
13858 case R_X86_64_64:
13859 + pax_open_kernel();
13860 *(u64 *)loc = val;
13861 + pax_close_kernel();
13862 break;
13863 case R_X86_64_32:
13864 + pax_open_kernel();
13865 *(u32 *)loc = val;
13866 + pax_close_kernel();
13867 if (val != *(u32 *)loc)
13868 goto overflow;
13869 break;
13870 case R_X86_64_32S:
13871 + pax_open_kernel();
13872 *(s32 *)loc = val;
13873 + pax_close_kernel();
13874 if ((s64)val != *(s32 *)loc)
13875 goto overflow;
13876 break;
13877 case R_X86_64_PC32:
13878 val -= (u64)loc;
13879 + pax_open_kernel();
13880 *(u32 *)loc = val;
13881 + pax_close_kernel();
13882 +
13883 #if 0
13884 if ((s64)val != *(s32 *)loc)
13885 goto overflow;
13886 diff -urNp linux-2.6.39.4/arch/x86/kernel/paravirt.c linux-2.6.39.4/arch/x86/kernel/paravirt.c
13887 --- linux-2.6.39.4/arch/x86/kernel/paravirt.c 2011-05-19 00:06:34.000000000 -0400
13888 +++ linux-2.6.39.4/arch/x86/kernel/paravirt.c 2011-08-05 19:44:33.000000000 -0400
13889 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
13890 {
13891 return x;
13892 }
13893 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13894 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
13895 +#endif
13896
13897 void __init default_banner(void)
13898 {
13899 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
13900 * corresponding structure. */
13901 static void *get_call_destination(u8 type)
13902 {
13903 - struct paravirt_patch_template tmpl = {
13904 + const struct paravirt_patch_template tmpl = {
13905 .pv_init_ops = pv_init_ops,
13906 .pv_time_ops = pv_time_ops,
13907 .pv_cpu_ops = pv_cpu_ops,
13908 @@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
13909 .pv_lock_ops = pv_lock_ops,
13910 #endif
13911 };
13912 +
13913 + pax_track_stack();
13914 +
13915 return *((void **)&tmpl + type);
13916 }
13917
13918 @@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
13919 if (opfunc == NULL)
13920 /* If there's no function, patch it with a ud2a (BUG) */
13921 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
13922 - else if (opfunc == _paravirt_nop)
13923 + else if (opfunc == (void *)_paravirt_nop)
13924 /* If the operation is a nop, then nop the callsite */
13925 ret = paravirt_patch_nop();
13926
13927 /* identity functions just return their single argument */
13928 - else if (opfunc == _paravirt_ident_32)
13929 + else if (opfunc == (void *)_paravirt_ident_32)
13930 ret = paravirt_patch_ident_32(insnbuf, len);
13931 - else if (opfunc == _paravirt_ident_64)
13932 + else if (opfunc == (void *)_paravirt_ident_64)
13933 ret = paravirt_patch_ident_64(insnbuf, len);
13934 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13935 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
13936 + ret = paravirt_patch_ident_64(insnbuf, len);
13937 +#endif
13938
13939 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
13940 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
13941 @@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
13942 if (insn_len > len || start == NULL)
13943 insn_len = len;
13944 else
13945 - memcpy(insnbuf, start, insn_len);
13946 + memcpy(insnbuf, ktla_ktva(start), insn_len);
13947
13948 return insn_len;
13949 }
13950 @@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
13951 preempt_enable();
13952 }
13953
13954 -struct pv_info pv_info = {
13955 +struct pv_info pv_info __read_only = {
13956 .name = "bare hardware",
13957 .paravirt_enabled = 0,
13958 .kernel_rpl = 0,
13959 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
13960 };
13961
13962 -struct pv_init_ops pv_init_ops = {
13963 +struct pv_init_ops pv_init_ops __read_only = {
13964 .patch = native_patch,
13965 };
13966
13967 -struct pv_time_ops pv_time_ops = {
13968 +struct pv_time_ops pv_time_ops __read_only = {
13969 .sched_clock = native_sched_clock,
13970 };
13971
13972 -struct pv_irq_ops pv_irq_ops = {
13973 +struct pv_irq_ops pv_irq_ops __read_only = {
13974 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
13975 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
13976 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
13977 @@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
13978 #endif
13979 };
13980
13981 -struct pv_cpu_ops pv_cpu_ops = {
13982 +struct pv_cpu_ops pv_cpu_ops __read_only = {
13983 .cpuid = native_cpuid,
13984 .get_debugreg = native_get_debugreg,
13985 .set_debugreg = native_set_debugreg,
13986 @@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
13987 .end_context_switch = paravirt_nop,
13988 };
13989
13990 -struct pv_apic_ops pv_apic_ops = {
13991 +struct pv_apic_ops pv_apic_ops __read_only = {
13992 #ifdef CONFIG_X86_LOCAL_APIC
13993 .startup_ipi_hook = paravirt_nop,
13994 #endif
13995 };
13996
13997 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
13998 +#ifdef CONFIG_X86_32
13999 +#ifdef CONFIG_X86_PAE
14000 +/* 64-bit pagetable entries */
14001 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
14002 +#else
14003 /* 32-bit pagetable entries */
14004 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
14005 +#endif
14006 #else
14007 /* 64-bit pagetable entries */
14008 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
14009 #endif
14010
14011 -struct pv_mmu_ops pv_mmu_ops = {
14012 +struct pv_mmu_ops pv_mmu_ops __read_only = {
14013
14014 .read_cr2 = native_read_cr2,
14015 .write_cr2 = native_write_cr2,
14016 @@ -465,6 +480,12 @@ struct pv_mmu_ops pv_mmu_ops = {
14017 },
14018
14019 .set_fixmap = native_set_fixmap,
14020 +
14021 +#ifdef CONFIG_PAX_KERNEXEC
14022 + .pax_open_kernel = native_pax_open_kernel,
14023 + .pax_close_kernel = native_pax_close_kernel,
14024 +#endif
14025 +
14026 };
14027
14028 EXPORT_SYMBOL_GPL(pv_time_ops);
14029 diff -urNp linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c
14030 --- linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c 2011-05-19 00:06:34.000000000 -0400
14031 +++ linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c 2011-08-05 19:44:33.000000000 -0400
14032 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
14033 arch_spin_lock(lock);
14034 }
14035
14036 -struct pv_lock_ops pv_lock_ops = {
14037 +struct pv_lock_ops pv_lock_ops __read_only = {
14038 #ifdef CONFIG_SMP
14039 .spin_is_locked = __ticket_spin_is_locked,
14040 .spin_is_contended = __ticket_spin_is_contended,
14041 diff -urNp linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c
14042 --- linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c 2011-05-19 00:06:34.000000000 -0400
14043 +++ linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c 2011-08-05 19:44:35.000000000 -0400
14044 @@ -2,7 +2,7 @@
14045 #include <asm/iommu_table.h>
14046 #include <linux/string.h>
14047 #include <linux/kallsyms.h>
14048 -
14049 +#include <linux/sched.h>
14050
14051 #define DEBUG 1
14052
14053 @@ -53,6 +53,8 @@ void __init check_iommu_entries(struct i
14054 char sym_p[KSYM_SYMBOL_LEN];
14055 char sym_q[KSYM_SYMBOL_LEN];
14056
14057 + pax_track_stack();
14058 +
14059 /* Simple cyclic dependency checker. */
14060 for (p = start; p < finish; p++) {
14061 q = find_dependents_of(start, finish, p);
14062 diff -urNp linux-2.6.39.4/arch/x86/kernel/process_32.c linux-2.6.39.4/arch/x86/kernel/process_32.c
14063 --- linux-2.6.39.4/arch/x86/kernel/process_32.c 2011-06-25 12:55:22.000000000 -0400
14064 +++ linux-2.6.39.4/arch/x86/kernel/process_32.c 2011-08-05 19:44:35.000000000 -0400
14065 @@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14066 unsigned long thread_saved_pc(struct task_struct *tsk)
14067 {
14068 return ((unsigned long *)tsk->thread.sp)[3];
14069 +//XXX return tsk->thread.eip;
14070 }
14071
14072 #ifndef CONFIG_SMP
14073 @@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14074 unsigned long sp;
14075 unsigned short ss, gs;
14076
14077 - if (user_mode_vm(regs)) {
14078 + if (user_mode(regs)) {
14079 sp = regs->sp;
14080 ss = regs->ss & 0xffff;
14081 - gs = get_user_gs(regs);
14082 } else {
14083 sp = kernel_stack_pointer(regs);
14084 savesegment(ss, ss);
14085 - savesegment(gs, gs);
14086 }
14087 + gs = get_user_gs(regs);
14088
14089 show_regs_common();
14090
14091 @@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14092 struct task_struct *tsk;
14093 int err;
14094
14095 - childregs = task_pt_regs(p);
14096 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14097 *childregs = *regs;
14098 childregs->ax = 0;
14099 childregs->sp = sp;
14100
14101 p->thread.sp = (unsigned long) childregs;
14102 p->thread.sp0 = (unsigned long) (childregs+1);
14103 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14104
14105 p->thread.ip = (unsigned long) ret_from_fork;
14106
14107 @@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14108 struct thread_struct *prev = &prev_p->thread,
14109 *next = &next_p->thread;
14110 int cpu = smp_processor_id();
14111 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14112 + struct tss_struct *tss = init_tss + cpu;
14113 bool preload_fpu;
14114
14115 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14116 @@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14117 */
14118 lazy_save_gs(prev->gs);
14119
14120 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14121 + __set_fs(task_thread_info(next_p)->addr_limit);
14122 +#endif
14123 +
14124 /*
14125 * Load the per-thread Thread-Local Storage descriptor.
14126 */
14127 @@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14128 */
14129 arch_end_context_switch(next_p);
14130
14131 + percpu_write(current_task, next_p);
14132 + percpu_write(current_tinfo, &next_p->tinfo);
14133 +
14134 if (preload_fpu)
14135 __math_state_restore();
14136
14137 @@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
14138 if (prev->gs | next->gs)
14139 lazy_load_gs(next->gs);
14140
14141 - percpu_write(current_task, next_p);
14142 -
14143 return prev_p;
14144 }
14145
14146 @@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14147 } while (count++ < 16);
14148 return 0;
14149 }
14150 -
14151 diff -urNp linux-2.6.39.4/arch/x86/kernel/process_64.c linux-2.6.39.4/arch/x86/kernel/process_64.c
14152 --- linux-2.6.39.4/arch/x86/kernel/process_64.c 2011-06-25 12:55:22.000000000 -0400
14153 +++ linux-2.6.39.4/arch/x86/kernel/process_64.c 2011-08-05 19:44:35.000000000 -0400
14154 @@ -87,7 +87,7 @@ static void __exit_idle(void)
14155 void exit_idle(void)
14156 {
14157 /* idle loop has pid 0 */
14158 - if (current->pid)
14159 + if (task_pid_nr(current))
14160 return;
14161 __exit_idle();
14162 }
14163 @@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14164 struct pt_regs *childregs;
14165 struct task_struct *me = current;
14166
14167 - childregs = ((struct pt_regs *)
14168 - (THREAD_SIZE + task_stack_page(p))) - 1;
14169 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14170 *childregs = *regs;
14171
14172 childregs->ax = 0;
14173 @@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
14174 p->thread.sp = (unsigned long) childregs;
14175 p->thread.sp0 = (unsigned long) (childregs+1);
14176 p->thread.usersp = me->thread.usersp;
14177 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14178
14179 set_tsk_thread_flag(p, TIF_FORK);
14180
14181 @@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
14182 struct thread_struct *prev = &prev_p->thread;
14183 struct thread_struct *next = &next_p->thread;
14184 int cpu = smp_processor_id();
14185 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14186 + struct tss_struct *tss = init_tss + cpu;
14187 unsigned fsindex, gsindex;
14188 bool preload_fpu;
14189
14190 @@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
14191 prev->usersp = percpu_read(old_rsp);
14192 percpu_write(old_rsp, next->usersp);
14193 percpu_write(current_task, next_p);
14194 + percpu_write(current_tinfo, &next_p->tinfo);
14195
14196 - percpu_write(kernel_stack,
14197 - (unsigned long)task_stack_page(next_p) +
14198 - THREAD_SIZE - KERNEL_STACK_OFFSET);
14199 + percpu_write(kernel_stack, next->sp0);
14200
14201 /*
14202 * Now maybe reload the debug registers and handle I/O bitmaps
14203 @@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
14204 if (!p || p == current || p->state == TASK_RUNNING)
14205 return 0;
14206 stack = (unsigned long)task_stack_page(p);
14207 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
14208 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
14209 return 0;
14210 fp = *(u64 *)(p->thread.sp);
14211 do {
14212 - if (fp < (unsigned long)stack ||
14213 - fp >= (unsigned long)stack+THREAD_SIZE)
14214 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
14215 return 0;
14216 ip = *(u64 *)(fp+8);
14217 if (!in_sched_functions(ip))
14218 diff -urNp linux-2.6.39.4/arch/x86/kernel/process.c linux-2.6.39.4/arch/x86/kernel/process.c
14219 --- linux-2.6.39.4/arch/x86/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
14220 +++ linux-2.6.39.4/arch/x86/kernel/process.c 2011-08-05 19:44:35.000000000 -0400
14221 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14222
14223 void free_thread_info(struct thread_info *ti)
14224 {
14225 - free_thread_xstate(ti->task);
14226 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14227 }
14228
14229 +static struct kmem_cache *task_struct_cachep;
14230 +
14231 void arch_task_cache_init(void)
14232 {
14233 - task_xstate_cachep =
14234 - kmem_cache_create("task_xstate", xstate_size,
14235 + /* create a slab on which task_structs can be allocated */
14236 + task_struct_cachep =
14237 + kmem_cache_create("task_struct", sizeof(struct task_struct),
14238 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14239 +
14240 + task_xstate_cachep =
14241 + kmem_cache_create("task_xstate", xstate_size,
14242 __alignof__(union thread_xstate),
14243 - SLAB_PANIC | SLAB_NOTRACK, NULL);
14244 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14245 +}
14246 +
14247 +struct task_struct *alloc_task_struct_node(int node)
14248 +{
14249 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14250 +}
14251 +
14252 +void free_task_struct(struct task_struct *task)
14253 +{
14254 + free_thread_xstate(task);
14255 + kmem_cache_free(task_struct_cachep, task);
14256 }
14257
14258 /*
14259 @@ -70,7 +87,7 @@ void exit_thread(void)
14260 unsigned long *bp = t->io_bitmap_ptr;
14261
14262 if (bp) {
14263 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14264 + struct tss_struct *tss = init_tss + get_cpu();
14265
14266 t->io_bitmap_ptr = NULL;
14267 clear_thread_flag(TIF_IO_BITMAP);
14268 @@ -106,7 +123,7 @@ void show_regs_common(void)
14269
14270 printk(KERN_CONT "\n");
14271 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
14272 - current->pid, current->comm, print_tainted(),
14273 + task_pid_nr(current), current->comm, print_tainted(),
14274 init_utsname()->release,
14275 (int)strcspn(init_utsname()->version, " "),
14276 init_utsname()->version);
14277 @@ -120,6 +137,9 @@ void flush_thread(void)
14278 {
14279 struct task_struct *tsk = current;
14280
14281 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14282 + loadsegment(gs, 0);
14283 +#endif
14284 flush_ptrace_hw_breakpoint(tsk);
14285 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14286 /*
14287 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14288 regs.di = (unsigned long) arg;
14289
14290 #ifdef CONFIG_X86_32
14291 - regs.ds = __USER_DS;
14292 - regs.es = __USER_DS;
14293 + regs.ds = __KERNEL_DS;
14294 + regs.es = __KERNEL_DS;
14295 regs.fs = __KERNEL_PERCPU;
14296 - regs.gs = __KERNEL_STACK_CANARY;
14297 + savesegment(gs, regs.gs);
14298 #else
14299 regs.ss = __KERNEL_DS;
14300 #endif
14301 @@ -401,7 +421,7 @@ void default_idle(void)
14302 EXPORT_SYMBOL(default_idle);
14303 #endif
14304
14305 -void stop_this_cpu(void *dummy)
14306 +__noreturn void stop_this_cpu(void *dummy)
14307 {
14308 local_irq_disable();
14309 /*
14310 @@ -665,16 +685,34 @@ static int __init idle_setup(char *str)
14311 }
14312 early_param("idle", idle_setup);
14313
14314 -unsigned long arch_align_stack(unsigned long sp)
14315 +#ifdef CONFIG_PAX_RANDKSTACK
14316 +asmlinkage void pax_randomize_kstack(void)
14317 {
14318 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14319 - sp -= get_random_int() % 8192;
14320 - return sp & ~0xf;
14321 -}
14322 + struct thread_struct *thread = &current->thread;
14323 + unsigned long time;
14324
14325 -unsigned long arch_randomize_brk(struct mm_struct *mm)
14326 -{
14327 - unsigned long range_end = mm->brk + 0x02000000;
14328 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14329 -}
14330 + if (!randomize_va_space)
14331 + return;
14332 +
14333 + rdtscl(time);
14334 +
14335 + /* P4 seems to return a 0 LSB, ignore it */
14336 +#ifdef CONFIG_MPENTIUM4
14337 + time &= 0x3EUL;
14338 + time <<= 2;
14339 +#elif defined(CONFIG_X86_64)
14340 + time &= 0xFUL;
14341 + time <<= 4;
14342 +#else
14343 + time &= 0x1FUL;
14344 + time <<= 3;
14345 +#endif
14346 +
14347 + thread->sp0 ^= time;
14348 + load_sp0(init_tss + smp_processor_id(), thread);
14349
14350 +#ifdef CONFIG_X86_64
14351 + percpu_write(kernel_stack, thread->sp0);
14352 +#endif
14353 +}
14354 +#endif
14355 diff -urNp linux-2.6.39.4/arch/x86/kernel/ptrace.c linux-2.6.39.4/arch/x86/kernel/ptrace.c
14356 --- linux-2.6.39.4/arch/x86/kernel/ptrace.c 2011-05-19 00:06:34.000000000 -0400
14357 +++ linux-2.6.39.4/arch/x86/kernel/ptrace.c 2011-08-05 19:44:35.000000000 -0400
14358 @@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
14359 unsigned long addr, unsigned long data)
14360 {
14361 int ret;
14362 - unsigned long __user *datap = (unsigned long __user *)data;
14363 + unsigned long __user *datap = (__force unsigned long __user *)data;
14364
14365 switch (request) {
14366 /* read the word at location addr in the USER area. */
14367 @@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
14368 if ((int) addr < 0)
14369 return -EIO;
14370 ret = do_get_thread_area(child, addr,
14371 - (struct user_desc __user *)data);
14372 + (__force struct user_desc __user *) data);
14373 break;
14374
14375 case PTRACE_SET_THREAD_AREA:
14376 if ((int) addr < 0)
14377 return -EIO;
14378 ret = do_set_thread_area(child, addr,
14379 - (struct user_desc __user *)data, 0);
14380 + (__force struct user_desc __user *) data, 0);
14381 break;
14382 #endif
14383
14384 @@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
14385 memset(info, 0, sizeof(*info));
14386 info->si_signo = SIGTRAP;
14387 info->si_code = si_code;
14388 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
14389 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
14390 }
14391
14392 void user_single_step_siginfo(struct task_struct *tsk,
14393 @@ -1363,7 +1363,7 @@ void send_sigtrap(struct task_struct *ts
14394 * We must return the syscall number to actually look up in the table.
14395 * This can be -1L to skip running any syscall at all.
14396 */
14397 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
14398 +long syscall_trace_enter(struct pt_regs *regs)
14399 {
14400 long ret = 0;
14401
14402 @@ -1408,7 +1408,7 @@ asmregparm long syscall_trace_enter(stru
14403 return ret ?: regs->orig_ax;
14404 }
14405
14406 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
14407 +void syscall_trace_leave(struct pt_regs *regs)
14408 {
14409 bool step;
14410
14411 diff -urNp linux-2.6.39.4/arch/x86/kernel/pvclock.c linux-2.6.39.4/arch/x86/kernel/pvclock.c
14412 --- linux-2.6.39.4/arch/x86/kernel/pvclock.c 2011-05-19 00:06:34.000000000 -0400
14413 +++ linux-2.6.39.4/arch/x86/kernel/pvclock.c 2011-08-05 19:44:35.000000000 -0400
14414 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
14415 return pv_tsc_khz;
14416 }
14417
14418 -static atomic64_t last_value = ATOMIC64_INIT(0);
14419 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
14420
14421 void pvclock_resume(void)
14422 {
14423 - atomic64_set(&last_value, 0);
14424 + atomic64_set_unchecked(&last_value, 0);
14425 }
14426
14427 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
14428 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
14429 * updating at the same time, and one of them could be slightly behind,
14430 * making the assumption that last_value always go forward fail to hold.
14431 */
14432 - last = atomic64_read(&last_value);
14433 + last = atomic64_read_unchecked(&last_value);
14434 do {
14435 if (ret < last)
14436 return last;
14437 - last = atomic64_cmpxchg(&last_value, last, ret);
14438 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
14439 } while (unlikely(last != ret));
14440
14441 return ret;
14442 diff -urNp linux-2.6.39.4/arch/x86/kernel/reboot.c linux-2.6.39.4/arch/x86/kernel/reboot.c
14443 --- linux-2.6.39.4/arch/x86/kernel/reboot.c 2011-08-05 21:11:51.000000000 -0400
14444 +++ linux-2.6.39.4/arch/x86/kernel/reboot.c 2011-08-05 21:12:20.000000000 -0400
14445 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
14446 EXPORT_SYMBOL(pm_power_off);
14447
14448 static const struct desc_ptr no_idt = {};
14449 -static int reboot_mode;
14450 +static unsigned short reboot_mode;
14451 enum reboot_type reboot_type = BOOT_KBD;
14452 int reboot_force;
14453
14454 @@ -307,13 +307,17 @@ core_initcall(reboot_init);
14455 extern const unsigned char machine_real_restart_asm[];
14456 extern const u64 machine_real_restart_gdt[3];
14457
14458 -void machine_real_restart(unsigned int type)
14459 +__noreturn void machine_real_restart(unsigned int type)
14460 {
14461 void *restart_va;
14462 unsigned long restart_pa;
14463 - void (*restart_lowmem)(unsigned int);
14464 + void (* __noreturn restart_lowmem)(unsigned int);
14465 u64 *lowmem_gdt;
14466
14467 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14468 + struct desc_struct *gdt;
14469 +#endif
14470 +
14471 local_irq_disable();
14472
14473 /* Write zero to CMOS register number 0x0f, which the BIOS POST
14474 @@ -339,14 +343,14 @@ void machine_real_restart(unsigned int t
14475 boot)". This seems like a fairly standard thing that gets set by
14476 REBOOT.COM programs, and the previous reset routine did this
14477 too. */
14478 - *((unsigned short *)0x472) = reboot_mode;
14479 + *(unsigned short *)(__va(0x472)) = reboot_mode;
14480
14481 /* Patch the GDT in the low memory trampoline */
14482 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
14483
14484 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
14485 restart_pa = virt_to_phys(restart_va);
14486 - restart_lowmem = (void (*)(unsigned int))restart_pa;
14487 + restart_lowmem = (void *)restart_pa;
14488
14489 /* GDT[0]: GDT self-pointer */
14490 lowmem_gdt[0] =
14491 @@ -357,7 +361,33 @@ void machine_real_restart(unsigned int t
14492 GDT_ENTRY(0x009b, restart_pa, 0xffff);
14493
14494 /* Jump to the identity-mapped low memory code */
14495 +
14496 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14497 + gdt = get_cpu_gdt_table(smp_processor_id());
14498 + pax_open_kernel();
14499 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14500 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
14501 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
14502 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
14503 +#endif
14504 +#ifdef CONFIG_PAX_KERNEXEC
14505 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
14506 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
14507 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
14508 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
14509 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
14510 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
14511 +#endif
14512 + pax_close_kernel();
14513 +#endif
14514 +
14515 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14516 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
14517 + unreachable();
14518 +#else
14519 restart_lowmem(type);
14520 +#endif
14521 +
14522 }
14523 #ifdef CONFIG_APM_MODULE
14524 EXPORT_SYMBOL(machine_real_restart);
14525 @@ -486,7 +516,7 @@ void __attribute__((weak)) mach_reboot_f
14526 {
14527 }
14528
14529 -static void native_machine_emergency_restart(void)
14530 +__noreturn static void native_machine_emergency_restart(void)
14531 {
14532 int i;
14533
14534 @@ -601,13 +631,13 @@ void native_machine_shutdown(void)
14535 #endif
14536 }
14537
14538 -static void __machine_emergency_restart(int emergency)
14539 +static __noreturn void __machine_emergency_restart(int emergency)
14540 {
14541 reboot_emergency = emergency;
14542 machine_ops.emergency_restart();
14543 }
14544
14545 -static void native_machine_restart(char *__unused)
14546 +static __noreturn void native_machine_restart(char *__unused)
14547 {
14548 printk("machine restart\n");
14549
14550 @@ -616,7 +646,7 @@ static void native_machine_restart(char
14551 __machine_emergency_restart(0);
14552 }
14553
14554 -static void native_machine_halt(void)
14555 +static __noreturn void native_machine_halt(void)
14556 {
14557 /* stop other cpus and apics */
14558 machine_shutdown();
14559 @@ -627,7 +657,7 @@ static void native_machine_halt(void)
14560 stop_this_cpu(NULL);
14561 }
14562
14563 -static void native_machine_power_off(void)
14564 +__noreturn static void native_machine_power_off(void)
14565 {
14566 if (pm_power_off) {
14567 if (!reboot_force)
14568 @@ -636,6 +666,7 @@ static void native_machine_power_off(voi
14569 }
14570 /* a fallback in case there is no PM info available */
14571 tboot_shutdown(TB_SHUTDOWN_HALT);
14572 + unreachable();
14573 }
14574
14575 struct machine_ops machine_ops = {
14576 diff -urNp linux-2.6.39.4/arch/x86/kernel/setup.c linux-2.6.39.4/arch/x86/kernel/setup.c
14577 --- linux-2.6.39.4/arch/x86/kernel/setup.c 2011-06-25 12:55:22.000000000 -0400
14578 +++ linux-2.6.39.4/arch/x86/kernel/setup.c 2011-08-05 19:44:35.000000000 -0400
14579 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
14580 * area (640->1Mb) as ram even though it is not.
14581 * take them out.
14582 */
14583 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
14584 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
14585 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
14586 }
14587
14588 @@ -775,14 +775,14 @@ void __init setup_arch(char **cmdline_p)
14589
14590 if (!boot_params.hdr.root_flags)
14591 root_mountflags &= ~MS_RDONLY;
14592 - init_mm.start_code = (unsigned long) _text;
14593 - init_mm.end_code = (unsigned long) _etext;
14594 + init_mm.start_code = ktla_ktva((unsigned long) _text);
14595 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
14596 init_mm.end_data = (unsigned long) _edata;
14597 init_mm.brk = _brk_end;
14598
14599 - code_resource.start = virt_to_phys(_text);
14600 - code_resource.end = virt_to_phys(_etext)-1;
14601 - data_resource.start = virt_to_phys(_etext);
14602 + code_resource.start = virt_to_phys(ktla_ktva(_text));
14603 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
14604 + data_resource.start = virt_to_phys(_sdata);
14605 data_resource.end = virt_to_phys(_edata)-1;
14606 bss_resource.start = virt_to_phys(&__bss_start);
14607 bss_resource.end = virt_to_phys(&__bss_stop)-1;
14608 diff -urNp linux-2.6.39.4/arch/x86/kernel/setup_percpu.c linux-2.6.39.4/arch/x86/kernel/setup_percpu.c
14609 --- linux-2.6.39.4/arch/x86/kernel/setup_percpu.c 2011-05-19 00:06:34.000000000 -0400
14610 +++ linux-2.6.39.4/arch/x86/kernel/setup_percpu.c 2011-08-05 19:44:35.000000000 -0400
14611 @@ -21,19 +21,17 @@
14612 #include <asm/cpu.h>
14613 #include <asm/stackprotector.h>
14614
14615 -DEFINE_PER_CPU(int, cpu_number);
14616 +#ifdef CONFIG_SMP
14617 +DEFINE_PER_CPU(unsigned int, cpu_number);
14618 EXPORT_PER_CPU_SYMBOL(cpu_number);
14619 +#endif
14620
14621 -#ifdef CONFIG_X86_64
14622 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
14623 -#else
14624 -#define BOOT_PERCPU_OFFSET 0
14625 -#endif
14626
14627 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
14628 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
14629
14630 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
14631 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
14632 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
14633 };
14634 EXPORT_SYMBOL(__per_cpu_offset);
14635 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
14636 {
14637 #ifdef CONFIG_X86_32
14638 struct desc_struct gdt;
14639 + unsigned long base = per_cpu_offset(cpu);
14640
14641 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
14642 - 0x2 | DESCTYPE_S, 0x8);
14643 - gdt.s = 1;
14644 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
14645 + 0x83 | DESCTYPE_S, 0xC);
14646 write_gdt_entry(get_cpu_gdt_table(cpu),
14647 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
14648 #endif
14649 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
14650 /* alrighty, percpu areas up and running */
14651 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
14652 for_each_possible_cpu(cpu) {
14653 +#ifdef CONFIG_CC_STACKPROTECTOR
14654 +#ifdef CONFIG_X86_32
14655 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
14656 +#endif
14657 +#endif
14658 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
14659 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
14660 per_cpu(cpu_number, cpu) = cpu;
14661 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
14662 */
14663 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
14664 #endif
14665 +#ifdef CONFIG_CC_STACKPROTECTOR
14666 +#ifdef CONFIG_X86_32
14667 + if (!cpu)
14668 + per_cpu(stack_canary.canary, cpu) = canary;
14669 +#endif
14670 +#endif
14671 /*
14672 * Up to this point, the boot CPU has been using .init.data
14673 * area. Reload any changed state for the boot CPU.
14674 diff -urNp linux-2.6.39.4/arch/x86/kernel/signal.c linux-2.6.39.4/arch/x86/kernel/signal.c
14675 --- linux-2.6.39.4/arch/x86/kernel/signal.c 2011-05-19 00:06:34.000000000 -0400
14676 +++ linux-2.6.39.4/arch/x86/kernel/signal.c 2011-08-05 19:44:35.000000000 -0400
14677 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
14678 * Align the stack pointer according to the i386 ABI,
14679 * i.e. so that on function entry ((sp + 4) & 15) == 0.
14680 */
14681 - sp = ((sp + 4) & -16ul) - 4;
14682 + sp = ((sp - 12) & -16ul) - 4;
14683 #else /* !CONFIG_X86_32 */
14684 sp = round_down(sp, 16) - 8;
14685 #endif
14686 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
14687 * Return an always-bogus address instead so we will die with SIGSEGV.
14688 */
14689 if (onsigstack && !likely(on_sig_stack(sp)))
14690 - return (void __user *)-1L;
14691 + return (__force void __user *)-1L;
14692
14693 /* save i387 state */
14694 if (used_math() && save_i387_xstate(*fpstate) < 0)
14695 - return (void __user *)-1L;
14696 + return (__force void __user *)-1L;
14697
14698 return (void __user *)sp;
14699 }
14700 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
14701 }
14702
14703 if (current->mm->context.vdso)
14704 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14705 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14706 else
14707 - restorer = &frame->retcode;
14708 + restorer = (void __user *)&frame->retcode;
14709 if (ka->sa.sa_flags & SA_RESTORER)
14710 restorer = ka->sa.sa_restorer;
14711
14712 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
14713 * reasons and because gdb uses it as a signature to notice
14714 * signal handler stack frames.
14715 */
14716 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
14717 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
14718
14719 if (err)
14720 return -EFAULT;
14721 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
14722 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
14723
14724 /* Set up to return from userspace. */
14725 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14726 + if (current->mm->context.vdso)
14727 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14728 + else
14729 + restorer = (void __user *)&frame->retcode;
14730 if (ka->sa.sa_flags & SA_RESTORER)
14731 restorer = ka->sa.sa_restorer;
14732 put_user_ex(restorer, &frame->pretcode);
14733 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
14734 * reasons and because gdb uses it as a signature to notice
14735 * signal handler stack frames.
14736 */
14737 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
14738 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
14739 } put_user_catch(err);
14740
14741 if (err)
14742 @@ -773,6 +776,8 @@ static void do_signal(struct pt_regs *re
14743 int signr;
14744 sigset_t *oldset;
14745
14746 + pax_track_stack();
14747 +
14748 /*
14749 * We want the common case to go fast, which is why we may in certain
14750 * cases get here from kernel mode. Just return without doing anything
14751 @@ -780,7 +785,7 @@ static void do_signal(struct pt_regs *re
14752 * X86_32: vm86 regs switched out by assembly code before reaching
14753 * here, so testing against kernel CS suffices.
14754 */
14755 - if (!user_mode(regs))
14756 + if (!user_mode_novm(regs))
14757 return;
14758
14759 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
14760 diff -urNp linux-2.6.39.4/arch/x86/kernel/smpboot.c linux-2.6.39.4/arch/x86/kernel/smpboot.c
14761 --- linux-2.6.39.4/arch/x86/kernel/smpboot.c 2011-06-25 12:55:22.000000000 -0400
14762 +++ linux-2.6.39.4/arch/x86/kernel/smpboot.c 2011-08-05 19:44:35.000000000 -0400
14763 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
14764 set_idle_for_cpu(cpu, c_idle.idle);
14765 do_rest:
14766 per_cpu(current_task, cpu) = c_idle.idle;
14767 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
14768 #ifdef CONFIG_X86_32
14769 /* Stack for startup_32 can be just as for start_secondary onwards */
14770 irq_ctx_init(cpu);
14771 #else
14772 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
14773 initial_gs = per_cpu_offset(cpu);
14774 - per_cpu(kernel_stack, cpu) =
14775 - (unsigned long)task_stack_page(c_idle.idle) -
14776 - KERNEL_STACK_OFFSET + THREAD_SIZE;
14777 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
14778 #endif
14779 +
14780 + pax_open_kernel();
14781 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14782 + pax_close_kernel();
14783 +
14784 initial_code = (unsigned long)start_secondary;
14785 stack_start = c_idle.idle->thread.sp;
14786
14787 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
14788
14789 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
14790
14791 +#ifdef CONFIG_PAX_PER_CPU_PGD
14792 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
14793 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14794 + KERNEL_PGD_PTRS);
14795 +#endif
14796 +
14797 err = do_boot_cpu(apicid, cpu);
14798 if (err) {
14799 pr_debug("do_boot_cpu failed %d\n", err);
14800 diff -urNp linux-2.6.39.4/arch/x86/kernel/step.c linux-2.6.39.4/arch/x86/kernel/step.c
14801 --- linux-2.6.39.4/arch/x86/kernel/step.c 2011-05-19 00:06:34.000000000 -0400
14802 +++ linux-2.6.39.4/arch/x86/kernel/step.c 2011-08-05 19:44:35.000000000 -0400
14803 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
14804 struct desc_struct *desc;
14805 unsigned long base;
14806
14807 - seg &= ~7UL;
14808 + seg >>= 3;
14809
14810 mutex_lock(&child->mm->context.lock);
14811 - if (unlikely((seg >> 3) >= child->mm->context.size))
14812 + if (unlikely(seg >= child->mm->context.size))
14813 addr = -1L; /* bogus selector, access would fault */
14814 else {
14815 desc = child->mm->context.ldt + seg;
14816 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
14817 addr += base;
14818 }
14819 mutex_unlock(&child->mm->context.lock);
14820 - }
14821 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
14822 + addr = ktla_ktva(addr);
14823
14824 return addr;
14825 }
14826 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
14827 unsigned char opcode[15];
14828 unsigned long addr = convert_ip_to_linear(child, regs);
14829
14830 + if (addr == -EINVAL)
14831 + return 0;
14832 +
14833 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
14834 for (i = 0; i < copied; i++) {
14835 switch (opcode[i]) {
14836 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
14837
14838 #ifdef CONFIG_X86_64
14839 case 0x40 ... 0x4f:
14840 - if (regs->cs != __USER_CS)
14841 + if ((regs->cs & 0xffff) != __USER_CS)
14842 /* 32-bit mode: register increment */
14843 return 0;
14844 /* 64-bit mode: REX prefix */
14845 diff -urNp linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S
14846 --- linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S 2011-05-19 00:06:34.000000000 -0400
14847 +++ linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S 2011-08-05 19:44:35.000000000 -0400
14848 @@ -1,3 +1,4 @@
14849 +.section .rodata,"a",@progbits
14850 ENTRY(sys_call_table)
14851 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
14852 .long sys_exit
14853 diff -urNp linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c
14854 --- linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c 2011-05-19 00:06:34.000000000 -0400
14855 +++ linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c 2011-08-05 19:44:35.000000000 -0400
14856 @@ -24,17 +24,224 @@
14857
14858 #include <asm/syscalls.h>
14859
14860 -/*
14861 - * Do a system call from kernel instead of calling sys_execve so we
14862 - * end up with proper pt_regs.
14863 - */
14864 -int kernel_execve(const char *filename,
14865 - const char *const argv[],
14866 - const char *const envp[])
14867 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
14868 {
14869 - long __res;
14870 - asm volatile ("int $0x80"
14871 - : "=a" (__res)
14872 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
14873 - return __res;
14874 + unsigned long pax_task_size = TASK_SIZE;
14875 +
14876 +#ifdef CONFIG_PAX_SEGMEXEC
14877 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
14878 + pax_task_size = SEGMEXEC_TASK_SIZE;
14879 +#endif
14880 +
14881 + if (len > pax_task_size || addr > pax_task_size - len)
14882 + return -EINVAL;
14883 +
14884 + return 0;
14885 +}
14886 +
14887 +unsigned long
14888 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
14889 + unsigned long len, unsigned long pgoff, unsigned long flags)
14890 +{
14891 + struct mm_struct *mm = current->mm;
14892 + struct vm_area_struct *vma;
14893 + unsigned long start_addr, pax_task_size = TASK_SIZE;
14894 +
14895 +#ifdef CONFIG_PAX_SEGMEXEC
14896 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
14897 + pax_task_size = SEGMEXEC_TASK_SIZE;
14898 +#endif
14899 +
14900 + pax_task_size -= PAGE_SIZE;
14901 +
14902 + if (len > pax_task_size)
14903 + return -ENOMEM;
14904 +
14905 + if (flags & MAP_FIXED)
14906 + return addr;
14907 +
14908 +#ifdef CONFIG_PAX_RANDMMAP
14909 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14910 +#endif
14911 +
14912 + if (addr) {
14913 + addr = PAGE_ALIGN(addr);
14914 + if (pax_task_size - len >= addr) {
14915 + vma = find_vma(mm, addr);
14916 + if (check_heap_stack_gap(vma, addr, len))
14917 + return addr;
14918 + }
14919 + }
14920 + if (len > mm->cached_hole_size) {
14921 + start_addr = addr = mm->free_area_cache;
14922 + } else {
14923 + start_addr = addr = mm->mmap_base;
14924 + mm->cached_hole_size = 0;
14925 + }
14926 +
14927 +#ifdef CONFIG_PAX_PAGEEXEC
14928 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
14929 + start_addr = 0x00110000UL;
14930 +
14931 +#ifdef CONFIG_PAX_RANDMMAP
14932 + if (mm->pax_flags & MF_PAX_RANDMMAP)
14933 + start_addr += mm->delta_mmap & 0x03FFF000UL;
14934 +#endif
14935 +
14936 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
14937 + start_addr = addr = mm->mmap_base;
14938 + else
14939 + addr = start_addr;
14940 + }
14941 +#endif
14942 +
14943 +full_search:
14944 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
14945 + /* At this point: (!vma || addr < vma->vm_end). */
14946 + if (pax_task_size - len < addr) {
14947 + /*
14948 + * Start a new search - just in case we missed
14949 + * some holes.
14950 + */
14951 + if (start_addr != mm->mmap_base) {
14952 + start_addr = addr = mm->mmap_base;
14953 + mm->cached_hole_size = 0;
14954 + goto full_search;
14955 + }
14956 + return -ENOMEM;
14957 + }
14958 + if (check_heap_stack_gap(vma, addr, len))
14959 + break;
14960 + if (addr + mm->cached_hole_size < vma->vm_start)
14961 + mm->cached_hole_size = vma->vm_start - addr;
14962 + addr = vma->vm_end;
14963 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
14964 + start_addr = addr = mm->mmap_base;
14965 + mm->cached_hole_size = 0;
14966 + goto full_search;
14967 + }
14968 + }
14969 +
14970 + /*
14971 + * Remember the place where we stopped the search:
14972 + */
14973 + mm->free_area_cache = addr + len;
14974 + return addr;
14975 +}
14976 +
14977 +unsigned long
14978 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
14979 + const unsigned long len, const unsigned long pgoff,
14980 + const unsigned long flags)
14981 +{
14982 + struct vm_area_struct *vma;
14983 + struct mm_struct *mm = current->mm;
14984 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
14985 +
14986 +#ifdef CONFIG_PAX_SEGMEXEC
14987 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
14988 + pax_task_size = SEGMEXEC_TASK_SIZE;
14989 +#endif
14990 +
14991 + pax_task_size -= PAGE_SIZE;
14992 +
14993 + /* requested length too big for entire address space */
14994 + if (len > pax_task_size)
14995 + return -ENOMEM;
14996 +
14997 + if (flags & MAP_FIXED)
14998 + return addr;
14999 +
15000 +#ifdef CONFIG_PAX_PAGEEXEC
15001 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
15002 + goto bottomup;
15003 +#endif
15004 +
15005 +#ifdef CONFIG_PAX_RANDMMAP
15006 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15007 +#endif
15008 +
15009 + /* requesting a specific address */
15010 + if (addr) {
15011 + addr = PAGE_ALIGN(addr);
15012 + if (pax_task_size - len >= addr) {
15013 + vma = find_vma(mm, addr);
15014 + if (check_heap_stack_gap(vma, addr, len))
15015 + return addr;
15016 + }
15017 + }
15018 +
15019 + /* check if free_area_cache is useful for us */
15020 + if (len <= mm->cached_hole_size) {
15021 + mm->cached_hole_size = 0;
15022 + mm->free_area_cache = mm->mmap_base;
15023 + }
15024 +
15025 + /* either no address requested or can't fit in requested address hole */
15026 + addr = mm->free_area_cache;
15027 +
15028 + /* make sure it can fit in the remaining address space */
15029 + if (addr > len) {
15030 + vma = find_vma(mm, addr-len);
15031 + if (check_heap_stack_gap(vma, addr - len, len))
15032 + /* remember the address as a hint for next time */
15033 + return (mm->free_area_cache = addr-len);
15034 + }
15035 +
15036 + if (mm->mmap_base < len)
15037 + goto bottomup;
15038 +
15039 + addr = mm->mmap_base-len;
15040 +
15041 + do {
15042 + /*
15043 + * Lookup failure means no vma is above this address,
15044 + * else if new region fits below vma->vm_start,
15045 + * return with success:
15046 + */
15047 + vma = find_vma(mm, addr);
15048 + if (check_heap_stack_gap(vma, addr, len))
15049 + /* remember the address as a hint for next time */
15050 + return (mm->free_area_cache = addr);
15051 +
15052 + /* remember the largest hole we saw so far */
15053 + if (addr + mm->cached_hole_size < vma->vm_start)
15054 + mm->cached_hole_size = vma->vm_start - addr;
15055 +
15056 + /* try just below the current vma->vm_start */
15057 + addr = skip_heap_stack_gap(vma, len);
15058 + } while (!IS_ERR_VALUE(addr));
15059 +
15060 +bottomup:
15061 + /*
15062 + * A failed mmap() very likely causes application failure,
15063 + * so fall back to the bottom-up function here. This scenario
15064 + * can happen with large stack limits and large mmap()
15065 + * allocations.
15066 + */
15067 +
15068 +#ifdef CONFIG_PAX_SEGMEXEC
15069 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
15070 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15071 + else
15072 +#endif
15073 +
15074 + mm->mmap_base = TASK_UNMAPPED_BASE;
15075 +
15076 +#ifdef CONFIG_PAX_RANDMMAP
15077 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15078 + mm->mmap_base += mm->delta_mmap;
15079 +#endif
15080 +
15081 + mm->free_area_cache = mm->mmap_base;
15082 + mm->cached_hole_size = ~0UL;
15083 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15084 + /*
15085 + * Restore the topdown base:
15086 + */
15087 + mm->mmap_base = base;
15088 + mm->free_area_cache = base;
15089 + mm->cached_hole_size = ~0UL;
15090 +
15091 + return addr;
15092 }
15093 diff -urNp linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c
15094 --- linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c 2011-05-19 00:06:34.000000000 -0400
15095 +++ linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c 2011-08-05 19:44:35.000000000 -0400
15096 @@ -32,8 +32,8 @@ out:
15097 return error;
15098 }
15099
15100 -static void find_start_end(unsigned long flags, unsigned long *begin,
15101 - unsigned long *end)
15102 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
15103 + unsigned long *begin, unsigned long *end)
15104 {
15105 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15106 unsigned long new_begin;
15107 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15108 *begin = new_begin;
15109 }
15110 } else {
15111 - *begin = TASK_UNMAPPED_BASE;
15112 + *begin = mm->mmap_base;
15113 *end = TASK_SIZE;
15114 }
15115 }
15116 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15117 if (flags & MAP_FIXED)
15118 return addr;
15119
15120 - find_start_end(flags, &begin, &end);
15121 + find_start_end(mm, flags, &begin, &end);
15122
15123 if (len > end)
15124 return -ENOMEM;
15125
15126 +#ifdef CONFIG_PAX_RANDMMAP
15127 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15128 +#endif
15129 +
15130 if (addr) {
15131 addr = PAGE_ALIGN(addr);
15132 vma = find_vma(mm, addr);
15133 - if (end - len >= addr &&
15134 - (!vma || addr + len <= vma->vm_start))
15135 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15136 return addr;
15137 }
15138 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15139 @@ -106,7 +109,7 @@ full_search:
15140 }
15141 return -ENOMEM;
15142 }
15143 - if (!vma || addr + len <= vma->vm_start) {
15144 + if (check_heap_stack_gap(vma, addr, len)) {
15145 /*
15146 * Remember the place where we stopped the search:
15147 */
15148 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15149 {
15150 struct vm_area_struct *vma;
15151 struct mm_struct *mm = current->mm;
15152 - unsigned long addr = addr0;
15153 + unsigned long base = mm->mmap_base, addr = addr0;
15154
15155 /* requested length too big for entire address space */
15156 if (len > TASK_SIZE)
15157 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15158 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15159 goto bottomup;
15160
15161 +#ifdef CONFIG_PAX_RANDMMAP
15162 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15163 +#endif
15164 +
15165 /* requesting a specific address */
15166 if (addr) {
15167 addr = PAGE_ALIGN(addr);
15168 - vma = find_vma(mm, addr);
15169 - if (TASK_SIZE - len >= addr &&
15170 - (!vma || addr + len <= vma->vm_start))
15171 - return addr;
15172 + if (TASK_SIZE - len >= addr) {
15173 + vma = find_vma(mm, addr);
15174 + if (check_heap_stack_gap(vma, addr, len))
15175 + return addr;
15176 + }
15177 }
15178
15179 /* check if free_area_cache is useful for us */
15180 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15181 /* make sure it can fit in the remaining address space */
15182 if (addr > len) {
15183 vma = find_vma(mm, addr-len);
15184 - if (!vma || addr <= vma->vm_start)
15185 + if (check_heap_stack_gap(vma, addr - len, len))
15186 /* remember the address as a hint for next time */
15187 return mm->free_area_cache = addr-len;
15188 }
15189 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15190 * return with success:
15191 */
15192 vma = find_vma(mm, addr);
15193 - if (!vma || addr+len <= vma->vm_start)
15194 + if (check_heap_stack_gap(vma, addr, len))
15195 /* remember the address as a hint for next time */
15196 return mm->free_area_cache = addr;
15197
15198 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15199 mm->cached_hole_size = vma->vm_start - addr;
15200
15201 /* try just below the current vma->vm_start */
15202 - addr = vma->vm_start-len;
15203 - } while (len < vma->vm_start);
15204 + addr = skip_heap_stack_gap(vma, len);
15205 + } while (!IS_ERR_VALUE(addr));
15206
15207 bottomup:
15208 /*
15209 @@ -198,13 +206,21 @@ bottomup:
15210 * can happen with large stack limits and large mmap()
15211 * allocations.
15212 */
15213 + mm->mmap_base = TASK_UNMAPPED_BASE;
15214 +
15215 +#ifdef CONFIG_PAX_RANDMMAP
15216 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15217 + mm->mmap_base += mm->delta_mmap;
15218 +#endif
15219 +
15220 + mm->free_area_cache = mm->mmap_base;
15221 mm->cached_hole_size = ~0UL;
15222 - mm->free_area_cache = TASK_UNMAPPED_BASE;
15223 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15224 /*
15225 * Restore the topdown base:
15226 */
15227 - mm->free_area_cache = mm->mmap_base;
15228 + mm->mmap_base = base;
15229 + mm->free_area_cache = base;
15230 mm->cached_hole_size = ~0UL;
15231
15232 return addr;
15233 diff -urNp linux-2.6.39.4/arch/x86/kernel/tboot.c linux-2.6.39.4/arch/x86/kernel/tboot.c
15234 --- linux-2.6.39.4/arch/x86/kernel/tboot.c 2011-05-19 00:06:34.000000000 -0400
15235 +++ linux-2.6.39.4/arch/x86/kernel/tboot.c 2011-08-05 19:44:35.000000000 -0400
15236 @@ -218,7 +218,7 @@ static int tboot_setup_sleep(void)
15237
15238 void tboot_shutdown(u32 shutdown_type)
15239 {
15240 - void (*shutdown)(void);
15241 + void (* __noreturn shutdown)(void);
15242
15243 if (!tboot_enabled())
15244 return;
15245 @@ -240,7 +240,7 @@ void tboot_shutdown(u32 shutdown_type)
15246
15247 switch_to_tboot_pt();
15248
15249 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15250 + shutdown = (void *)tboot->shutdown_entry;
15251 shutdown();
15252
15253 /* should not reach here */
15254 @@ -297,7 +297,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15255 tboot_shutdown(acpi_shutdown_map[sleep_state]);
15256 }
15257
15258 -static atomic_t ap_wfs_count;
15259 +static atomic_unchecked_t ap_wfs_count;
15260
15261 static int tboot_wait_for_aps(int num_aps)
15262 {
15263 @@ -321,9 +321,9 @@ static int __cpuinit tboot_cpu_callback(
15264 {
15265 switch (action) {
15266 case CPU_DYING:
15267 - atomic_inc(&ap_wfs_count);
15268 + atomic_inc_unchecked(&ap_wfs_count);
15269 if (num_online_cpus() == 1)
15270 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15271 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15272 return NOTIFY_BAD;
15273 break;
15274 }
15275 @@ -342,7 +342,7 @@ static __init int tboot_late_init(void)
15276
15277 tboot_create_trampoline();
15278
15279 - atomic_set(&ap_wfs_count, 0);
15280 + atomic_set_unchecked(&ap_wfs_count, 0);
15281 register_hotcpu_notifier(&tboot_cpu_notifier);
15282 return 0;
15283 }
15284 diff -urNp linux-2.6.39.4/arch/x86/kernel/time.c linux-2.6.39.4/arch/x86/kernel/time.c
15285 --- linux-2.6.39.4/arch/x86/kernel/time.c 2011-05-19 00:06:34.000000000 -0400
15286 +++ linux-2.6.39.4/arch/x86/kernel/time.c 2011-08-05 19:44:35.000000000 -0400
15287 @@ -22,17 +22,13 @@
15288 #include <asm/hpet.h>
15289 #include <asm/time.h>
15290
15291 -#ifdef CONFIG_X86_64
15292 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
15293 -#endif
15294 -
15295 unsigned long profile_pc(struct pt_regs *regs)
15296 {
15297 unsigned long pc = instruction_pointer(regs);
15298
15299 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15300 + if (!user_mode(regs) && in_lock_functions(pc)) {
15301 #ifdef CONFIG_FRAME_POINTER
15302 - return *(unsigned long *)(regs->bp + sizeof(long));
15303 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15304 #else
15305 unsigned long *sp =
15306 (unsigned long *)kernel_stack_pointer(regs);
15307 @@ -41,11 +37,17 @@ unsigned long profile_pc(struct pt_regs
15308 * or above a saved flags. Eflags has bits 22-31 zero,
15309 * kernel addresses don't.
15310 */
15311 +
15312 +#ifdef CONFIG_PAX_KERNEXEC
15313 + return ktla_ktva(sp[0]);
15314 +#else
15315 if (sp[0] >> 22)
15316 return sp[0];
15317 if (sp[1] >> 22)
15318 return sp[1];
15319 #endif
15320 +
15321 +#endif
15322 }
15323 return pc;
15324 }
15325 diff -urNp linux-2.6.39.4/arch/x86/kernel/tls.c linux-2.6.39.4/arch/x86/kernel/tls.c
15326 --- linux-2.6.39.4/arch/x86/kernel/tls.c 2011-05-19 00:06:34.000000000 -0400
15327 +++ linux-2.6.39.4/arch/x86/kernel/tls.c 2011-08-05 19:44:35.000000000 -0400
15328 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
15329 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
15330 return -EINVAL;
15331
15332 +#ifdef CONFIG_PAX_SEGMEXEC
15333 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
15334 + return -EINVAL;
15335 +#endif
15336 +
15337 set_tls_desc(p, idx, &info, 1);
15338
15339 return 0;
15340 diff -urNp linux-2.6.39.4/arch/x86/kernel/trampoline_32.S linux-2.6.39.4/arch/x86/kernel/trampoline_32.S
15341 --- linux-2.6.39.4/arch/x86/kernel/trampoline_32.S 2011-05-19 00:06:34.000000000 -0400
15342 +++ linux-2.6.39.4/arch/x86/kernel/trampoline_32.S 2011-08-05 19:44:35.000000000 -0400
15343 @@ -32,6 +32,12 @@
15344 #include <asm/segment.h>
15345 #include <asm/page_types.h>
15346
15347 +#ifdef CONFIG_PAX_KERNEXEC
15348 +#define ta(X) (X)
15349 +#else
15350 +#define ta(X) ((X) - __PAGE_OFFSET)
15351 +#endif
15352 +
15353 #ifdef CONFIG_SMP
15354
15355 .section ".x86_trampoline","a"
15356 @@ -62,7 +68,7 @@ r_base = .
15357 inc %ax # protected mode (PE) bit
15358 lmsw %ax # into protected mode
15359 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
15360 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
15361 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
15362
15363 # These need to be in the same 64K segment as the above;
15364 # hence we don't use the boot_gdt_descr defined in head.S
15365 diff -urNp linux-2.6.39.4/arch/x86/kernel/trampoline_64.S linux-2.6.39.4/arch/x86/kernel/trampoline_64.S
15366 --- linux-2.6.39.4/arch/x86/kernel/trampoline_64.S 2011-05-19 00:06:34.000000000 -0400
15367 +++ linux-2.6.39.4/arch/x86/kernel/trampoline_64.S 2011-08-05 19:44:35.000000000 -0400
15368 @@ -90,7 +90,7 @@ startup_32:
15369 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
15370 movl %eax, %ds
15371
15372 - movl $X86_CR4_PAE, %eax
15373 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15374 movl %eax, %cr4 # Enable PAE mode
15375
15376 # Setup trampoline 4 level pagetables
15377 @@ -138,7 +138,7 @@ tidt:
15378 # so the kernel can live anywhere
15379 .balign 4
15380 tgdt:
15381 - .short tgdt_end - tgdt # gdt limit
15382 + .short tgdt_end - tgdt - 1 # gdt limit
15383 .long tgdt - r_base
15384 .short 0
15385 .quad 0x00cf9b000000ffff # __KERNEL32_CS
15386 diff -urNp linux-2.6.39.4/arch/x86/kernel/traps.c linux-2.6.39.4/arch/x86/kernel/traps.c
15387 --- linux-2.6.39.4/arch/x86/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
15388 +++ linux-2.6.39.4/arch/x86/kernel/traps.c 2011-08-05 19:44:35.000000000 -0400
15389 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
15390
15391 /* Do we ignore FPU interrupts ? */
15392 char ignore_fpu_irq;
15393 -
15394 -/*
15395 - * The IDT has to be page-aligned to simplify the Pentium
15396 - * F0 0F bug workaround.
15397 - */
15398 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
15399 #endif
15400
15401 DECLARE_BITMAP(used_vectors, NR_VECTORS);
15402 @@ -117,13 +111,13 @@ static inline void preempt_conditional_c
15403 }
15404
15405 static void __kprobes
15406 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
15407 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
15408 long error_code, siginfo_t *info)
15409 {
15410 struct task_struct *tsk = current;
15411
15412 #ifdef CONFIG_X86_32
15413 - if (regs->flags & X86_VM_MASK) {
15414 + if (v8086_mode(regs)) {
15415 /*
15416 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
15417 * On nmi (interrupt 2), do_trap should not be called.
15418 @@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
15419 }
15420 #endif
15421
15422 - if (!user_mode(regs))
15423 + if (!user_mode_novm(regs))
15424 goto kernel_trap;
15425
15426 #ifdef CONFIG_X86_32
15427 @@ -157,7 +151,7 @@ trap_signal:
15428 printk_ratelimit()) {
15429 printk(KERN_INFO
15430 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
15431 - tsk->comm, tsk->pid, str,
15432 + tsk->comm, task_pid_nr(tsk), str,
15433 regs->ip, regs->sp, error_code);
15434 print_vma_addr(" in ", regs->ip);
15435 printk("\n");
15436 @@ -174,8 +168,20 @@ kernel_trap:
15437 if (!fixup_exception(regs)) {
15438 tsk->thread.error_code = error_code;
15439 tsk->thread.trap_no = trapnr;
15440 +
15441 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15442 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
15443 + str = "PAX: suspicious stack segment fault";
15444 +#endif
15445 +
15446 die(str, regs, error_code);
15447 }
15448 +
15449 +#ifdef CONFIG_PAX_REFCOUNT
15450 + if (trapnr == 4)
15451 + pax_report_refcount_overflow(regs);
15452 +#endif
15453 +
15454 return;
15455
15456 #ifdef CONFIG_X86_32
15457 @@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
15458 conditional_sti(regs);
15459
15460 #ifdef CONFIG_X86_32
15461 - if (regs->flags & X86_VM_MASK)
15462 + if (v8086_mode(regs))
15463 goto gp_in_vm86;
15464 #endif
15465
15466 tsk = current;
15467 - if (!user_mode(regs))
15468 + if (!user_mode_novm(regs))
15469 goto gp_in_kernel;
15470
15471 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15472 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
15473 + struct mm_struct *mm = tsk->mm;
15474 + unsigned long limit;
15475 +
15476 + down_write(&mm->mmap_sem);
15477 + limit = mm->context.user_cs_limit;
15478 + if (limit < TASK_SIZE) {
15479 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
15480 + up_write(&mm->mmap_sem);
15481 + return;
15482 + }
15483 + up_write(&mm->mmap_sem);
15484 + }
15485 +#endif
15486 +
15487 tsk->thread.error_code = error_code;
15488 tsk->thread.trap_no = 13;
15489
15490 @@ -304,6 +326,13 @@ gp_in_kernel:
15491 if (notify_die(DIE_GPF, "general protection fault", regs,
15492 error_code, 13, SIGSEGV) == NOTIFY_STOP)
15493 return;
15494 +
15495 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15496 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
15497 + die("PAX: suspicious general protection fault", regs, error_code);
15498 + else
15499 +#endif
15500 +
15501 die("general protection fault", regs, error_code);
15502 }
15503
15504 @@ -433,6 +462,17 @@ static notrace __kprobes void default_do
15505 dotraplinkage notrace __kprobes void
15506 do_nmi(struct pt_regs *regs, long error_code)
15507 {
15508 +
15509 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15510 + if (!user_mode(regs)) {
15511 + unsigned long cs = regs->cs & 0xFFFF;
15512 + unsigned long ip = ktva_ktla(regs->ip);
15513 +
15514 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
15515 + regs->ip = ip;
15516 + }
15517 +#endif
15518 +
15519 nmi_enter();
15520
15521 inc_irq_stat(__nmi_count);
15522 @@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
15523 /* It's safe to allow irq's after DR6 has been saved */
15524 preempt_conditional_sti(regs);
15525
15526 - if (regs->flags & X86_VM_MASK) {
15527 + if (v8086_mode(regs)) {
15528 handle_vm86_trap((struct kernel_vm86_regs *) regs,
15529 error_code, 1);
15530 preempt_conditional_cli(regs);
15531 @@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
15532 * We already checked v86 mode above, so we can check for kernel mode
15533 * by just checking the CPL of CS.
15534 */
15535 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
15536 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
15537 tsk->thread.debugreg6 &= ~DR_STEP;
15538 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
15539 regs->flags &= ~X86_EFLAGS_TF;
15540 @@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
15541 return;
15542 conditional_sti(regs);
15543
15544 - if (!user_mode_vm(regs))
15545 + if (!user_mode(regs))
15546 {
15547 if (!fixup_exception(regs)) {
15548 task->thread.error_code = error_code;
15549 @@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
15550 void __math_state_restore(void)
15551 {
15552 struct thread_info *thread = current_thread_info();
15553 - struct task_struct *tsk = thread->task;
15554 + struct task_struct *tsk = current;
15555
15556 /*
15557 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15558 @@ -750,8 +790,7 @@ void __math_state_restore(void)
15559 */
15560 asmlinkage void math_state_restore(void)
15561 {
15562 - struct thread_info *thread = current_thread_info();
15563 - struct task_struct *tsk = thread->task;
15564 + struct task_struct *tsk = current;
15565
15566 if (!tsk_used_math(tsk)) {
15567 local_irq_enable();
15568 diff -urNp linux-2.6.39.4/arch/x86/kernel/verify_cpu.S linux-2.6.39.4/arch/x86/kernel/verify_cpu.S
15569 --- linux-2.6.39.4/arch/x86/kernel/verify_cpu.S 2011-05-19 00:06:34.000000000 -0400
15570 +++ linux-2.6.39.4/arch/x86/kernel/verify_cpu.S 2011-08-05 19:44:35.000000000 -0400
15571 @@ -20,6 +20,7 @@
15572 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
15573 * arch/x86/kernel/trampoline_64.S: secondary processor verification
15574 * arch/x86/kernel/head_32.S: processor startup
15575 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
15576 *
15577 * verify_cpu, returns the status of longmode and SSE in register %eax.
15578 * 0: Success 1: Failure
15579 diff -urNp linux-2.6.39.4/arch/x86/kernel/vm86_32.c linux-2.6.39.4/arch/x86/kernel/vm86_32.c
15580 --- linux-2.6.39.4/arch/x86/kernel/vm86_32.c 2011-05-19 00:06:34.000000000 -0400
15581 +++ linux-2.6.39.4/arch/x86/kernel/vm86_32.c 2011-08-05 19:44:35.000000000 -0400
15582 @@ -41,6 +41,7 @@
15583 #include <linux/ptrace.h>
15584 #include <linux/audit.h>
15585 #include <linux/stddef.h>
15586 +#include <linux/grsecurity.h>
15587
15588 #include <asm/uaccess.h>
15589 #include <asm/io.h>
15590 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
15591 do_exit(SIGSEGV);
15592 }
15593
15594 - tss = &per_cpu(init_tss, get_cpu());
15595 + tss = init_tss + get_cpu();
15596 current->thread.sp0 = current->thread.saved_sp0;
15597 current->thread.sysenter_cs = __KERNEL_CS;
15598 load_sp0(tss, &current->thread);
15599 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
15600 struct task_struct *tsk;
15601 int tmp, ret = -EPERM;
15602
15603 +#ifdef CONFIG_GRKERNSEC_VM86
15604 + if (!capable(CAP_SYS_RAWIO)) {
15605 + gr_handle_vm86();
15606 + goto out;
15607 + }
15608 +#endif
15609 +
15610 tsk = current;
15611 if (tsk->thread.saved_sp0)
15612 goto out;
15613 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
15614 int tmp, ret;
15615 struct vm86plus_struct __user *v86;
15616
15617 +#ifdef CONFIG_GRKERNSEC_VM86
15618 + if (!capable(CAP_SYS_RAWIO)) {
15619 + gr_handle_vm86();
15620 + ret = -EPERM;
15621 + goto out;
15622 + }
15623 +#endif
15624 +
15625 tsk = current;
15626 switch (cmd) {
15627 case VM86_REQUEST_IRQ:
15628 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
15629 tsk->thread.saved_fs = info->regs32->fs;
15630 tsk->thread.saved_gs = get_user_gs(info->regs32);
15631
15632 - tss = &per_cpu(init_tss, get_cpu());
15633 + tss = init_tss + get_cpu();
15634 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
15635 if (cpu_has_sep)
15636 tsk->thread.sysenter_cs = 0;
15637 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
15638 goto cannot_handle;
15639 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
15640 goto cannot_handle;
15641 - intr_ptr = (unsigned long __user *) (i << 2);
15642 + intr_ptr = (__force unsigned long __user *) (i << 2);
15643 if (get_user(segoffs, intr_ptr))
15644 goto cannot_handle;
15645 if ((segoffs >> 16) == BIOSSEG)
15646 diff -urNp linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S
15647 --- linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S 2011-05-19 00:06:34.000000000 -0400
15648 +++ linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S 2011-08-05 19:44:35.000000000 -0400
15649 @@ -26,6 +26,13 @@
15650 #include <asm/page_types.h>
15651 #include <asm/cache.h>
15652 #include <asm/boot.h>
15653 +#include <asm/segment.h>
15654 +
15655 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15656 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
15657 +#else
15658 +#define __KERNEL_TEXT_OFFSET 0
15659 +#endif
15660
15661 #undef i386 /* in case the preprocessor is a 32bit one */
15662
15663 @@ -34,11 +41,9 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
15664 #ifdef CONFIG_X86_32
15665 OUTPUT_ARCH(i386)
15666 ENTRY(phys_startup_32)
15667 -jiffies = jiffies_64;
15668 #else
15669 OUTPUT_ARCH(i386:x86-64)
15670 ENTRY(phys_startup_64)
15671 -jiffies_64 = jiffies;
15672 #endif
15673
15674 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
15675 @@ -69,31 +74,46 @@ jiffies_64 = jiffies;
15676
15677 PHDRS {
15678 text PT_LOAD FLAGS(5); /* R_E */
15679 +#ifdef CONFIG_X86_32
15680 + module PT_LOAD FLAGS(5); /* R_E */
15681 +#endif
15682 +#ifdef CONFIG_XEN
15683 + rodata PT_LOAD FLAGS(5); /* R_E */
15684 +#else
15685 + rodata PT_LOAD FLAGS(4); /* R__ */
15686 +#endif
15687 data PT_LOAD FLAGS(6); /* RW_ */
15688 #ifdef CONFIG_X86_64
15689 user PT_LOAD FLAGS(5); /* R_E */
15690 +#endif
15691 + init.begin PT_LOAD FLAGS(6); /* RW_ */
15692 #ifdef CONFIG_SMP
15693 percpu PT_LOAD FLAGS(6); /* RW_ */
15694 #endif
15695 + text.init PT_LOAD FLAGS(5); /* R_E */
15696 + text.exit PT_LOAD FLAGS(5); /* R_E */
15697 init PT_LOAD FLAGS(7); /* RWE */
15698 -#endif
15699 note PT_NOTE FLAGS(0); /* ___ */
15700 }
15701
15702 SECTIONS
15703 {
15704 #ifdef CONFIG_X86_32
15705 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
15706 - phys_startup_32 = startup_32 - LOAD_OFFSET;
15707 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
15708 #else
15709 - . = __START_KERNEL;
15710 - phys_startup_64 = startup_64 - LOAD_OFFSET;
15711 + . = __START_KERNEL;
15712 #endif
15713
15714 /* Text and read-only data */
15715 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
15716 - _text = .;
15717 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15718 /* bootstrapping code */
15719 +#ifdef CONFIG_X86_32
15720 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15721 +#else
15722 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15723 +#endif
15724 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15725 + _text = .;
15726 HEAD_TEXT
15727 #ifdef CONFIG_X86_32
15728 . = ALIGN(PAGE_SIZE);
15729 @@ -109,13 +129,47 @@ SECTIONS
15730 IRQENTRY_TEXT
15731 *(.fixup)
15732 *(.gnu.warning)
15733 - /* End of text section */
15734 - _etext = .;
15735 } :text = 0x9090
15736
15737 - NOTES :text :note
15738 + . += __KERNEL_TEXT_OFFSET;
15739 +
15740 +#ifdef CONFIG_X86_32
15741 + . = ALIGN(PAGE_SIZE);
15742 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
15743 +
15744 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
15745 + MODULES_EXEC_VADDR = .;
15746 + BYTE(0)
15747 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
15748 + . = ALIGN(HPAGE_SIZE);
15749 + MODULES_EXEC_END = . - 1;
15750 +#endif
15751 +
15752 + } :module
15753 +#endif
15754 +
15755 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
15756 + /* End of text section */
15757 + _etext = . - __KERNEL_TEXT_OFFSET;
15758 + }
15759
15760 - EXCEPTION_TABLE(16) :text = 0x9090
15761 +#ifdef CONFIG_X86_32
15762 + . = ALIGN(PAGE_SIZE);
15763 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
15764 + *(.idt)
15765 + . = ALIGN(PAGE_SIZE);
15766 + *(.empty_zero_page)
15767 + *(.initial_pg_fixmap)
15768 + *(.initial_pg_pmd)
15769 + *(.initial_page_table)
15770 + *(.swapper_pg_dir)
15771 + } :rodata
15772 +#endif
15773 +
15774 + . = ALIGN(PAGE_SIZE);
15775 + NOTES :rodata :note
15776 +
15777 + EXCEPTION_TABLE(16) :rodata
15778
15779 #if defined(CONFIG_DEBUG_RODATA)
15780 /* .text should occupy whole number of pages */
15781 @@ -127,16 +181,20 @@ SECTIONS
15782
15783 /* Data */
15784 .data : AT(ADDR(.data) - LOAD_OFFSET) {
15785 +
15786 +#ifdef CONFIG_PAX_KERNEXEC
15787 + . = ALIGN(HPAGE_SIZE);
15788 +#else
15789 + . = ALIGN(PAGE_SIZE);
15790 +#endif
15791 +
15792 /* Start of data section */
15793 _sdata = .;
15794
15795 /* init_task */
15796 INIT_TASK_DATA(THREAD_SIZE)
15797
15798 -#ifdef CONFIG_X86_32
15799 - /* 32 bit has nosave before _edata */
15800 NOSAVE_DATA
15801 -#endif
15802
15803 PAGE_ALIGNED_DATA(PAGE_SIZE)
15804
15805 @@ -145,6 +203,8 @@ SECTIONS
15806 DATA_DATA
15807 CONSTRUCTORS
15808
15809 + jiffies = jiffies_64;
15810 +
15811 /* rarely changed data like cpu maps */
15812 READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)
15813
15814 @@ -199,12 +259,6 @@ SECTIONS
15815 }
15816 vgetcpu_mode = VVIRT(.vgetcpu_mode);
15817
15818 - . = ALIGN(L1_CACHE_BYTES);
15819 - .jiffies : AT(VLOAD(.jiffies)) {
15820 - *(.jiffies)
15821 - }
15822 - jiffies = VVIRT(.jiffies);
15823 -
15824 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
15825 *(.vsyscall_3)
15826 }
15827 @@ -220,12 +274,19 @@ SECTIONS
15828 #endif /* CONFIG_X86_64 */
15829
15830 /* Init code and data - will be freed after init */
15831 - . = ALIGN(PAGE_SIZE);
15832 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
15833 + BYTE(0)
15834 +
15835 +#ifdef CONFIG_PAX_KERNEXEC
15836 + . = ALIGN(HPAGE_SIZE);
15837 +#else
15838 + . = ALIGN(PAGE_SIZE);
15839 +#endif
15840 +
15841 __init_begin = .; /* paired with __init_end */
15842 - }
15843 + } :init.begin
15844
15845 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
15846 +#ifdef CONFIG_SMP
15847 /*
15848 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
15849 * output PHDR, so the next output section - .init.text - should
15850 @@ -234,12 +295,27 @@ SECTIONS
15851 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
15852 #endif
15853
15854 - INIT_TEXT_SECTION(PAGE_SIZE)
15855 -#ifdef CONFIG_X86_64
15856 - :init
15857 -#endif
15858 + . = ALIGN(PAGE_SIZE);
15859 + init_begin = .;
15860 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
15861 + VMLINUX_SYMBOL(_sinittext) = .;
15862 + INIT_TEXT
15863 + VMLINUX_SYMBOL(_einittext) = .;
15864 + . = ALIGN(PAGE_SIZE);
15865 + } :text.init
15866
15867 - INIT_DATA_SECTION(16)
15868 + /*
15869 + * .exit.text is discard at runtime, not link time, to deal with
15870 + * references from .altinstructions and .eh_frame
15871 + */
15872 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15873 + EXIT_TEXT
15874 + . = ALIGN(16);
15875 + } :text.exit
15876 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
15877 +
15878 + . = ALIGN(PAGE_SIZE);
15879 + INIT_DATA_SECTION(16) :init
15880
15881 /*
15882 * Code and data for a variety of lowlevel trampolines, to be
15883 @@ -306,19 +382,12 @@ SECTIONS
15884 }
15885
15886 . = ALIGN(8);
15887 - /*
15888 - * .exit.text is discard at runtime, not link time, to deal with
15889 - * references from .altinstructions and .eh_frame
15890 - */
15891 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
15892 - EXIT_TEXT
15893 - }
15894
15895 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
15896 EXIT_DATA
15897 }
15898
15899 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
15900 +#ifndef CONFIG_SMP
15901 PERCPU(INTERNODE_CACHE_BYTES, PAGE_SIZE)
15902 #endif
15903
15904 @@ -337,16 +406,10 @@ SECTIONS
15905 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
15906 __smp_locks = .;
15907 *(.smp_locks)
15908 - . = ALIGN(PAGE_SIZE);
15909 __smp_locks_end = .;
15910 + . = ALIGN(PAGE_SIZE);
15911 }
15912
15913 -#ifdef CONFIG_X86_64
15914 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
15915 - NOSAVE_DATA
15916 - }
15917 -#endif
15918 -
15919 /* BSS */
15920 . = ALIGN(PAGE_SIZE);
15921 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
15922 @@ -362,6 +425,7 @@ SECTIONS
15923 __brk_base = .;
15924 . += 64 * 1024; /* 64k alignment slop space */
15925 *(.brk_reservation) /* areas brk users have reserved */
15926 + . = ALIGN(HPAGE_SIZE);
15927 __brk_limit = .;
15928 }
15929
15930 @@ -388,13 +452,12 @@ SECTIONS
15931 * for the boot processor.
15932 */
15933 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
15934 -INIT_PER_CPU(gdt_page);
15935 INIT_PER_CPU(irq_stack_union);
15936
15937 /*
15938 * Build-time check on the image size:
15939 */
15940 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
15941 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
15942 "kernel image bigger than KERNEL_IMAGE_SIZE");
15943
15944 #ifdef CONFIG_SMP
15945 diff -urNp linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c
15946 --- linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c 2011-05-19 00:06:34.000000000 -0400
15947 +++ linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c 2011-08-05 19:44:35.000000000 -0400
15948 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
15949
15950 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
15951 /* copy vsyscall data */
15952 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
15953 vsyscall_gtod_data.clock.vread = clock->vread;
15954 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
15955 vsyscall_gtod_data.clock.mask = clock->mask;
15956 @@ -208,7 +209,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
15957 We do this here because otherwise user space would do it on
15958 its own in a likely inferior way (no access to jiffies).
15959 If you don't like it pass NULL. */
15960 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
15961 + if (tcache && tcache->blob[0] == (j = jiffies)) {
15962 p = tcache->blob[1];
15963 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
15964 /* Load per CPU data from RDTSCP */
15965 diff -urNp linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c
15966 --- linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c 2011-05-19 00:06:34.000000000 -0400
15967 +++ linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c 2011-08-05 19:44:35.000000000 -0400
15968 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
15969 EXPORT_SYMBOL(copy_user_generic_string);
15970 EXPORT_SYMBOL(copy_user_generic_unrolled);
15971 EXPORT_SYMBOL(__copy_user_nocache);
15972 -EXPORT_SYMBOL(_copy_from_user);
15973 -EXPORT_SYMBOL(_copy_to_user);
15974
15975 EXPORT_SYMBOL(copy_page);
15976 EXPORT_SYMBOL(clear_page);
15977 diff -urNp linux-2.6.39.4/arch/x86/kernel/xsave.c linux-2.6.39.4/arch/x86/kernel/xsave.c
15978 --- linux-2.6.39.4/arch/x86/kernel/xsave.c 2011-05-19 00:06:34.000000000 -0400
15979 +++ linux-2.6.39.4/arch/x86/kernel/xsave.c 2011-08-05 19:44:35.000000000 -0400
15980 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
15981 fx_sw_user->xstate_size > fx_sw_user->extended_size)
15982 return -EINVAL;
15983
15984 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
15985 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
15986 fx_sw_user->extended_size -
15987 FP_XSTATE_MAGIC2_SIZE));
15988 if (err)
15989 @@ -267,7 +267,7 @@ fx_only:
15990 * the other extended state.
15991 */
15992 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
15993 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
15994 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
15995 }
15996
15997 /*
15998 @@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
15999 if (use_xsave())
16000 err = restore_user_xstate(buf);
16001 else
16002 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
16003 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
16004 buf);
16005 if (unlikely(err)) {
16006 /*
16007 diff -urNp linux-2.6.39.4/arch/x86/kvm/emulate.c linux-2.6.39.4/arch/x86/kvm/emulate.c
16008 --- linux-2.6.39.4/arch/x86/kvm/emulate.c 2011-05-19 00:06:34.000000000 -0400
16009 +++ linux-2.6.39.4/arch/x86/kvm/emulate.c 2011-08-05 19:44:35.000000000 -0400
16010 @@ -89,7 +89,7 @@
16011 #define Src2ImmByte (2<<29)
16012 #define Src2One (3<<29)
16013 #define Src2Imm (4<<29)
16014 -#define Src2Mask (7<<29)
16015 +#define Src2Mask (7U<<29)
16016
16017 #define X2(x...) x, x
16018 #define X3(x...) X2(x), x
16019 @@ -190,6 +190,7 @@ struct group_dual {
16020
16021 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
16022 do { \
16023 + unsigned long _tmp; \
16024 __asm__ __volatile__ ( \
16025 _PRE_EFLAGS("0", "4", "2") \
16026 _op _suffix " %"_x"3,%1; " \
16027 @@ -203,8 +204,6 @@ struct group_dual {
16028 /* Raw emulation: instruction has two explicit operands. */
16029 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
16030 do { \
16031 - unsigned long _tmp; \
16032 - \
16033 switch ((_dst).bytes) { \
16034 case 2: \
16035 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
16036 @@ -220,7 +219,6 @@ struct group_dual {
16037
16038 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
16039 do { \
16040 - unsigned long _tmp; \
16041 switch ((_dst).bytes) { \
16042 case 1: \
16043 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
16044 diff -urNp linux-2.6.39.4/arch/x86/kvm/lapic.c linux-2.6.39.4/arch/x86/kvm/lapic.c
16045 --- linux-2.6.39.4/arch/x86/kvm/lapic.c 2011-05-19 00:06:34.000000000 -0400
16046 +++ linux-2.6.39.4/arch/x86/kvm/lapic.c 2011-08-05 19:44:35.000000000 -0400
16047 @@ -53,7 +53,7 @@
16048 #define APIC_BUS_CYCLE_NS 1
16049
16050 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
16051 -#define apic_debug(fmt, arg...)
16052 +#define apic_debug(fmt, arg...) do {} while (0)
16053
16054 #define APIC_LVT_NUM 6
16055 /* 14 is the version for Xeon and Pentium 8.4.8*/
16056 diff -urNp linux-2.6.39.4/arch/x86/kvm/mmu.c linux-2.6.39.4/arch/x86/kvm/mmu.c
16057 --- linux-2.6.39.4/arch/x86/kvm/mmu.c 2011-05-19 00:06:34.000000000 -0400
16058 +++ linux-2.6.39.4/arch/x86/kvm/mmu.c 2011-08-05 19:44:35.000000000 -0400
16059 @@ -3240,7 +3240,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16060
16061 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
16062
16063 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
16064 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
16065
16066 /*
16067 * Assume that the pte write on a page table of the same type
16068 @@ -3275,7 +3275,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16069 smp_rmb();
16070
16071 spin_lock(&vcpu->kvm->mmu_lock);
16072 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16073 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16074 gentry = 0;
16075 kvm_mmu_free_some_pages(vcpu);
16076 ++vcpu->kvm->stat.mmu_pte_write;
16077 diff -urNp linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h
16078 --- linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h 2011-05-19 00:06:34.000000000 -0400
16079 +++ linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h 2011-08-05 19:44:35.000000000 -0400
16080 @@ -552,6 +552,8 @@ static int FNAME(page_fault)(struct kvm_
16081 unsigned long mmu_seq;
16082 bool map_writable;
16083
16084 + pax_track_stack();
16085 +
16086 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
16087
16088 r = mmu_topup_memory_caches(vcpu);
16089 @@ -672,7 +674,7 @@ static void FNAME(invlpg)(struct kvm_vcp
16090 if (need_flush)
16091 kvm_flush_remote_tlbs(vcpu->kvm);
16092
16093 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
16094 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
16095
16096 spin_unlock(&vcpu->kvm->mmu_lock);
16097
16098 diff -urNp linux-2.6.39.4/arch/x86/kvm/svm.c linux-2.6.39.4/arch/x86/kvm/svm.c
16099 --- linux-2.6.39.4/arch/x86/kvm/svm.c 2011-05-19 00:06:34.000000000 -0400
16100 +++ linux-2.6.39.4/arch/x86/kvm/svm.c 2011-08-05 20:34:06.000000000 -0400
16101 @@ -3278,7 +3278,11 @@ static void reload_tss(struct kvm_vcpu *
16102 int cpu = raw_smp_processor_id();
16103
16104 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
16105 +
16106 + pax_open_kernel();
16107 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
16108 + pax_close_kernel();
16109 +
16110 load_TR_desc();
16111 }
16112
16113 @@ -3656,6 +3660,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16114 #endif
16115 #endif
16116
16117 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16118 + __set_fs(current_thread_info()->addr_limit);
16119 +#endif
16120 +
16121 reload_tss(vcpu);
16122
16123 local_irq_disable();
16124 diff -urNp linux-2.6.39.4/arch/x86/kvm/vmx.c linux-2.6.39.4/arch/x86/kvm/vmx.c
16125 --- linux-2.6.39.4/arch/x86/kvm/vmx.c 2011-05-19 00:06:34.000000000 -0400
16126 +++ linux-2.6.39.4/arch/x86/kvm/vmx.c 2011-08-05 20:34:06.000000000 -0400
16127 @@ -725,7 +725,11 @@ static void reload_tss(void)
16128 struct desc_struct *descs;
16129
16130 descs = (void *)gdt->address;
16131 +
16132 + pax_open_kernel();
16133 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16134 + pax_close_kernel();
16135 +
16136 load_TR_desc();
16137 }
16138
16139 @@ -1648,8 +1652,11 @@ static __init int hardware_setup(void)
16140 if (!cpu_has_vmx_flexpriority())
16141 flexpriority_enabled = 0;
16142
16143 - if (!cpu_has_vmx_tpr_shadow())
16144 - kvm_x86_ops->update_cr8_intercept = NULL;
16145 + if (!cpu_has_vmx_tpr_shadow()) {
16146 + pax_open_kernel();
16147 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16148 + pax_close_kernel();
16149 + }
16150
16151 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16152 kvm_disable_largepages();
16153 @@ -2693,7 +2700,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16154 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16155
16156 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16157 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16158 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16159 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16160 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16161 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16162 @@ -4068,6 +4075,12 @@ static void __noclone vmx_vcpu_run(struc
16163 "jmp .Lkvm_vmx_return \n\t"
16164 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16165 ".Lkvm_vmx_return: "
16166 +
16167 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16168 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16169 + ".Lkvm_vmx_return2: "
16170 +#endif
16171 +
16172 /* Save guest registers, load host registers, keep flags */
16173 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16174 "pop %0 \n\t"
16175 @@ -4116,6 +4129,11 @@ static void __noclone vmx_vcpu_run(struc
16176 #endif
16177 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16178 [wordsize]"i"(sizeof(ulong))
16179 +
16180 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16181 + ,[cs]"i"(__KERNEL_CS)
16182 +#endif
16183 +
16184 : "cc", "memory"
16185 , R"ax", R"bx", R"di", R"si"
16186 #ifdef CONFIG_X86_64
16187 @@ -4130,7 +4148,16 @@ static void __noclone vmx_vcpu_run(struc
16188
16189 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16190
16191 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16192 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16193 +
16194 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16195 + loadsegment(fs, __KERNEL_PERCPU);
16196 +#endif
16197 +
16198 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16199 + __set_fs(current_thread_info()->addr_limit);
16200 +#endif
16201 +
16202 vmx->launched = 1;
16203
16204 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16205 diff -urNp linux-2.6.39.4/arch/x86/kvm/x86.c linux-2.6.39.4/arch/x86/kvm/x86.c
16206 --- linux-2.6.39.4/arch/x86/kvm/x86.c 2011-05-19 00:06:34.000000000 -0400
16207 +++ linux-2.6.39.4/arch/x86/kvm/x86.c 2011-08-05 20:34:06.000000000 -0400
16208 @@ -2050,6 +2050,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16209 if (n < msr_list.nmsrs)
16210 goto out;
16211 r = -EFAULT;
16212 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16213 + goto out;
16214 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16215 num_msrs_to_save * sizeof(u32)))
16216 goto out;
16217 @@ -2217,15 +2219,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16218 struct kvm_cpuid2 *cpuid,
16219 struct kvm_cpuid_entry2 __user *entries)
16220 {
16221 - int r;
16222 + int r, i;
16223
16224 r = -E2BIG;
16225 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16226 goto out;
16227 r = -EFAULT;
16228 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16229 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16230 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16231 goto out;
16232 + for (i = 0; i < cpuid->nent; ++i) {
16233 + struct kvm_cpuid_entry2 cpuid_entry;
16234 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16235 + goto out;
16236 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
16237 + }
16238 vcpu->arch.cpuid_nent = cpuid->nent;
16239 kvm_apic_set_version(vcpu);
16240 kvm_x86_ops->cpuid_update(vcpu);
16241 @@ -2240,15 +2247,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16242 struct kvm_cpuid2 *cpuid,
16243 struct kvm_cpuid_entry2 __user *entries)
16244 {
16245 - int r;
16246 + int r, i;
16247
16248 r = -E2BIG;
16249 if (cpuid->nent < vcpu->arch.cpuid_nent)
16250 goto out;
16251 r = -EFAULT;
16252 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16253 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16254 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16255 goto out;
16256 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16257 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16258 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16259 + goto out;
16260 + }
16261 return 0;
16262
16263 out:
16264 @@ -2526,7 +2537,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16265 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16266 struct kvm_interrupt *irq)
16267 {
16268 - if (irq->irq < 0 || irq->irq >= 256)
16269 + if (irq->irq >= 256)
16270 return -EINVAL;
16271 if (irqchip_in_kernel(vcpu->kvm))
16272 return -ENXIO;
16273 @@ -4690,7 +4701,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16274 }
16275 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16276
16277 -int kvm_arch_init(void *opaque)
16278 +int kvm_arch_init(const void *opaque)
16279 {
16280 int r;
16281 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16282 diff -urNp linux-2.6.39.4/arch/x86/lguest/boot.c linux-2.6.39.4/arch/x86/lguest/boot.c
16283 --- linux-2.6.39.4/arch/x86/lguest/boot.c 2011-06-25 12:55:22.000000000 -0400
16284 +++ linux-2.6.39.4/arch/x86/lguest/boot.c 2011-08-05 20:34:06.000000000 -0400
16285 @@ -1178,9 +1178,10 @@ static __init int early_put_chars(u32 vt
16286 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16287 * Launcher to reboot us.
16288 */
16289 -static void lguest_restart(char *reason)
16290 +static __noreturn void lguest_restart(char *reason)
16291 {
16292 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16293 + BUG();
16294 }
16295
16296 /*G:050
16297 diff -urNp linux-2.6.39.4/arch/x86/lib/atomic64_32.c linux-2.6.39.4/arch/x86/lib/atomic64_32.c
16298 --- linux-2.6.39.4/arch/x86/lib/atomic64_32.c 2011-05-19 00:06:34.000000000 -0400
16299 +++ linux-2.6.39.4/arch/x86/lib/atomic64_32.c 2011-08-05 19:44:35.000000000 -0400
16300 @@ -8,18 +8,30 @@
16301
16302 long long atomic64_read_cx8(long long, const atomic64_t *v);
16303 EXPORT_SYMBOL(atomic64_read_cx8);
16304 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16305 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16306 long long atomic64_set_cx8(long long, const atomic64_t *v);
16307 EXPORT_SYMBOL(atomic64_set_cx8);
16308 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16309 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16310 long long atomic64_xchg_cx8(long long, unsigned high);
16311 EXPORT_SYMBOL(atomic64_xchg_cx8);
16312 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16313 EXPORT_SYMBOL(atomic64_add_return_cx8);
16314 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16315 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16316 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16317 EXPORT_SYMBOL(atomic64_sub_return_cx8);
16318 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16319 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16320 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16321 EXPORT_SYMBOL(atomic64_inc_return_cx8);
16322 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16323 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16324 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16325 EXPORT_SYMBOL(atomic64_dec_return_cx8);
16326 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16327 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16328 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16329 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16330 int atomic64_inc_not_zero_cx8(atomic64_t *v);
16331 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16332 #ifndef CONFIG_X86_CMPXCHG64
16333 long long atomic64_read_386(long long, const atomic64_t *v);
16334 EXPORT_SYMBOL(atomic64_read_386);
16335 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16336 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
16337 long long atomic64_set_386(long long, const atomic64_t *v);
16338 EXPORT_SYMBOL(atomic64_set_386);
16339 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16340 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
16341 long long atomic64_xchg_386(long long, unsigned high);
16342 EXPORT_SYMBOL(atomic64_xchg_386);
16343 long long atomic64_add_return_386(long long a, atomic64_t *v);
16344 EXPORT_SYMBOL(atomic64_add_return_386);
16345 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16346 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
16347 long long atomic64_sub_return_386(long long a, atomic64_t *v);
16348 EXPORT_SYMBOL(atomic64_sub_return_386);
16349 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16350 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
16351 long long atomic64_inc_return_386(long long a, atomic64_t *v);
16352 EXPORT_SYMBOL(atomic64_inc_return_386);
16353 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16354 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
16355 long long atomic64_dec_return_386(long long a, atomic64_t *v);
16356 EXPORT_SYMBOL(atomic64_dec_return_386);
16357 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16358 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
16359 long long atomic64_add_386(long long a, atomic64_t *v);
16360 EXPORT_SYMBOL(atomic64_add_386);
16361 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
16362 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
16363 long long atomic64_sub_386(long long a, atomic64_t *v);
16364 EXPORT_SYMBOL(atomic64_sub_386);
16365 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
16366 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
16367 long long atomic64_inc_386(long long a, atomic64_t *v);
16368 EXPORT_SYMBOL(atomic64_inc_386);
16369 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
16370 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
16371 long long atomic64_dec_386(long long a, atomic64_t *v);
16372 EXPORT_SYMBOL(atomic64_dec_386);
16373 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
16374 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
16375 long long atomic64_dec_if_positive_386(atomic64_t *v);
16376 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
16377 int atomic64_inc_not_zero_386(atomic64_t *v);
16378 diff -urNp linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S
16379 --- linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S 2011-05-19 00:06:34.000000000 -0400
16380 +++ linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S 2011-08-05 19:44:35.000000000 -0400
16381 @@ -48,6 +48,10 @@ BEGIN(read)
16382 movl (v), %eax
16383 movl 4(v), %edx
16384 RET_ENDP
16385 +BEGIN(read_unchecked)
16386 + movl (v), %eax
16387 + movl 4(v), %edx
16388 +RET_ENDP
16389 #undef v
16390
16391 #define v %esi
16392 @@ -55,6 +59,10 @@ BEGIN(set)
16393 movl %ebx, (v)
16394 movl %ecx, 4(v)
16395 RET_ENDP
16396 +BEGIN(set_unchecked)
16397 + movl %ebx, (v)
16398 + movl %ecx, 4(v)
16399 +RET_ENDP
16400 #undef v
16401
16402 #define v %esi
16403 @@ -70,6 +78,20 @@ RET_ENDP
16404 BEGIN(add)
16405 addl %eax, (v)
16406 adcl %edx, 4(v)
16407 +
16408 +#ifdef CONFIG_PAX_REFCOUNT
16409 + jno 0f
16410 + subl %eax, (v)
16411 + sbbl %edx, 4(v)
16412 + int $4
16413 +0:
16414 + _ASM_EXTABLE(0b, 0b)
16415 +#endif
16416 +
16417 +RET_ENDP
16418 +BEGIN(add_unchecked)
16419 + addl %eax, (v)
16420 + adcl %edx, 4(v)
16421 RET_ENDP
16422 #undef v
16423
16424 @@ -77,6 +99,24 @@ RET_ENDP
16425 BEGIN(add_return)
16426 addl (v), %eax
16427 adcl 4(v), %edx
16428 +
16429 +#ifdef CONFIG_PAX_REFCOUNT
16430 + into
16431 +1234:
16432 + _ASM_EXTABLE(1234b, 2f)
16433 +#endif
16434 +
16435 + movl %eax, (v)
16436 + movl %edx, 4(v)
16437 +
16438 +#ifdef CONFIG_PAX_REFCOUNT
16439 +2:
16440 +#endif
16441 +
16442 +RET_ENDP
16443 +BEGIN(add_return_unchecked)
16444 + addl (v), %eax
16445 + adcl 4(v), %edx
16446 movl %eax, (v)
16447 movl %edx, 4(v)
16448 RET_ENDP
16449 @@ -86,6 +126,20 @@ RET_ENDP
16450 BEGIN(sub)
16451 subl %eax, (v)
16452 sbbl %edx, 4(v)
16453 +
16454 +#ifdef CONFIG_PAX_REFCOUNT
16455 + jno 0f
16456 + addl %eax, (v)
16457 + adcl %edx, 4(v)
16458 + int $4
16459 +0:
16460 + _ASM_EXTABLE(0b, 0b)
16461 +#endif
16462 +
16463 +RET_ENDP
16464 +BEGIN(sub_unchecked)
16465 + subl %eax, (v)
16466 + sbbl %edx, 4(v)
16467 RET_ENDP
16468 #undef v
16469
16470 @@ -96,6 +150,27 @@ BEGIN(sub_return)
16471 sbbl $0, %edx
16472 addl (v), %eax
16473 adcl 4(v), %edx
16474 +
16475 +#ifdef CONFIG_PAX_REFCOUNT
16476 + into
16477 +1234:
16478 + _ASM_EXTABLE(1234b, 2f)
16479 +#endif
16480 +
16481 + movl %eax, (v)
16482 + movl %edx, 4(v)
16483 +
16484 +#ifdef CONFIG_PAX_REFCOUNT
16485 +2:
16486 +#endif
16487 +
16488 +RET_ENDP
16489 +BEGIN(sub_return_unchecked)
16490 + negl %edx
16491 + negl %eax
16492 + sbbl $0, %edx
16493 + addl (v), %eax
16494 + adcl 4(v), %edx
16495 movl %eax, (v)
16496 movl %edx, 4(v)
16497 RET_ENDP
16498 @@ -105,6 +180,20 @@ RET_ENDP
16499 BEGIN(inc)
16500 addl $1, (v)
16501 adcl $0, 4(v)
16502 +
16503 +#ifdef CONFIG_PAX_REFCOUNT
16504 + jno 0f
16505 + subl $1, (v)
16506 + sbbl $0, 4(v)
16507 + int $4
16508 +0:
16509 + _ASM_EXTABLE(0b, 0b)
16510 +#endif
16511 +
16512 +RET_ENDP
16513 +BEGIN(inc_unchecked)
16514 + addl $1, (v)
16515 + adcl $0, 4(v)
16516 RET_ENDP
16517 #undef v
16518
16519 @@ -114,6 +203,26 @@ BEGIN(inc_return)
16520 movl 4(v), %edx
16521 addl $1, %eax
16522 adcl $0, %edx
16523 +
16524 +#ifdef CONFIG_PAX_REFCOUNT
16525 + into
16526 +1234:
16527 + _ASM_EXTABLE(1234b, 2f)
16528 +#endif
16529 +
16530 + movl %eax, (v)
16531 + movl %edx, 4(v)
16532 +
16533 +#ifdef CONFIG_PAX_REFCOUNT
16534 +2:
16535 +#endif
16536 +
16537 +RET_ENDP
16538 +BEGIN(inc_return_unchecked)
16539 + movl (v), %eax
16540 + movl 4(v), %edx
16541 + addl $1, %eax
16542 + adcl $0, %edx
16543 movl %eax, (v)
16544 movl %edx, 4(v)
16545 RET_ENDP
16546 @@ -123,6 +232,20 @@ RET_ENDP
16547 BEGIN(dec)
16548 subl $1, (v)
16549 sbbl $0, 4(v)
16550 +
16551 +#ifdef CONFIG_PAX_REFCOUNT
16552 + jno 0f
16553 + addl $1, (v)
16554 + adcl $0, 4(v)
16555 + int $4
16556 +0:
16557 + _ASM_EXTABLE(0b, 0b)
16558 +#endif
16559 +
16560 +RET_ENDP
16561 +BEGIN(dec_unchecked)
16562 + subl $1, (v)
16563 + sbbl $0, 4(v)
16564 RET_ENDP
16565 #undef v
16566
16567 @@ -132,6 +255,26 @@ BEGIN(dec_return)
16568 movl 4(v), %edx
16569 subl $1, %eax
16570 sbbl $0, %edx
16571 +
16572 +#ifdef CONFIG_PAX_REFCOUNT
16573 + into
16574 +1234:
16575 + _ASM_EXTABLE(1234b, 2f)
16576 +#endif
16577 +
16578 + movl %eax, (v)
16579 + movl %edx, 4(v)
16580 +
16581 +#ifdef CONFIG_PAX_REFCOUNT
16582 +2:
16583 +#endif
16584 +
16585 +RET_ENDP
16586 +BEGIN(dec_return_unchecked)
16587 + movl (v), %eax
16588 + movl 4(v), %edx
16589 + subl $1, %eax
16590 + sbbl $0, %edx
16591 movl %eax, (v)
16592 movl %edx, 4(v)
16593 RET_ENDP
16594 @@ -143,6 +286,13 @@ BEGIN(add_unless)
16595 adcl %edx, %edi
16596 addl (v), %eax
16597 adcl 4(v), %edx
16598 +
16599 +#ifdef CONFIG_PAX_REFCOUNT
16600 + into
16601 +1234:
16602 + _ASM_EXTABLE(1234b, 2f)
16603 +#endif
16604 +
16605 cmpl %eax, %esi
16606 je 3f
16607 1:
16608 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
16609 1:
16610 addl $1, %eax
16611 adcl $0, %edx
16612 +
16613 +#ifdef CONFIG_PAX_REFCOUNT
16614 + into
16615 +1234:
16616 + _ASM_EXTABLE(1234b, 2f)
16617 +#endif
16618 +
16619 movl %eax, (v)
16620 movl %edx, 4(v)
16621 movl $1, %eax
16622 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
16623 movl 4(v), %edx
16624 subl $1, %eax
16625 sbbl $0, %edx
16626 +
16627 +#ifdef CONFIG_PAX_REFCOUNT
16628 + into
16629 +1234:
16630 + _ASM_EXTABLE(1234b, 1f)
16631 +#endif
16632 +
16633 js 1f
16634 movl %eax, (v)
16635 movl %edx, 4(v)
16636 diff -urNp linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S
16637 --- linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S 2011-05-19 00:06:34.000000000 -0400
16638 +++ linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S 2011-08-05 19:44:35.000000000 -0400
16639 @@ -39,6 +39,14 @@ ENTRY(atomic64_read_cx8)
16640 CFI_ENDPROC
16641 ENDPROC(atomic64_read_cx8)
16642
16643 +ENTRY(atomic64_read_unchecked_cx8)
16644 + CFI_STARTPROC
16645 +
16646 + read64 %ecx
16647 + ret
16648 + CFI_ENDPROC
16649 +ENDPROC(atomic64_read_unchecked_cx8)
16650 +
16651 ENTRY(atomic64_set_cx8)
16652 CFI_STARTPROC
16653
16654 @@ -52,6 +60,19 @@ ENTRY(atomic64_set_cx8)
16655 CFI_ENDPROC
16656 ENDPROC(atomic64_set_cx8)
16657
16658 +ENTRY(atomic64_set_unchecked_cx8)
16659 + CFI_STARTPROC
16660 +
16661 +1:
16662 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
16663 + * are atomic on 586 and newer */
16664 + cmpxchg8b (%esi)
16665 + jne 1b
16666 +
16667 + ret
16668 + CFI_ENDPROC
16669 +ENDPROC(atomic64_set_unchecked_cx8)
16670 +
16671 ENTRY(atomic64_xchg_cx8)
16672 CFI_STARTPROC
16673
16674 @@ -66,8 +87,8 @@ ENTRY(atomic64_xchg_cx8)
16675 CFI_ENDPROC
16676 ENDPROC(atomic64_xchg_cx8)
16677
16678 -.macro addsub_return func ins insc
16679 -ENTRY(atomic64_\func\()_return_cx8)
16680 +.macro addsub_return func ins insc unchecked=""
16681 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16682 CFI_STARTPROC
16683 SAVE ebp
16684 SAVE ebx
16685 @@ -84,27 +105,43 @@ ENTRY(atomic64_\func\()_return_cx8)
16686 movl %edx, %ecx
16687 \ins\()l %esi, %ebx
16688 \insc\()l %edi, %ecx
16689 +
16690 +.ifb \unchecked
16691 +#ifdef CONFIG_PAX_REFCOUNT
16692 + into
16693 +2:
16694 + _ASM_EXTABLE(2b, 3f)
16695 +#endif
16696 +.endif
16697 +
16698 LOCK_PREFIX
16699 cmpxchg8b (%ebp)
16700 jne 1b
16701 -
16702 -10:
16703 movl %ebx, %eax
16704 movl %ecx, %edx
16705 +
16706 +.ifb \unchecked
16707 +#ifdef CONFIG_PAX_REFCOUNT
16708 +3:
16709 +#endif
16710 +.endif
16711 +
16712 RESTORE edi
16713 RESTORE esi
16714 RESTORE ebx
16715 RESTORE ebp
16716 ret
16717 CFI_ENDPROC
16718 -ENDPROC(atomic64_\func\()_return_cx8)
16719 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16720 .endm
16721
16722 addsub_return add add adc
16723 addsub_return sub sub sbb
16724 +addsub_return add add adc _unchecked
16725 +addsub_return sub sub sbb _unchecked
16726
16727 -.macro incdec_return func ins insc
16728 -ENTRY(atomic64_\func\()_return_cx8)
16729 +.macro incdec_return func ins insc unchecked
16730 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16731 CFI_STARTPROC
16732 SAVE ebx
16733
16734 @@ -114,21 +151,38 @@ ENTRY(atomic64_\func\()_return_cx8)
16735 movl %edx, %ecx
16736 \ins\()l $1, %ebx
16737 \insc\()l $0, %ecx
16738 +
16739 +.ifb \unchecked
16740 +#ifdef CONFIG_PAX_REFCOUNT
16741 + into
16742 +2:
16743 + _ASM_EXTABLE(2b, 3f)
16744 +#endif
16745 +.endif
16746 +
16747 LOCK_PREFIX
16748 cmpxchg8b (%esi)
16749 jne 1b
16750
16751 -10:
16752 movl %ebx, %eax
16753 movl %ecx, %edx
16754 +
16755 +.ifb \unchecked
16756 +#ifdef CONFIG_PAX_REFCOUNT
16757 +3:
16758 +#endif
16759 +.endif
16760 +
16761 RESTORE ebx
16762 ret
16763 CFI_ENDPROC
16764 -ENDPROC(atomic64_\func\()_return_cx8)
16765 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16766 .endm
16767
16768 incdec_return inc add adc
16769 incdec_return dec sub sbb
16770 +incdec_return inc add adc _unchecked
16771 +incdec_return dec sub sbb _unchecked
16772
16773 ENTRY(atomic64_dec_if_positive_cx8)
16774 CFI_STARTPROC
16775 @@ -140,6 +194,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
16776 movl %edx, %ecx
16777 subl $1, %ebx
16778 sbb $0, %ecx
16779 +
16780 +#ifdef CONFIG_PAX_REFCOUNT
16781 + into
16782 +1234:
16783 + _ASM_EXTABLE(1234b, 2f)
16784 +#endif
16785 +
16786 js 2f
16787 LOCK_PREFIX
16788 cmpxchg8b (%esi)
16789 @@ -174,6 +235,13 @@ ENTRY(atomic64_add_unless_cx8)
16790 movl %edx, %ecx
16791 addl %esi, %ebx
16792 adcl %edi, %ecx
16793 +
16794 +#ifdef CONFIG_PAX_REFCOUNT
16795 + into
16796 +1234:
16797 + _ASM_EXTABLE(1234b, 3f)
16798 +#endif
16799 +
16800 LOCK_PREFIX
16801 cmpxchg8b (%ebp)
16802 jne 1b
16803 @@ -206,6 +274,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
16804 movl %edx, %ecx
16805 addl $1, %ebx
16806 adcl $0, %ecx
16807 +
16808 +#ifdef CONFIG_PAX_REFCOUNT
16809 + into
16810 +1234:
16811 + _ASM_EXTABLE(1234b, 3f)
16812 +#endif
16813 +
16814 LOCK_PREFIX
16815 cmpxchg8b (%esi)
16816 jne 1b
16817 diff -urNp linux-2.6.39.4/arch/x86/lib/checksum_32.S linux-2.6.39.4/arch/x86/lib/checksum_32.S
16818 --- linux-2.6.39.4/arch/x86/lib/checksum_32.S 2011-05-19 00:06:34.000000000 -0400
16819 +++ linux-2.6.39.4/arch/x86/lib/checksum_32.S 2011-08-05 19:44:35.000000000 -0400
16820 @@ -28,7 +28,8 @@
16821 #include <linux/linkage.h>
16822 #include <asm/dwarf2.h>
16823 #include <asm/errno.h>
16824 -
16825 +#include <asm/segment.h>
16826 +
16827 /*
16828 * computes a partial checksum, e.g. for TCP/UDP fragments
16829 */
16830 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
16831
16832 #define ARGBASE 16
16833 #define FP 12
16834 -
16835 -ENTRY(csum_partial_copy_generic)
16836 +
16837 +ENTRY(csum_partial_copy_generic_to_user)
16838 CFI_STARTPROC
16839 +
16840 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16841 + pushl_cfi %gs
16842 + popl_cfi %es
16843 + jmp csum_partial_copy_generic
16844 +#endif
16845 +
16846 +ENTRY(csum_partial_copy_generic_from_user)
16847 +
16848 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16849 + pushl_cfi %gs
16850 + popl_cfi %ds
16851 +#endif
16852 +
16853 +ENTRY(csum_partial_copy_generic)
16854 subl $4,%esp
16855 CFI_ADJUST_CFA_OFFSET 4
16856 pushl_cfi %edi
16857 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
16858 jmp 4f
16859 SRC(1: movw (%esi), %bx )
16860 addl $2, %esi
16861 -DST( movw %bx, (%edi) )
16862 +DST( movw %bx, %es:(%edi) )
16863 addl $2, %edi
16864 addw %bx, %ax
16865 adcl $0, %eax
16866 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
16867 SRC(1: movl (%esi), %ebx )
16868 SRC( movl 4(%esi), %edx )
16869 adcl %ebx, %eax
16870 -DST( movl %ebx, (%edi) )
16871 +DST( movl %ebx, %es:(%edi) )
16872 adcl %edx, %eax
16873 -DST( movl %edx, 4(%edi) )
16874 +DST( movl %edx, %es:4(%edi) )
16875
16876 SRC( movl 8(%esi), %ebx )
16877 SRC( movl 12(%esi), %edx )
16878 adcl %ebx, %eax
16879 -DST( movl %ebx, 8(%edi) )
16880 +DST( movl %ebx, %es:8(%edi) )
16881 adcl %edx, %eax
16882 -DST( movl %edx, 12(%edi) )
16883 +DST( movl %edx, %es:12(%edi) )
16884
16885 SRC( movl 16(%esi), %ebx )
16886 SRC( movl 20(%esi), %edx )
16887 adcl %ebx, %eax
16888 -DST( movl %ebx, 16(%edi) )
16889 +DST( movl %ebx, %es:16(%edi) )
16890 adcl %edx, %eax
16891 -DST( movl %edx, 20(%edi) )
16892 +DST( movl %edx, %es:20(%edi) )
16893
16894 SRC( movl 24(%esi), %ebx )
16895 SRC( movl 28(%esi), %edx )
16896 adcl %ebx, %eax
16897 -DST( movl %ebx, 24(%edi) )
16898 +DST( movl %ebx, %es:24(%edi) )
16899 adcl %edx, %eax
16900 -DST( movl %edx, 28(%edi) )
16901 +DST( movl %edx, %es:28(%edi) )
16902
16903 lea 32(%esi), %esi
16904 lea 32(%edi), %edi
16905 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
16906 shrl $2, %edx # This clears CF
16907 SRC(3: movl (%esi), %ebx )
16908 adcl %ebx, %eax
16909 -DST( movl %ebx, (%edi) )
16910 +DST( movl %ebx, %es:(%edi) )
16911 lea 4(%esi), %esi
16912 lea 4(%edi), %edi
16913 dec %edx
16914 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
16915 jb 5f
16916 SRC( movw (%esi), %cx )
16917 leal 2(%esi), %esi
16918 -DST( movw %cx, (%edi) )
16919 +DST( movw %cx, %es:(%edi) )
16920 leal 2(%edi), %edi
16921 je 6f
16922 shll $16,%ecx
16923 SRC(5: movb (%esi), %cl )
16924 -DST( movb %cl, (%edi) )
16925 +DST( movb %cl, %es:(%edi) )
16926 6: addl %ecx, %eax
16927 adcl $0, %eax
16928 7:
16929 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
16930
16931 6001:
16932 movl ARGBASE+20(%esp), %ebx # src_err_ptr
16933 - movl $-EFAULT, (%ebx)
16934 + movl $-EFAULT, %ss:(%ebx)
16935
16936 # zero the complete destination - computing the rest
16937 # is too much work
16938 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
16939
16940 6002:
16941 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16942 - movl $-EFAULT,(%ebx)
16943 + movl $-EFAULT,%ss:(%ebx)
16944 jmp 5000b
16945
16946 .previous
16947
16948 + pushl_cfi %ss
16949 + popl_cfi %ds
16950 + pushl_cfi %ss
16951 + popl_cfi %es
16952 popl_cfi %ebx
16953 CFI_RESTORE ebx
16954 popl_cfi %esi
16955 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
16956 popl_cfi %ecx # equivalent to addl $4,%esp
16957 ret
16958 CFI_ENDPROC
16959 -ENDPROC(csum_partial_copy_generic)
16960 +ENDPROC(csum_partial_copy_generic_to_user)
16961
16962 #else
16963
16964 /* Version for PentiumII/PPro */
16965
16966 #define ROUND1(x) \
16967 + nop; nop; nop; \
16968 SRC(movl x(%esi), %ebx ) ; \
16969 addl %ebx, %eax ; \
16970 - DST(movl %ebx, x(%edi) ) ;
16971 + DST(movl %ebx, %es:x(%edi)) ;
16972
16973 #define ROUND(x) \
16974 + nop; nop; nop; \
16975 SRC(movl x(%esi), %ebx ) ; \
16976 adcl %ebx, %eax ; \
16977 - DST(movl %ebx, x(%edi) ) ;
16978 + DST(movl %ebx, %es:x(%edi)) ;
16979
16980 #define ARGBASE 12
16981 -
16982 -ENTRY(csum_partial_copy_generic)
16983 +
16984 +ENTRY(csum_partial_copy_generic_to_user)
16985 CFI_STARTPROC
16986 +
16987 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16988 + pushl_cfi %gs
16989 + popl_cfi %es
16990 + jmp csum_partial_copy_generic
16991 +#endif
16992 +
16993 +ENTRY(csum_partial_copy_generic_from_user)
16994 +
16995 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16996 + pushl_cfi %gs
16997 + popl_cfi %ds
16998 +#endif
16999 +
17000 +ENTRY(csum_partial_copy_generic)
17001 pushl_cfi %ebx
17002 CFI_REL_OFFSET ebx, 0
17003 pushl_cfi %edi
17004 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
17005 subl %ebx, %edi
17006 lea -1(%esi),%edx
17007 andl $-32,%edx
17008 - lea 3f(%ebx,%ebx), %ebx
17009 + lea 3f(%ebx,%ebx,2), %ebx
17010 testl %esi, %esi
17011 jmp *%ebx
17012 1: addl $64,%esi
17013 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
17014 jb 5f
17015 SRC( movw (%esi), %dx )
17016 leal 2(%esi), %esi
17017 -DST( movw %dx, (%edi) )
17018 +DST( movw %dx, %es:(%edi) )
17019 leal 2(%edi), %edi
17020 je 6f
17021 shll $16,%edx
17022 5:
17023 SRC( movb (%esi), %dl )
17024 -DST( movb %dl, (%edi) )
17025 +DST( movb %dl, %es:(%edi) )
17026 6: addl %edx, %eax
17027 adcl $0, %eax
17028 7:
17029 .section .fixup, "ax"
17030 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
17031 - movl $-EFAULT, (%ebx)
17032 + movl $-EFAULT, %ss:(%ebx)
17033 # zero the complete destination (computing the rest is too much work)
17034 movl ARGBASE+8(%esp),%edi # dst
17035 movl ARGBASE+12(%esp),%ecx # len
17036 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
17037 rep; stosb
17038 jmp 7b
17039 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17040 - movl $-EFAULT, (%ebx)
17041 + movl $-EFAULT, %ss:(%ebx)
17042 jmp 7b
17043 .previous
17044
17045 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17046 + pushl_cfi %ss
17047 + popl_cfi %ds
17048 + pushl_cfi %ss
17049 + popl_cfi %es
17050 +#endif
17051 +
17052 popl_cfi %esi
17053 CFI_RESTORE esi
17054 popl_cfi %edi
17055 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
17056 CFI_RESTORE ebx
17057 ret
17058 CFI_ENDPROC
17059 -ENDPROC(csum_partial_copy_generic)
17060 +ENDPROC(csum_partial_copy_generic_to_user)
17061
17062 #undef ROUND
17063 #undef ROUND1
17064 diff -urNp linux-2.6.39.4/arch/x86/lib/clear_page_64.S linux-2.6.39.4/arch/x86/lib/clear_page_64.S
17065 --- linux-2.6.39.4/arch/x86/lib/clear_page_64.S 2011-05-19 00:06:34.000000000 -0400
17066 +++ linux-2.6.39.4/arch/x86/lib/clear_page_64.S 2011-08-05 19:44:35.000000000 -0400
17067 @@ -43,7 +43,7 @@ ENDPROC(clear_page)
17068
17069 #include <asm/cpufeature.h>
17070
17071 - .section .altinstr_replacement,"ax"
17072 + .section .altinstr_replacement,"a"
17073 1: .byte 0xeb /* jmp <disp8> */
17074 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
17075 2:
17076 diff -urNp linux-2.6.39.4/arch/x86/lib/copy_page_64.S linux-2.6.39.4/arch/x86/lib/copy_page_64.S
17077 --- linux-2.6.39.4/arch/x86/lib/copy_page_64.S 2011-05-19 00:06:34.000000000 -0400
17078 +++ linux-2.6.39.4/arch/x86/lib/copy_page_64.S 2011-08-05 19:44:35.000000000 -0400
17079 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
17080
17081 #include <asm/cpufeature.h>
17082
17083 - .section .altinstr_replacement,"ax"
17084 + .section .altinstr_replacement,"a"
17085 1: .byte 0xeb /* jmp <disp8> */
17086 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
17087 2:
17088 diff -urNp linux-2.6.39.4/arch/x86/lib/copy_user_64.S linux-2.6.39.4/arch/x86/lib/copy_user_64.S
17089 --- linux-2.6.39.4/arch/x86/lib/copy_user_64.S 2011-06-03 00:04:13.000000000 -0400
17090 +++ linux-2.6.39.4/arch/x86/lib/copy_user_64.S 2011-08-05 19:44:35.000000000 -0400
17091 @@ -15,13 +15,14 @@
17092 #include <asm/asm-offsets.h>
17093 #include <asm/thread_info.h>
17094 #include <asm/cpufeature.h>
17095 +#include <asm/pgtable.h>
17096
17097 .macro ALTERNATIVE_JUMP feature,orig,alt
17098 0:
17099 .byte 0xe9 /* 32bit jump */
17100 .long \orig-1f /* by default jump to orig */
17101 1:
17102 - .section .altinstr_replacement,"ax"
17103 + .section .altinstr_replacement,"a"
17104 2: .byte 0xe9 /* near jump with 32bit immediate */
17105 .long \alt-1b /* offset */ /* or alternatively to alt */
17106 .previous
17107 @@ -64,37 +65,13 @@
17108 #endif
17109 .endm
17110
17111 -/* Standard copy_to_user with segment limit checking */
17112 -ENTRY(_copy_to_user)
17113 - CFI_STARTPROC
17114 - GET_THREAD_INFO(%rax)
17115 - movq %rdi,%rcx
17116 - addq %rdx,%rcx
17117 - jc bad_to_user
17118 - cmpq TI_addr_limit(%rax),%rcx
17119 - ja bad_to_user
17120 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
17121 - CFI_ENDPROC
17122 -ENDPROC(_copy_to_user)
17123 -
17124 -/* Standard copy_from_user with segment limit checking */
17125 -ENTRY(_copy_from_user)
17126 - CFI_STARTPROC
17127 - GET_THREAD_INFO(%rax)
17128 - movq %rsi,%rcx
17129 - addq %rdx,%rcx
17130 - jc bad_from_user
17131 - cmpq TI_addr_limit(%rax),%rcx
17132 - ja bad_from_user
17133 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
17134 - CFI_ENDPROC
17135 -ENDPROC(_copy_from_user)
17136 -
17137 .section .fixup,"ax"
17138 /* must zero dest */
17139 ENTRY(bad_from_user)
17140 bad_from_user:
17141 CFI_STARTPROC
17142 + testl %edx,%edx
17143 + js bad_to_user
17144 movl %edx,%ecx
17145 xorl %eax,%eax
17146 rep
17147 diff -urNp linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S
17148 --- linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S 2011-05-19 00:06:34.000000000 -0400
17149 +++ linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S 2011-08-05 19:44:35.000000000 -0400
17150 @@ -14,6 +14,7 @@
17151 #include <asm/current.h>
17152 #include <asm/asm-offsets.h>
17153 #include <asm/thread_info.h>
17154 +#include <asm/pgtable.h>
17155
17156 .macro ALIGN_DESTINATION
17157 #ifdef FIX_ALIGNMENT
17158 @@ -50,6 +51,15 @@
17159 */
17160 ENTRY(__copy_user_nocache)
17161 CFI_STARTPROC
17162 +
17163 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17164 + mov $PAX_USER_SHADOW_BASE,%rcx
17165 + cmp %rcx,%rsi
17166 + jae 1f
17167 + add %rcx,%rsi
17168 +1:
17169 +#endif
17170 +
17171 cmpl $8,%edx
17172 jb 20f /* less then 8 bytes, go to byte copy loop */
17173 ALIGN_DESTINATION
17174 diff -urNp linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c
17175 --- linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c 2011-05-19 00:06:34.000000000 -0400
17176 +++ linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c 2011-08-05 19:44:35.000000000 -0400
17177 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
17178 len -= 2;
17179 }
17180 }
17181 +
17182 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17183 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17184 + src += PAX_USER_SHADOW_BASE;
17185 +#endif
17186 +
17187 isum = csum_partial_copy_generic((__force const void *)src,
17188 dst, len, isum, errp, NULL);
17189 if (unlikely(*errp))
17190 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
17191 }
17192
17193 *errp = 0;
17194 +
17195 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17196 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
17197 + dst += PAX_USER_SHADOW_BASE;
17198 +#endif
17199 +
17200 return csum_partial_copy_generic(src, (void __force *)dst,
17201 len, isum, NULL, errp);
17202 }
17203 diff -urNp linux-2.6.39.4/arch/x86/lib/getuser.S linux-2.6.39.4/arch/x86/lib/getuser.S
17204 --- linux-2.6.39.4/arch/x86/lib/getuser.S 2011-05-19 00:06:34.000000000 -0400
17205 +++ linux-2.6.39.4/arch/x86/lib/getuser.S 2011-08-05 19:44:35.000000000 -0400
17206 @@ -33,14 +33,35 @@
17207 #include <asm/asm-offsets.h>
17208 #include <asm/thread_info.h>
17209 #include <asm/asm.h>
17210 +#include <asm/segment.h>
17211 +#include <asm/pgtable.h>
17212 +
17213 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17214 +#define __copyuser_seg gs;
17215 +#else
17216 +#define __copyuser_seg
17217 +#endif
17218
17219 .text
17220 ENTRY(__get_user_1)
17221 CFI_STARTPROC
17222 +
17223 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17224 GET_THREAD_INFO(%_ASM_DX)
17225 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17226 jae bad_get_user
17227 -1: movzb (%_ASM_AX),%edx
17228 +
17229 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17230 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17231 + cmp %_ASM_DX,%_ASM_AX
17232 + jae 1234f
17233 + add %_ASM_DX,%_ASM_AX
17234 +1234:
17235 +#endif
17236 +
17237 +#endif
17238 +
17239 +1: __copyuser_seg movzb (%_ASM_AX),%edx
17240 xor %eax,%eax
17241 ret
17242 CFI_ENDPROC
17243 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
17244 ENTRY(__get_user_2)
17245 CFI_STARTPROC
17246 add $1,%_ASM_AX
17247 +
17248 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17249 jc bad_get_user
17250 GET_THREAD_INFO(%_ASM_DX)
17251 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17252 jae bad_get_user
17253 -2: movzwl -1(%_ASM_AX),%edx
17254 +
17255 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17256 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17257 + cmp %_ASM_DX,%_ASM_AX
17258 + jae 1234f
17259 + add %_ASM_DX,%_ASM_AX
17260 +1234:
17261 +#endif
17262 +
17263 +#endif
17264 +
17265 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
17266 xor %eax,%eax
17267 ret
17268 CFI_ENDPROC
17269 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
17270 ENTRY(__get_user_4)
17271 CFI_STARTPROC
17272 add $3,%_ASM_AX
17273 +
17274 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17275 jc bad_get_user
17276 GET_THREAD_INFO(%_ASM_DX)
17277 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17278 jae bad_get_user
17279 -3: mov -3(%_ASM_AX),%edx
17280 +
17281 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17282 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17283 + cmp %_ASM_DX,%_ASM_AX
17284 + jae 1234f
17285 + add %_ASM_DX,%_ASM_AX
17286 +1234:
17287 +#endif
17288 +
17289 +#endif
17290 +
17291 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
17292 xor %eax,%eax
17293 ret
17294 CFI_ENDPROC
17295 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
17296 GET_THREAD_INFO(%_ASM_DX)
17297 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17298 jae bad_get_user
17299 +
17300 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17301 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17302 + cmp %_ASM_DX,%_ASM_AX
17303 + jae 1234f
17304 + add %_ASM_DX,%_ASM_AX
17305 +1234:
17306 +#endif
17307 +
17308 4: movq -7(%_ASM_AX),%_ASM_DX
17309 xor %eax,%eax
17310 ret
17311 diff -urNp linux-2.6.39.4/arch/x86/lib/insn.c linux-2.6.39.4/arch/x86/lib/insn.c
17312 --- linux-2.6.39.4/arch/x86/lib/insn.c 2011-05-19 00:06:34.000000000 -0400
17313 +++ linux-2.6.39.4/arch/x86/lib/insn.c 2011-08-05 19:44:35.000000000 -0400
17314 @@ -21,6 +21,11 @@
17315 #include <linux/string.h>
17316 #include <asm/inat.h>
17317 #include <asm/insn.h>
17318 +#ifdef __KERNEL__
17319 +#include <asm/pgtable_types.h>
17320 +#else
17321 +#define ktla_ktva(addr) addr
17322 +#endif
17323
17324 #define get_next(t, insn) \
17325 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
17326 @@ -40,8 +45,8 @@
17327 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
17328 {
17329 memset(insn, 0, sizeof(*insn));
17330 - insn->kaddr = kaddr;
17331 - insn->next_byte = kaddr;
17332 + insn->kaddr = ktla_ktva(kaddr);
17333 + insn->next_byte = ktla_ktva(kaddr);
17334 insn->x86_64 = x86_64 ? 1 : 0;
17335 insn->opnd_bytes = 4;
17336 if (x86_64)
17337 diff -urNp linux-2.6.39.4/arch/x86/lib/mmx_32.c linux-2.6.39.4/arch/x86/lib/mmx_32.c
17338 --- linux-2.6.39.4/arch/x86/lib/mmx_32.c 2011-05-19 00:06:34.000000000 -0400
17339 +++ linux-2.6.39.4/arch/x86/lib/mmx_32.c 2011-08-05 19:44:35.000000000 -0400
17340 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
17341 {
17342 void *p;
17343 int i;
17344 + unsigned long cr0;
17345
17346 if (unlikely(in_interrupt()))
17347 return __memcpy(to, from, len);
17348 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
17349 kernel_fpu_begin();
17350
17351 __asm__ __volatile__ (
17352 - "1: prefetch (%0)\n" /* This set is 28 bytes */
17353 - " prefetch 64(%0)\n"
17354 - " prefetch 128(%0)\n"
17355 - " prefetch 192(%0)\n"
17356 - " prefetch 256(%0)\n"
17357 + "1: prefetch (%1)\n" /* This set is 28 bytes */
17358 + " prefetch 64(%1)\n"
17359 + " prefetch 128(%1)\n"
17360 + " prefetch 192(%1)\n"
17361 + " prefetch 256(%1)\n"
17362 "2: \n"
17363 ".section .fixup, \"ax\"\n"
17364 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17365 + "3: \n"
17366 +
17367 +#ifdef CONFIG_PAX_KERNEXEC
17368 + " movl %%cr0, %0\n"
17369 + " movl %0, %%eax\n"
17370 + " andl $0xFFFEFFFF, %%eax\n"
17371 + " movl %%eax, %%cr0\n"
17372 +#endif
17373 +
17374 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17375 +
17376 +#ifdef CONFIG_PAX_KERNEXEC
17377 + " movl %0, %%cr0\n"
17378 +#endif
17379 +
17380 " jmp 2b\n"
17381 ".previous\n"
17382 _ASM_EXTABLE(1b, 3b)
17383 - : : "r" (from));
17384 + : "=&r" (cr0) : "r" (from) : "ax");
17385
17386 for ( ; i > 5; i--) {
17387 __asm__ __volatile__ (
17388 - "1: prefetch 320(%0)\n"
17389 - "2: movq (%0), %%mm0\n"
17390 - " movq 8(%0), %%mm1\n"
17391 - " movq 16(%0), %%mm2\n"
17392 - " movq 24(%0), %%mm3\n"
17393 - " movq %%mm0, (%1)\n"
17394 - " movq %%mm1, 8(%1)\n"
17395 - " movq %%mm2, 16(%1)\n"
17396 - " movq %%mm3, 24(%1)\n"
17397 - " movq 32(%0), %%mm0\n"
17398 - " movq 40(%0), %%mm1\n"
17399 - " movq 48(%0), %%mm2\n"
17400 - " movq 56(%0), %%mm3\n"
17401 - " movq %%mm0, 32(%1)\n"
17402 - " movq %%mm1, 40(%1)\n"
17403 - " movq %%mm2, 48(%1)\n"
17404 - " movq %%mm3, 56(%1)\n"
17405 + "1: prefetch 320(%1)\n"
17406 + "2: movq (%1), %%mm0\n"
17407 + " movq 8(%1), %%mm1\n"
17408 + " movq 16(%1), %%mm2\n"
17409 + " movq 24(%1), %%mm3\n"
17410 + " movq %%mm0, (%2)\n"
17411 + " movq %%mm1, 8(%2)\n"
17412 + " movq %%mm2, 16(%2)\n"
17413 + " movq %%mm3, 24(%2)\n"
17414 + " movq 32(%1), %%mm0\n"
17415 + " movq 40(%1), %%mm1\n"
17416 + " movq 48(%1), %%mm2\n"
17417 + " movq 56(%1), %%mm3\n"
17418 + " movq %%mm0, 32(%2)\n"
17419 + " movq %%mm1, 40(%2)\n"
17420 + " movq %%mm2, 48(%2)\n"
17421 + " movq %%mm3, 56(%2)\n"
17422 ".section .fixup, \"ax\"\n"
17423 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17424 + "3:\n"
17425 +
17426 +#ifdef CONFIG_PAX_KERNEXEC
17427 + " movl %%cr0, %0\n"
17428 + " movl %0, %%eax\n"
17429 + " andl $0xFFFEFFFF, %%eax\n"
17430 + " movl %%eax, %%cr0\n"
17431 +#endif
17432 +
17433 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17434 +
17435 +#ifdef CONFIG_PAX_KERNEXEC
17436 + " movl %0, %%cr0\n"
17437 +#endif
17438 +
17439 " jmp 2b\n"
17440 ".previous\n"
17441 _ASM_EXTABLE(1b, 3b)
17442 - : : "r" (from), "r" (to) : "memory");
17443 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17444
17445 from += 64;
17446 to += 64;
17447 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
17448 static void fast_copy_page(void *to, void *from)
17449 {
17450 int i;
17451 + unsigned long cr0;
17452
17453 kernel_fpu_begin();
17454
17455 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
17456 * but that is for later. -AV
17457 */
17458 __asm__ __volatile__(
17459 - "1: prefetch (%0)\n"
17460 - " prefetch 64(%0)\n"
17461 - " prefetch 128(%0)\n"
17462 - " prefetch 192(%0)\n"
17463 - " prefetch 256(%0)\n"
17464 + "1: prefetch (%1)\n"
17465 + " prefetch 64(%1)\n"
17466 + " prefetch 128(%1)\n"
17467 + " prefetch 192(%1)\n"
17468 + " prefetch 256(%1)\n"
17469 "2: \n"
17470 ".section .fixup, \"ax\"\n"
17471 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17472 + "3: \n"
17473 +
17474 +#ifdef CONFIG_PAX_KERNEXEC
17475 + " movl %%cr0, %0\n"
17476 + " movl %0, %%eax\n"
17477 + " andl $0xFFFEFFFF, %%eax\n"
17478 + " movl %%eax, %%cr0\n"
17479 +#endif
17480 +
17481 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17482 +
17483 +#ifdef CONFIG_PAX_KERNEXEC
17484 + " movl %0, %%cr0\n"
17485 +#endif
17486 +
17487 " jmp 2b\n"
17488 ".previous\n"
17489 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
17490 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17491
17492 for (i = 0; i < (4096-320)/64; i++) {
17493 __asm__ __volatile__ (
17494 - "1: prefetch 320(%0)\n"
17495 - "2: movq (%0), %%mm0\n"
17496 - " movntq %%mm0, (%1)\n"
17497 - " movq 8(%0), %%mm1\n"
17498 - " movntq %%mm1, 8(%1)\n"
17499 - " movq 16(%0), %%mm2\n"
17500 - " movntq %%mm2, 16(%1)\n"
17501 - " movq 24(%0), %%mm3\n"
17502 - " movntq %%mm3, 24(%1)\n"
17503 - " movq 32(%0), %%mm4\n"
17504 - " movntq %%mm4, 32(%1)\n"
17505 - " movq 40(%0), %%mm5\n"
17506 - " movntq %%mm5, 40(%1)\n"
17507 - " movq 48(%0), %%mm6\n"
17508 - " movntq %%mm6, 48(%1)\n"
17509 - " movq 56(%0), %%mm7\n"
17510 - " movntq %%mm7, 56(%1)\n"
17511 + "1: prefetch 320(%1)\n"
17512 + "2: movq (%1), %%mm0\n"
17513 + " movntq %%mm0, (%2)\n"
17514 + " movq 8(%1), %%mm1\n"
17515 + " movntq %%mm1, 8(%2)\n"
17516 + " movq 16(%1), %%mm2\n"
17517 + " movntq %%mm2, 16(%2)\n"
17518 + " movq 24(%1), %%mm3\n"
17519 + " movntq %%mm3, 24(%2)\n"
17520 + " movq 32(%1), %%mm4\n"
17521 + " movntq %%mm4, 32(%2)\n"
17522 + " movq 40(%1), %%mm5\n"
17523 + " movntq %%mm5, 40(%2)\n"
17524 + " movq 48(%1), %%mm6\n"
17525 + " movntq %%mm6, 48(%2)\n"
17526 + " movq 56(%1), %%mm7\n"
17527 + " movntq %%mm7, 56(%2)\n"
17528 ".section .fixup, \"ax\"\n"
17529 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17530 + "3:\n"
17531 +
17532 +#ifdef CONFIG_PAX_KERNEXEC
17533 + " movl %%cr0, %0\n"
17534 + " movl %0, %%eax\n"
17535 + " andl $0xFFFEFFFF, %%eax\n"
17536 + " movl %%eax, %%cr0\n"
17537 +#endif
17538 +
17539 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17540 +
17541 +#ifdef CONFIG_PAX_KERNEXEC
17542 + " movl %0, %%cr0\n"
17543 +#endif
17544 +
17545 " jmp 2b\n"
17546 ".previous\n"
17547 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
17548 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17549
17550 from += 64;
17551 to += 64;
17552 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
17553 static void fast_copy_page(void *to, void *from)
17554 {
17555 int i;
17556 + unsigned long cr0;
17557
17558 kernel_fpu_begin();
17559
17560 __asm__ __volatile__ (
17561 - "1: prefetch (%0)\n"
17562 - " prefetch 64(%0)\n"
17563 - " prefetch 128(%0)\n"
17564 - " prefetch 192(%0)\n"
17565 - " prefetch 256(%0)\n"
17566 + "1: prefetch (%1)\n"
17567 + " prefetch 64(%1)\n"
17568 + " prefetch 128(%1)\n"
17569 + " prefetch 192(%1)\n"
17570 + " prefetch 256(%1)\n"
17571 "2: \n"
17572 ".section .fixup, \"ax\"\n"
17573 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17574 + "3: \n"
17575 +
17576 +#ifdef CONFIG_PAX_KERNEXEC
17577 + " movl %%cr0, %0\n"
17578 + " movl %0, %%eax\n"
17579 + " andl $0xFFFEFFFF, %%eax\n"
17580 + " movl %%eax, %%cr0\n"
17581 +#endif
17582 +
17583 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17584 +
17585 +#ifdef CONFIG_PAX_KERNEXEC
17586 + " movl %0, %%cr0\n"
17587 +#endif
17588 +
17589 " jmp 2b\n"
17590 ".previous\n"
17591 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
17592 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17593
17594 for (i = 0; i < 4096/64; i++) {
17595 __asm__ __volatile__ (
17596 - "1: prefetch 320(%0)\n"
17597 - "2: movq (%0), %%mm0\n"
17598 - " movq 8(%0), %%mm1\n"
17599 - " movq 16(%0), %%mm2\n"
17600 - " movq 24(%0), %%mm3\n"
17601 - " movq %%mm0, (%1)\n"
17602 - " movq %%mm1, 8(%1)\n"
17603 - " movq %%mm2, 16(%1)\n"
17604 - " movq %%mm3, 24(%1)\n"
17605 - " movq 32(%0), %%mm0\n"
17606 - " movq 40(%0), %%mm1\n"
17607 - " movq 48(%0), %%mm2\n"
17608 - " movq 56(%0), %%mm3\n"
17609 - " movq %%mm0, 32(%1)\n"
17610 - " movq %%mm1, 40(%1)\n"
17611 - " movq %%mm2, 48(%1)\n"
17612 - " movq %%mm3, 56(%1)\n"
17613 + "1: prefetch 320(%1)\n"
17614 + "2: movq (%1), %%mm0\n"
17615 + " movq 8(%1), %%mm1\n"
17616 + " movq 16(%1), %%mm2\n"
17617 + " movq 24(%1), %%mm3\n"
17618 + " movq %%mm0, (%2)\n"
17619 + " movq %%mm1, 8(%2)\n"
17620 + " movq %%mm2, 16(%2)\n"
17621 + " movq %%mm3, 24(%2)\n"
17622 + " movq 32(%1), %%mm0\n"
17623 + " movq 40(%1), %%mm1\n"
17624 + " movq 48(%1), %%mm2\n"
17625 + " movq 56(%1), %%mm3\n"
17626 + " movq %%mm0, 32(%2)\n"
17627 + " movq %%mm1, 40(%2)\n"
17628 + " movq %%mm2, 48(%2)\n"
17629 + " movq %%mm3, 56(%2)\n"
17630 ".section .fixup, \"ax\"\n"
17631 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17632 + "3:\n"
17633 +
17634 +#ifdef CONFIG_PAX_KERNEXEC
17635 + " movl %%cr0, %0\n"
17636 + " movl %0, %%eax\n"
17637 + " andl $0xFFFEFFFF, %%eax\n"
17638 + " movl %%eax, %%cr0\n"
17639 +#endif
17640 +
17641 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17642 +
17643 +#ifdef CONFIG_PAX_KERNEXEC
17644 + " movl %0, %%cr0\n"
17645 +#endif
17646 +
17647 " jmp 2b\n"
17648 ".previous\n"
17649 _ASM_EXTABLE(1b, 3b)
17650 - : : "r" (from), "r" (to) : "memory");
17651 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17652
17653 from += 64;
17654 to += 64;
17655 diff -urNp linux-2.6.39.4/arch/x86/lib/putuser.S linux-2.6.39.4/arch/x86/lib/putuser.S
17656 --- linux-2.6.39.4/arch/x86/lib/putuser.S 2011-05-19 00:06:34.000000000 -0400
17657 +++ linux-2.6.39.4/arch/x86/lib/putuser.S 2011-08-05 19:44:35.000000000 -0400
17658 @@ -15,7 +15,8 @@
17659 #include <asm/thread_info.h>
17660 #include <asm/errno.h>
17661 #include <asm/asm.h>
17662 -
17663 +#include <asm/segment.h>
17664 +#include <asm/pgtable.h>
17665
17666 /*
17667 * __put_user_X
17668 @@ -29,52 +30,119 @@
17669 * as they get called from within inline assembly.
17670 */
17671
17672 -#define ENTER CFI_STARTPROC ; \
17673 - GET_THREAD_INFO(%_ASM_BX)
17674 +#define ENTER CFI_STARTPROC
17675 #define EXIT ret ; \
17676 CFI_ENDPROC
17677
17678 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17679 +#define _DEST %_ASM_CX,%_ASM_BX
17680 +#else
17681 +#define _DEST %_ASM_CX
17682 +#endif
17683 +
17684 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17685 +#define __copyuser_seg gs;
17686 +#else
17687 +#define __copyuser_seg
17688 +#endif
17689 +
17690 .text
17691 ENTRY(__put_user_1)
17692 ENTER
17693 +
17694 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17695 + GET_THREAD_INFO(%_ASM_BX)
17696 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
17697 jae bad_put_user
17698 -1: movb %al,(%_ASM_CX)
17699 +
17700 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17701 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17702 + cmp %_ASM_BX,%_ASM_CX
17703 + jb 1234f
17704 + xor %ebx,%ebx
17705 +1234:
17706 +#endif
17707 +
17708 +#endif
17709 +
17710 +1: __copyuser_seg movb %al,(_DEST)
17711 xor %eax,%eax
17712 EXIT
17713 ENDPROC(__put_user_1)
17714
17715 ENTRY(__put_user_2)
17716 ENTER
17717 +
17718 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17719 + GET_THREAD_INFO(%_ASM_BX)
17720 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17721 sub $1,%_ASM_BX
17722 cmp %_ASM_BX,%_ASM_CX
17723 jae bad_put_user
17724 -2: movw %ax,(%_ASM_CX)
17725 +
17726 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17727 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17728 + cmp %_ASM_BX,%_ASM_CX
17729 + jb 1234f
17730 + xor %ebx,%ebx
17731 +1234:
17732 +#endif
17733 +
17734 +#endif
17735 +
17736 +2: __copyuser_seg movw %ax,(_DEST)
17737 xor %eax,%eax
17738 EXIT
17739 ENDPROC(__put_user_2)
17740
17741 ENTRY(__put_user_4)
17742 ENTER
17743 +
17744 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17745 + GET_THREAD_INFO(%_ASM_BX)
17746 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17747 sub $3,%_ASM_BX
17748 cmp %_ASM_BX,%_ASM_CX
17749 jae bad_put_user
17750 -3: movl %eax,(%_ASM_CX)
17751 +
17752 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17753 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17754 + cmp %_ASM_BX,%_ASM_CX
17755 + jb 1234f
17756 + xor %ebx,%ebx
17757 +1234:
17758 +#endif
17759 +
17760 +#endif
17761 +
17762 +3: __copyuser_seg movl %eax,(_DEST)
17763 xor %eax,%eax
17764 EXIT
17765 ENDPROC(__put_user_4)
17766
17767 ENTRY(__put_user_8)
17768 ENTER
17769 +
17770 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17771 + GET_THREAD_INFO(%_ASM_BX)
17772 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17773 sub $7,%_ASM_BX
17774 cmp %_ASM_BX,%_ASM_CX
17775 jae bad_put_user
17776 -4: mov %_ASM_AX,(%_ASM_CX)
17777 +
17778 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17779 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17780 + cmp %_ASM_BX,%_ASM_CX
17781 + jb 1234f
17782 + xor %ebx,%ebx
17783 +1234:
17784 +#endif
17785 +
17786 +#endif
17787 +
17788 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
17789 #ifdef CONFIG_X86_32
17790 -5: movl %edx,4(%_ASM_CX)
17791 +5: __copyuser_seg movl %edx,4(_DEST)
17792 #endif
17793 xor %eax,%eax
17794 EXIT
17795 diff -urNp linux-2.6.39.4/arch/x86/lib/usercopy_32.c linux-2.6.39.4/arch/x86/lib/usercopy_32.c
17796 --- linux-2.6.39.4/arch/x86/lib/usercopy_32.c 2011-05-19 00:06:34.000000000 -0400
17797 +++ linux-2.6.39.4/arch/x86/lib/usercopy_32.c 2011-08-05 19:44:35.000000000 -0400
17798 @@ -43,7 +43,7 @@ do { \
17799 __asm__ __volatile__( \
17800 " testl %1,%1\n" \
17801 " jz 2f\n" \
17802 - "0: lodsb\n" \
17803 + "0: "__copyuser_seg"lodsb\n" \
17804 " stosb\n" \
17805 " testb %%al,%%al\n" \
17806 " jz 1f\n" \
17807 @@ -128,10 +128,12 @@ do { \
17808 int __d0; \
17809 might_fault(); \
17810 __asm__ __volatile__( \
17811 + __COPYUSER_SET_ES \
17812 "0: rep; stosl\n" \
17813 " movl %2,%0\n" \
17814 "1: rep; stosb\n" \
17815 "2:\n" \
17816 + __COPYUSER_RESTORE_ES \
17817 ".section .fixup,\"ax\"\n" \
17818 "3: lea 0(%2,%0,4),%0\n" \
17819 " jmp 2b\n" \
17820 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
17821 might_fault();
17822
17823 __asm__ __volatile__(
17824 + __COPYUSER_SET_ES
17825 " testl %0, %0\n"
17826 " jz 3f\n"
17827 " andl %0,%%ecx\n"
17828 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
17829 " subl %%ecx,%0\n"
17830 " addl %0,%%eax\n"
17831 "1:\n"
17832 + __COPYUSER_RESTORE_ES
17833 ".section .fixup,\"ax\"\n"
17834 "2: xorl %%eax,%%eax\n"
17835 " jmp 1b\n"
17836 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
17837
17838 #ifdef CONFIG_X86_INTEL_USERCOPY
17839 static unsigned long
17840 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
17841 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
17842 {
17843 int d0, d1;
17844 __asm__ __volatile__(
17845 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
17846 " .align 2,0x90\n"
17847 "3: movl 0(%4), %%eax\n"
17848 "4: movl 4(%4), %%edx\n"
17849 - "5: movl %%eax, 0(%3)\n"
17850 - "6: movl %%edx, 4(%3)\n"
17851 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
17852 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
17853 "7: movl 8(%4), %%eax\n"
17854 "8: movl 12(%4),%%edx\n"
17855 - "9: movl %%eax, 8(%3)\n"
17856 - "10: movl %%edx, 12(%3)\n"
17857 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
17858 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
17859 "11: movl 16(%4), %%eax\n"
17860 "12: movl 20(%4), %%edx\n"
17861 - "13: movl %%eax, 16(%3)\n"
17862 - "14: movl %%edx, 20(%3)\n"
17863 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
17864 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
17865 "15: movl 24(%4), %%eax\n"
17866 "16: movl 28(%4), %%edx\n"
17867 - "17: movl %%eax, 24(%3)\n"
17868 - "18: movl %%edx, 28(%3)\n"
17869 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
17870 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
17871 "19: movl 32(%4), %%eax\n"
17872 "20: movl 36(%4), %%edx\n"
17873 - "21: movl %%eax, 32(%3)\n"
17874 - "22: movl %%edx, 36(%3)\n"
17875 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
17876 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
17877 "23: movl 40(%4), %%eax\n"
17878 "24: movl 44(%4), %%edx\n"
17879 - "25: movl %%eax, 40(%3)\n"
17880 - "26: movl %%edx, 44(%3)\n"
17881 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
17882 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
17883 "27: movl 48(%4), %%eax\n"
17884 "28: movl 52(%4), %%edx\n"
17885 - "29: movl %%eax, 48(%3)\n"
17886 - "30: movl %%edx, 52(%3)\n"
17887 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
17888 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
17889 "31: movl 56(%4), %%eax\n"
17890 "32: movl 60(%4), %%edx\n"
17891 - "33: movl %%eax, 56(%3)\n"
17892 - "34: movl %%edx, 60(%3)\n"
17893 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
17894 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
17895 " addl $-64, %0\n"
17896 " addl $64, %4\n"
17897 " addl $64, %3\n"
17898 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
17899 " shrl $2, %0\n"
17900 " andl $3, %%eax\n"
17901 " cld\n"
17902 + __COPYUSER_SET_ES
17903 "99: rep; movsl\n"
17904 "36: movl %%eax, %0\n"
17905 "37: rep; movsb\n"
17906 "100:\n"
17907 + __COPYUSER_RESTORE_ES
17908 + ".section .fixup,\"ax\"\n"
17909 + "101: lea 0(%%eax,%0,4),%0\n"
17910 + " jmp 100b\n"
17911 + ".previous\n"
17912 + ".section __ex_table,\"a\"\n"
17913 + " .align 4\n"
17914 + " .long 1b,100b\n"
17915 + " .long 2b,100b\n"
17916 + " .long 3b,100b\n"
17917 + " .long 4b,100b\n"
17918 + " .long 5b,100b\n"
17919 + " .long 6b,100b\n"
17920 + " .long 7b,100b\n"
17921 + " .long 8b,100b\n"
17922 + " .long 9b,100b\n"
17923 + " .long 10b,100b\n"
17924 + " .long 11b,100b\n"
17925 + " .long 12b,100b\n"
17926 + " .long 13b,100b\n"
17927 + " .long 14b,100b\n"
17928 + " .long 15b,100b\n"
17929 + " .long 16b,100b\n"
17930 + " .long 17b,100b\n"
17931 + " .long 18b,100b\n"
17932 + " .long 19b,100b\n"
17933 + " .long 20b,100b\n"
17934 + " .long 21b,100b\n"
17935 + " .long 22b,100b\n"
17936 + " .long 23b,100b\n"
17937 + " .long 24b,100b\n"
17938 + " .long 25b,100b\n"
17939 + " .long 26b,100b\n"
17940 + " .long 27b,100b\n"
17941 + " .long 28b,100b\n"
17942 + " .long 29b,100b\n"
17943 + " .long 30b,100b\n"
17944 + " .long 31b,100b\n"
17945 + " .long 32b,100b\n"
17946 + " .long 33b,100b\n"
17947 + " .long 34b,100b\n"
17948 + " .long 35b,100b\n"
17949 + " .long 36b,100b\n"
17950 + " .long 37b,100b\n"
17951 + " .long 99b,101b\n"
17952 + ".previous"
17953 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
17954 + : "1"(to), "2"(from), "0"(size)
17955 + : "eax", "edx", "memory");
17956 + return size;
17957 +}
17958 +
17959 +static unsigned long
17960 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
17961 +{
17962 + int d0, d1;
17963 + __asm__ __volatile__(
17964 + " .align 2,0x90\n"
17965 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
17966 + " cmpl $67, %0\n"
17967 + " jbe 3f\n"
17968 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
17969 + " .align 2,0x90\n"
17970 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
17971 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
17972 + "5: movl %%eax, 0(%3)\n"
17973 + "6: movl %%edx, 4(%3)\n"
17974 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
17975 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
17976 + "9: movl %%eax, 8(%3)\n"
17977 + "10: movl %%edx, 12(%3)\n"
17978 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
17979 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
17980 + "13: movl %%eax, 16(%3)\n"
17981 + "14: movl %%edx, 20(%3)\n"
17982 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
17983 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
17984 + "17: movl %%eax, 24(%3)\n"
17985 + "18: movl %%edx, 28(%3)\n"
17986 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
17987 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
17988 + "21: movl %%eax, 32(%3)\n"
17989 + "22: movl %%edx, 36(%3)\n"
17990 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
17991 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
17992 + "25: movl %%eax, 40(%3)\n"
17993 + "26: movl %%edx, 44(%3)\n"
17994 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
17995 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
17996 + "29: movl %%eax, 48(%3)\n"
17997 + "30: movl %%edx, 52(%3)\n"
17998 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
17999 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
18000 + "33: movl %%eax, 56(%3)\n"
18001 + "34: movl %%edx, 60(%3)\n"
18002 + " addl $-64, %0\n"
18003 + " addl $64, %4\n"
18004 + " addl $64, %3\n"
18005 + " cmpl $63, %0\n"
18006 + " ja 1b\n"
18007 + "35: movl %0, %%eax\n"
18008 + " shrl $2, %0\n"
18009 + " andl $3, %%eax\n"
18010 + " cld\n"
18011 + "99: rep; "__copyuser_seg" movsl\n"
18012 + "36: movl %%eax, %0\n"
18013 + "37: rep; "__copyuser_seg" movsb\n"
18014 + "100:\n"
18015 ".section .fixup,\"ax\"\n"
18016 "101: lea 0(%%eax,%0,4),%0\n"
18017 " jmp 100b\n"
18018 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
18019 int d0, d1;
18020 __asm__ __volatile__(
18021 " .align 2,0x90\n"
18022 - "0: movl 32(%4), %%eax\n"
18023 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18024 " cmpl $67, %0\n"
18025 " jbe 2f\n"
18026 - "1: movl 64(%4), %%eax\n"
18027 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18028 " .align 2,0x90\n"
18029 - "2: movl 0(%4), %%eax\n"
18030 - "21: movl 4(%4), %%edx\n"
18031 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18032 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18033 " movl %%eax, 0(%3)\n"
18034 " movl %%edx, 4(%3)\n"
18035 - "3: movl 8(%4), %%eax\n"
18036 - "31: movl 12(%4),%%edx\n"
18037 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18038 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18039 " movl %%eax, 8(%3)\n"
18040 " movl %%edx, 12(%3)\n"
18041 - "4: movl 16(%4), %%eax\n"
18042 - "41: movl 20(%4), %%edx\n"
18043 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18044 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18045 " movl %%eax, 16(%3)\n"
18046 " movl %%edx, 20(%3)\n"
18047 - "10: movl 24(%4), %%eax\n"
18048 - "51: movl 28(%4), %%edx\n"
18049 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18050 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18051 " movl %%eax, 24(%3)\n"
18052 " movl %%edx, 28(%3)\n"
18053 - "11: movl 32(%4), %%eax\n"
18054 - "61: movl 36(%4), %%edx\n"
18055 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18056 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18057 " movl %%eax, 32(%3)\n"
18058 " movl %%edx, 36(%3)\n"
18059 - "12: movl 40(%4), %%eax\n"
18060 - "71: movl 44(%4), %%edx\n"
18061 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18062 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18063 " movl %%eax, 40(%3)\n"
18064 " movl %%edx, 44(%3)\n"
18065 - "13: movl 48(%4), %%eax\n"
18066 - "81: movl 52(%4), %%edx\n"
18067 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18068 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18069 " movl %%eax, 48(%3)\n"
18070 " movl %%edx, 52(%3)\n"
18071 - "14: movl 56(%4), %%eax\n"
18072 - "91: movl 60(%4), %%edx\n"
18073 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18074 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18075 " movl %%eax, 56(%3)\n"
18076 " movl %%edx, 60(%3)\n"
18077 " addl $-64, %0\n"
18078 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
18079 " shrl $2, %0\n"
18080 " andl $3, %%eax\n"
18081 " cld\n"
18082 - "6: rep; movsl\n"
18083 + "6: rep; "__copyuser_seg" movsl\n"
18084 " movl %%eax,%0\n"
18085 - "7: rep; movsb\n"
18086 + "7: rep; "__copyuser_seg" movsb\n"
18087 "8:\n"
18088 ".section .fixup,\"ax\"\n"
18089 "9: lea 0(%%eax,%0,4),%0\n"
18090 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
18091
18092 __asm__ __volatile__(
18093 " .align 2,0x90\n"
18094 - "0: movl 32(%4), %%eax\n"
18095 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18096 " cmpl $67, %0\n"
18097 " jbe 2f\n"
18098 - "1: movl 64(%4), %%eax\n"
18099 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18100 " .align 2,0x90\n"
18101 - "2: movl 0(%4), %%eax\n"
18102 - "21: movl 4(%4), %%edx\n"
18103 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18104 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18105 " movnti %%eax, 0(%3)\n"
18106 " movnti %%edx, 4(%3)\n"
18107 - "3: movl 8(%4), %%eax\n"
18108 - "31: movl 12(%4),%%edx\n"
18109 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18110 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18111 " movnti %%eax, 8(%3)\n"
18112 " movnti %%edx, 12(%3)\n"
18113 - "4: movl 16(%4), %%eax\n"
18114 - "41: movl 20(%4), %%edx\n"
18115 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18116 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18117 " movnti %%eax, 16(%3)\n"
18118 " movnti %%edx, 20(%3)\n"
18119 - "10: movl 24(%4), %%eax\n"
18120 - "51: movl 28(%4), %%edx\n"
18121 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18122 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18123 " movnti %%eax, 24(%3)\n"
18124 " movnti %%edx, 28(%3)\n"
18125 - "11: movl 32(%4), %%eax\n"
18126 - "61: movl 36(%4), %%edx\n"
18127 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18128 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18129 " movnti %%eax, 32(%3)\n"
18130 " movnti %%edx, 36(%3)\n"
18131 - "12: movl 40(%4), %%eax\n"
18132 - "71: movl 44(%4), %%edx\n"
18133 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18134 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18135 " movnti %%eax, 40(%3)\n"
18136 " movnti %%edx, 44(%3)\n"
18137 - "13: movl 48(%4), %%eax\n"
18138 - "81: movl 52(%4), %%edx\n"
18139 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18140 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18141 " movnti %%eax, 48(%3)\n"
18142 " movnti %%edx, 52(%3)\n"
18143 - "14: movl 56(%4), %%eax\n"
18144 - "91: movl 60(%4), %%edx\n"
18145 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18146 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18147 " movnti %%eax, 56(%3)\n"
18148 " movnti %%edx, 60(%3)\n"
18149 " addl $-64, %0\n"
18150 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
18151 " shrl $2, %0\n"
18152 " andl $3, %%eax\n"
18153 " cld\n"
18154 - "6: rep; movsl\n"
18155 + "6: rep; "__copyuser_seg" movsl\n"
18156 " movl %%eax,%0\n"
18157 - "7: rep; movsb\n"
18158 + "7: rep; "__copyuser_seg" movsb\n"
18159 "8:\n"
18160 ".section .fixup,\"ax\"\n"
18161 "9: lea 0(%%eax,%0,4),%0\n"
18162 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
18163
18164 __asm__ __volatile__(
18165 " .align 2,0x90\n"
18166 - "0: movl 32(%4), %%eax\n"
18167 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18168 " cmpl $67, %0\n"
18169 " jbe 2f\n"
18170 - "1: movl 64(%4), %%eax\n"
18171 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18172 " .align 2,0x90\n"
18173 - "2: movl 0(%4), %%eax\n"
18174 - "21: movl 4(%4), %%edx\n"
18175 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18176 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18177 " movnti %%eax, 0(%3)\n"
18178 " movnti %%edx, 4(%3)\n"
18179 - "3: movl 8(%4), %%eax\n"
18180 - "31: movl 12(%4),%%edx\n"
18181 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18182 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18183 " movnti %%eax, 8(%3)\n"
18184 " movnti %%edx, 12(%3)\n"
18185 - "4: movl 16(%4), %%eax\n"
18186 - "41: movl 20(%4), %%edx\n"
18187 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18188 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18189 " movnti %%eax, 16(%3)\n"
18190 " movnti %%edx, 20(%3)\n"
18191 - "10: movl 24(%4), %%eax\n"
18192 - "51: movl 28(%4), %%edx\n"
18193 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18194 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18195 " movnti %%eax, 24(%3)\n"
18196 " movnti %%edx, 28(%3)\n"
18197 - "11: movl 32(%4), %%eax\n"
18198 - "61: movl 36(%4), %%edx\n"
18199 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18200 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18201 " movnti %%eax, 32(%3)\n"
18202 " movnti %%edx, 36(%3)\n"
18203 - "12: movl 40(%4), %%eax\n"
18204 - "71: movl 44(%4), %%edx\n"
18205 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18206 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18207 " movnti %%eax, 40(%3)\n"
18208 " movnti %%edx, 44(%3)\n"
18209 - "13: movl 48(%4), %%eax\n"
18210 - "81: movl 52(%4), %%edx\n"
18211 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18212 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18213 " movnti %%eax, 48(%3)\n"
18214 " movnti %%edx, 52(%3)\n"
18215 - "14: movl 56(%4), %%eax\n"
18216 - "91: movl 60(%4), %%edx\n"
18217 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18218 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18219 " movnti %%eax, 56(%3)\n"
18220 " movnti %%edx, 60(%3)\n"
18221 " addl $-64, %0\n"
18222 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
18223 " shrl $2, %0\n"
18224 " andl $3, %%eax\n"
18225 " cld\n"
18226 - "6: rep; movsl\n"
18227 + "6: rep; "__copyuser_seg" movsl\n"
18228 " movl %%eax,%0\n"
18229 - "7: rep; movsb\n"
18230 + "7: rep; "__copyuser_seg" movsb\n"
18231 "8:\n"
18232 ".section .fixup,\"ax\"\n"
18233 "9: lea 0(%%eax,%0,4),%0\n"
18234 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
18235 */
18236 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
18237 unsigned long size);
18238 -unsigned long __copy_user_intel(void __user *to, const void *from,
18239 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
18240 + unsigned long size);
18241 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
18242 unsigned long size);
18243 unsigned long __copy_user_zeroing_intel_nocache(void *to,
18244 const void __user *from, unsigned long size);
18245 #endif /* CONFIG_X86_INTEL_USERCOPY */
18246
18247 /* Generic arbitrary sized copy. */
18248 -#define __copy_user(to, from, size) \
18249 +#define __copy_user(to, from, size, prefix, set, restore) \
18250 do { \
18251 int __d0, __d1, __d2; \
18252 __asm__ __volatile__( \
18253 + set \
18254 " cmp $7,%0\n" \
18255 " jbe 1f\n" \
18256 " movl %1,%0\n" \
18257 " negl %0\n" \
18258 " andl $7,%0\n" \
18259 " subl %0,%3\n" \
18260 - "4: rep; movsb\n" \
18261 + "4: rep; "prefix"movsb\n" \
18262 " movl %3,%0\n" \
18263 " shrl $2,%0\n" \
18264 " andl $3,%3\n" \
18265 " .align 2,0x90\n" \
18266 - "0: rep; movsl\n" \
18267 + "0: rep; "prefix"movsl\n" \
18268 " movl %3,%0\n" \
18269 - "1: rep; movsb\n" \
18270 + "1: rep; "prefix"movsb\n" \
18271 "2:\n" \
18272 + restore \
18273 ".section .fixup,\"ax\"\n" \
18274 "5: addl %3,%0\n" \
18275 " jmp 2b\n" \
18276 @@ -682,14 +799,14 @@ do { \
18277 " negl %0\n" \
18278 " andl $7,%0\n" \
18279 " subl %0,%3\n" \
18280 - "4: rep; movsb\n" \
18281 + "4: rep; "__copyuser_seg"movsb\n" \
18282 " movl %3,%0\n" \
18283 " shrl $2,%0\n" \
18284 " andl $3,%3\n" \
18285 " .align 2,0x90\n" \
18286 - "0: rep; movsl\n" \
18287 + "0: rep; "__copyuser_seg"movsl\n" \
18288 " movl %3,%0\n" \
18289 - "1: rep; movsb\n" \
18290 + "1: rep; "__copyuser_seg"movsb\n" \
18291 "2:\n" \
18292 ".section .fixup,\"ax\"\n" \
18293 "5: addl %3,%0\n" \
18294 @@ -775,9 +892,9 @@ survive:
18295 }
18296 #endif
18297 if (movsl_is_ok(to, from, n))
18298 - __copy_user(to, from, n);
18299 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
18300 else
18301 - n = __copy_user_intel(to, from, n);
18302 + n = __generic_copy_to_user_intel(to, from, n);
18303 return n;
18304 }
18305 EXPORT_SYMBOL(__copy_to_user_ll);
18306 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
18307 unsigned long n)
18308 {
18309 if (movsl_is_ok(to, from, n))
18310 - __copy_user(to, from, n);
18311 + __copy_user(to, from, n, __copyuser_seg, "", "");
18312 else
18313 - n = __copy_user_intel((void __user *)to,
18314 - (const void *)from, n);
18315 + n = __generic_copy_from_user_intel(to, from, n);
18316 return n;
18317 }
18318 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
18319 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
18320 if (n > 64 && cpu_has_xmm2)
18321 n = __copy_user_intel_nocache(to, from, n);
18322 else
18323 - __copy_user(to, from, n);
18324 + __copy_user(to, from, n, __copyuser_seg, "", "");
18325 #else
18326 - __copy_user(to, from, n);
18327 + __copy_user(to, from, n, __copyuser_seg, "", "");
18328 #endif
18329 return n;
18330 }
18331 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
18332
18333 -/**
18334 - * copy_to_user: - Copy a block of data into user space.
18335 - * @to: Destination address, in user space.
18336 - * @from: Source address, in kernel space.
18337 - * @n: Number of bytes to copy.
18338 - *
18339 - * Context: User context only. This function may sleep.
18340 - *
18341 - * Copy data from kernel space to user space.
18342 - *
18343 - * Returns number of bytes that could not be copied.
18344 - * On success, this will be zero.
18345 - */
18346 -unsigned long
18347 -copy_to_user(void __user *to, const void *from, unsigned long n)
18348 +void copy_from_user_overflow(void)
18349 {
18350 - if (access_ok(VERIFY_WRITE, to, n))
18351 - n = __copy_to_user(to, from, n);
18352 - return n;
18353 + WARN(1, "Buffer overflow detected!\n");
18354 }
18355 -EXPORT_SYMBOL(copy_to_user);
18356 +EXPORT_SYMBOL(copy_from_user_overflow);
18357
18358 -/**
18359 - * copy_from_user: - Copy a block of data from user space.
18360 - * @to: Destination address, in kernel space.
18361 - * @from: Source address, in user space.
18362 - * @n: Number of bytes to copy.
18363 - *
18364 - * Context: User context only. This function may sleep.
18365 - *
18366 - * Copy data from user space to kernel space.
18367 - *
18368 - * Returns number of bytes that could not be copied.
18369 - * On success, this will be zero.
18370 - *
18371 - * If some data could not be copied, this function will pad the copied
18372 - * data to the requested size using zero bytes.
18373 - */
18374 -unsigned long
18375 -_copy_from_user(void *to, const void __user *from, unsigned long n)
18376 +void copy_to_user_overflow(void)
18377 {
18378 - if (access_ok(VERIFY_READ, from, n))
18379 - n = __copy_from_user(to, from, n);
18380 - else
18381 - memset(to, 0, n);
18382 - return n;
18383 + WARN(1, "Buffer overflow detected!\n");
18384 }
18385 -EXPORT_SYMBOL(_copy_from_user);
18386 +EXPORT_SYMBOL(copy_to_user_overflow);
18387
18388 -void copy_from_user_overflow(void)
18389 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18390 +void __set_fs(mm_segment_t x)
18391 {
18392 - WARN(1, "Buffer overflow detected!\n");
18393 + switch (x.seg) {
18394 + case 0:
18395 + loadsegment(gs, 0);
18396 + break;
18397 + case TASK_SIZE_MAX:
18398 + loadsegment(gs, __USER_DS);
18399 + break;
18400 + case -1UL:
18401 + loadsegment(gs, __KERNEL_DS);
18402 + break;
18403 + default:
18404 + BUG();
18405 + }
18406 + return;
18407 }
18408 -EXPORT_SYMBOL(copy_from_user_overflow);
18409 +EXPORT_SYMBOL(__set_fs);
18410 +
18411 +void set_fs(mm_segment_t x)
18412 +{
18413 + current_thread_info()->addr_limit = x;
18414 + __set_fs(x);
18415 +}
18416 +EXPORT_SYMBOL(set_fs);
18417 +#endif
18418 diff -urNp linux-2.6.39.4/arch/x86/lib/usercopy_64.c linux-2.6.39.4/arch/x86/lib/usercopy_64.c
18419 --- linux-2.6.39.4/arch/x86/lib/usercopy_64.c 2011-05-19 00:06:34.000000000 -0400
18420 +++ linux-2.6.39.4/arch/x86/lib/usercopy_64.c 2011-08-05 19:44:35.000000000 -0400
18421 @@ -42,6 +42,12 @@ long
18422 __strncpy_from_user(char *dst, const char __user *src, long count)
18423 {
18424 long res;
18425 +
18426 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18427 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18428 + src += PAX_USER_SHADOW_BASE;
18429 +#endif
18430 +
18431 __do_strncpy_from_user(dst, src, count, res);
18432 return res;
18433 }
18434 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
18435 {
18436 long __d0;
18437 might_fault();
18438 +
18439 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18440 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
18441 + addr += PAX_USER_SHADOW_BASE;
18442 +#endif
18443 +
18444 /* no memory constraint because it doesn't change any memory gcc knows
18445 about */
18446 asm volatile(
18447 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
18448
18449 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
18450 {
18451 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18452 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18453 +
18454 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18455 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
18456 + to += PAX_USER_SHADOW_BASE;
18457 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
18458 + from += PAX_USER_SHADOW_BASE;
18459 +#endif
18460 +
18461 return copy_user_generic((__force void *)to, (__force void *)from, len);
18462 - }
18463 - return len;
18464 + }
18465 + return len;
18466 }
18467 EXPORT_SYMBOL(copy_in_user);
18468
18469 diff -urNp linux-2.6.39.4/arch/x86/Makefile linux-2.6.39.4/arch/x86/Makefile
18470 --- linux-2.6.39.4/arch/x86/Makefile 2011-05-19 00:06:34.000000000 -0400
18471 +++ linux-2.6.39.4/arch/x86/Makefile 2011-08-05 19:44:35.000000000 -0400
18472 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
18473 else
18474 BITS := 64
18475 UTS_MACHINE := x86_64
18476 + biarch := $(call cc-option,-m64)
18477 CHECKFLAGS += -D__x86_64__ -m64
18478
18479 KBUILD_AFLAGS += -m64
18480 @@ -195,3 +196,12 @@ define archhelp
18481 echo ' FDARGS="..." arguments for the booted kernel'
18482 echo ' FDINITRD=file initrd for the booted kernel'
18483 endef
18484 +
18485 +define OLD_LD
18486 +
18487 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
18488 +*** Please upgrade your binutils to 2.18 or newer
18489 +endef
18490 +
18491 +archprepare:
18492 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
18493 diff -urNp linux-2.6.39.4/arch/x86/mm/extable.c linux-2.6.39.4/arch/x86/mm/extable.c
18494 --- linux-2.6.39.4/arch/x86/mm/extable.c 2011-05-19 00:06:34.000000000 -0400
18495 +++ linux-2.6.39.4/arch/x86/mm/extable.c 2011-08-05 19:44:35.000000000 -0400
18496 @@ -1,14 +1,71 @@
18497 #include <linux/module.h>
18498 #include <linux/spinlock.h>
18499 +#include <linux/sort.h>
18500 #include <asm/uaccess.h>
18501 +#include <asm/pgtable.h>
18502
18503 +/*
18504 + * The exception table needs to be sorted so that the binary
18505 + * search that we use to find entries in it works properly.
18506 + * This is used both for the kernel exception table and for
18507 + * the exception tables of modules that get loaded.
18508 + */
18509 +static int cmp_ex(const void *a, const void *b)
18510 +{
18511 + const struct exception_table_entry *x = a, *y = b;
18512 +
18513 + /* avoid overflow */
18514 + if (x->insn > y->insn)
18515 + return 1;
18516 + if (x->insn < y->insn)
18517 + return -1;
18518 + return 0;
18519 +}
18520 +
18521 +static void swap_ex(void *a, void *b, int size)
18522 +{
18523 + struct exception_table_entry t, *x = a, *y = b;
18524 +
18525 + t = *x;
18526 +
18527 + pax_open_kernel();
18528 + *x = *y;
18529 + *y = t;
18530 + pax_close_kernel();
18531 +}
18532 +
18533 +void sort_extable(struct exception_table_entry *start,
18534 + struct exception_table_entry *finish)
18535 +{
18536 + sort(start, finish - start, sizeof(struct exception_table_entry),
18537 + cmp_ex, swap_ex);
18538 +}
18539 +
18540 +#ifdef CONFIG_MODULES
18541 +/*
18542 + * If the exception table is sorted, any referring to the module init
18543 + * will be at the beginning or the end.
18544 + */
18545 +void trim_init_extable(struct module *m)
18546 +{
18547 + /*trim the beginning*/
18548 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
18549 + m->extable++;
18550 + m->num_exentries--;
18551 + }
18552 + /*trim the end*/
18553 + while (m->num_exentries &&
18554 + within_module_init(m->extable[m->num_exentries-1].insn, m))
18555 + m->num_exentries--;
18556 +}
18557 +#endif /* CONFIG_MODULES */
18558
18559 int fixup_exception(struct pt_regs *regs)
18560 {
18561 const struct exception_table_entry *fixup;
18562
18563 #ifdef CONFIG_PNPBIOS
18564 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
18565 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
18566 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
18567 extern u32 pnp_bios_is_utter_crap;
18568 pnp_bios_is_utter_crap = 1;
18569 diff -urNp linux-2.6.39.4/arch/x86/mm/fault.c linux-2.6.39.4/arch/x86/mm/fault.c
18570 --- linux-2.6.39.4/arch/x86/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
18571 +++ linux-2.6.39.4/arch/x86/mm/fault.c 2011-08-17 20:06:06.000000000 -0400
18572 @@ -12,10 +12,18 @@
18573 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
18574 #include <linux/perf_event.h> /* perf_sw_event */
18575 #include <linux/hugetlb.h> /* hstate_index_to_shift */
18576 +#include <linux/unistd.h>
18577 +#include <linux/compiler.h>
18578
18579 #include <asm/traps.h> /* dotraplinkage, ... */
18580 #include <asm/pgalloc.h> /* pgd_*(), ... */
18581 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
18582 +#include <asm/vsyscall.h>
18583 +#include <asm/tlbflush.h>
18584 +
18585 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18586 +#include <asm/stacktrace.h>
18587 +#endif
18588
18589 /*
18590 * Page fault error code bits:
18591 @@ -53,7 +61,7 @@ static inline int __kprobes notify_page_
18592 int ret = 0;
18593
18594 /* kprobe_running() needs smp_processor_id() */
18595 - if (kprobes_built_in() && !user_mode_vm(regs)) {
18596 + if (kprobes_built_in() && !user_mode(regs)) {
18597 preempt_disable();
18598 if (kprobe_running() && kprobe_fault_handler(regs, 14))
18599 ret = 1;
18600 @@ -114,7 +122,10 @@ check_prefetch_opcode(struct pt_regs *re
18601 return !instr_lo || (instr_lo>>1) == 1;
18602 case 0x00:
18603 /* Prefetch instruction is 0x0F0D or 0x0F18 */
18604 - if (probe_kernel_address(instr, opcode))
18605 + if (user_mode(regs)) {
18606 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18607 + return 0;
18608 + } else if (probe_kernel_address(instr, opcode))
18609 return 0;
18610
18611 *prefetch = (instr_lo == 0xF) &&
18612 @@ -148,7 +159,10 @@ is_prefetch(struct pt_regs *regs, unsign
18613 while (instr < max_instr) {
18614 unsigned char opcode;
18615
18616 - if (probe_kernel_address(instr, opcode))
18617 + if (user_mode(regs)) {
18618 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18619 + break;
18620 + } else if (probe_kernel_address(instr, opcode))
18621 break;
18622
18623 instr++;
18624 @@ -179,6 +193,30 @@ force_sig_info_fault(int si_signo, int s
18625 force_sig_info(si_signo, &info, tsk);
18626 }
18627
18628 +#ifdef CONFIG_PAX_EMUTRAMP
18629 +static int pax_handle_fetch_fault(struct pt_regs *regs);
18630 +#endif
18631 +
18632 +#ifdef CONFIG_PAX_PAGEEXEC
18633 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
18634 +{
18635 + pgd_t *pgd;
18636 + pud_t *pud;
18637 + pmd_t *pmd;
18638 +
18639 + pgd = pgd_offset(mm, address);
18640 + if (!pgd_present(*pgd))
18641 + return NULL;
18642 + pud = pud_offset(pgd, address);
18643 + if (!pud_present(*pud))
18644 + return NULL;
18645 + pmd = pmd_offset(pud, address);
18646 + if (!pmd_present(*pmd))
18647 + return NULL;
18648 + return pmd;
18649 +}
18650 +#endif
18651 +
18652 DEFINE_SPINLOCK(pgd_lock);
18653 LIST_HEAD(pgd_list);
18654
18655 @@ -229,10 +267,22 @@ void vmalloc_sync_all(void)
18656 for (address = VMALLOC_START & PMD_MASK;
18657 address >= TASK_SIZE && address < FIXADDR_TOP;
18658 address += PMD_SIZE) {
18659 +
18660 +#ifdef CONFIG_PAX_PER_CPU_PGD
18661 + unsigned long cpu;
18662 +#else
18663 struct page *page;
18664 +#endif
18665
18666 spin_lock(&pgd_lock);
18667 +
18668 +#ifdef CONFIG_PAX_PER_CPU_PGD
18669 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
18670 + pgd_t *pgd = get_cpu_pgd(cpu);
18671 + pmd_t *ret;
18672 +#else
18673 list_for_each_entry(page, &pgd_list, lru) {
18674 + pgd_t *pgd = page_address(page);
18675 spinlock_t *pgt_lock;
18676 pmd_t *ret;
18677
18678 @@ -240,8 +290,13 @@ void vmalloc_sync_all(void)
18679 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
18680
18681 spin_lock(pgt_lock);
18682 - ret = vmalloc_sync_one(page_address(page), address);
18683 +#endif
18684 +
18685 + ret = vmalloc_sync_one(pgd, address);
18686 +
18687 +#ifndef CONFIG_PAX_PER_CPU_PGD
18688 spin_unlock(pgt_lock);
18689 +#endif
18690
18691 if (!ret)
18692 break;
18693 @@ -275,6 +330,11 @@ static noinline __kprobes int vmalloc_fa
18694 * an interrupt in the middle of a task switch..
18695 */
18696 pgd_paddr = read_cr3();
18697 +
18698 +#ifdef CONFIG_PAX_PER_CPU_PGD
18699 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
18700 +#endif
18701 +
18702 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
18703 if (!pmd_k)
18704 return -1;
18705 @@ -370,7 +430,14 @@ static noinline __kprobes int vmalloc_fa
18706 * happen within a race in page table update. In the later
18707 * case just flush:
18708 */
18709 +
18710 +#ifdef CONFIG_PAX_PER_CPU_PGD
18711 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
18712 + pgd = pgd_offset_cpu(smp_processor_id(), address);
18713 +#else
18714 pgd = pgd_offset(current->active_mm, address);
18715 +#endif
18716 +
18717 pgd_ref = pgd_offset_k(address);
18718 if (pgd_none(*pgd_ref))
18719 return -1;
18720 @@ -532,7 +599,7 @@ static int is_errata93(struct pt_regs *r
18721 static int is_errata100(struct pt_regs *regs, unsigned long address)
18722 {
18723 #ifdef CONFIG_X86_64
18724 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
18725 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
18726 return 1;
18727 #endif
18728 return 0;
18729 @@ -559,7 +626,7 @@ static int is_f00f_bug(struct pt_regs *r
18730 }
18731
18732 static const char nx_warning[] = KERN_CRIT
18733 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
18734 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
18735
18736 static void
18737 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
18738 @@ -568,15 +635,26 @@ show_fault_oops(struct pt_regs *regs, un
18739 if (!oops_may_print())
18740 return;
18741
18742 - if (error_code & PF_INSTR) {
18743 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
18744 unsigned int level;
18745
18746 pte_t *pte = lookup_address(address, &level);
18747
18748 if (pte && pte_present(*pte) && !pte_exec(*pte))
18749 - printk(nx_warning, current_uid());
18750 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
18751 }
18752
18753 +#ifdef CONFIG_PAX_KERNEXEC
18754 + if (init_mm.start_code <= address && address < init_mm.end_code) {
18755 + if (current->signal->curr_ip)
18756 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18757 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
18758 + else
18759 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18760 + current->comm, task_pid_nr(current), current_uid(), current_euid());
18761 + }
18762 +#endif
18763 +
18764 printk(KERN_ALERT "BUG: unable to handle kernel ");
18765 if (address < PAGE_SIZE)
18766 printk(KERN_CONT "NULL pointer dereference");
18767 @@ -701,6 +779,70 @@ __bad_area_nosemaphore(struct pt_regs *r
18768 unsigned long address, int si_code)
18769 {
18770 struct task_struct *tsk = current;
18771 +#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18772 + struct mm_struct *mm = tsk->mm;
18773 +#endif
18774 +
18775 +#ifdef CONFIG_X86_64
18776 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
18777 + if (regs->ip == (unsigned long)vgettimeofday) {
18778 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
18779 + return;
18780 + } else if (regs->ip == (unsigned long)vtime) {
18781 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
18782 + return;
18783 + } else if (regs->ip == (unsigned long)vgetcpu) {
18784 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
18785 + return;
18786 + }
18787 + }
18788 +#endif
18789 +
18790 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18791 + if (mm && (error_code & PF_USER)) {
18792 + unsigned long ip = regs->ip;
18793 +
18794 + if (v8086_mode(regs))
18795 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
18796 +
18797 + /*
18798 + * It's possible to have interrupts off here:
18799 + */
18800 + local_irq_enable();
18801 +
18802 +#ifdef CONFIG_PAX_PAGEEXEC
18803 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
18804 + (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
18805 +
18806 +#ifdef CONFIG_PAX_EMUTRAMP
18807 + switch (pax_handle_fetch_fault(regs)) {
18808 + case 2:
18809 + return;
18810 + }
18811 +#endif
18812 +
18813 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18814 + do_group_exit(SIGKILL);
18815 + }
18816 +#endif
18817 +
18818 +#ifdef CONFIG_PAX_SEGMEXEC
18819 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
18820 +
18821 +#ifdef CONFIG_PAX_EMUTRAMP
18822 + switch (pax_handle_fetch_fault(regs)) {
18823 + case 2:
18824 + return;
18825 + }
18826 +#endif
18827 +
18828 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18829 + do_group_exit(SIGKILL);
18830 + }
18831 +#endif
18832 +
18833 + }
18834 +#endif
18835
18836 /* User mode accesses just cause a SIGSEGV */
18837 if (error_code & PF_USER) {
18838 @@ -855,6 +997,99 @@ static int spurious_fault_check(unsigned
18839 return 1;
18840 }
18841
18842 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18843 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
18844 +{
18845 + pte_t *pte;
18846 + pmd_t *pmd;
18847 + spinlock_t *ptl;
18848 + unsigned char pte_mask;
18849 +
18850 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
18851 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
18852 + return 0;
18853 +
18854 + /* PaX: it's our fault, let's handle it if we can */
18855 +
18856 + /* PaX: take a look at read faults before acquiring any locks */
18857 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
18858 + /* instruction fetch attempt from a protected page in user mode */
18859 + up_read(&mm->mmap_sem);
18860 +
18861 +#ifdef CONFIG_PAX_EMUTRAMP
18862 + switch (pax_handle_fetch_fault(regs)) {
18863 + case 2:
18864 + return 1;
18865 + }
18866 +#endif
18867 +
18868 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
18869 + do_group_exit(SIGKILL);
18870 + }
18871 +
18872 + pmd = pax_get_pmd(mm, address);
18873 + if (unlikely(!pmd))
18874 + return 0;
18875 +
18876 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
18877 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
18878 + pte_unmap_unlock(pte, ptl);
18879 + return 0;
18880 + }
18881 +
18882 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
18883 + /* write attempt to a protected page in user mode */
18884 + pte_unmap_unlock(pte, ptl);
18885 + return 0;
18886 + }
18887 +
18888 +#ifdef CONFIG_SMP
18889 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
18890 +#else
18891 + if (likely(address > get_limit(regs->cs)))
18892 +#endif
18893 + {
18894 + set_pte(pte, pte_mkread(*pte));
18895 + __flush_tlb_one(address);
18896 + pte_unmap_unlock(pte, ptl);
18897 + up_read(&mm->mmap_sem);
18898 + return 1;
18899 + }
18900 +
18901 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
18902 +
18903 + /*
18904 + * PaX: fill DTLB with user rights and retry
18905 + */
18906 + __asm__ __volatile__ (
18907 + "orb %2,(%1)\n"
18908 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
18909 +/*
18910 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
18911 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
18912 + * page fault when examined during a TLB load attempt. this is true not only
18913 + * for PTEs holding a non-present entry but also present entries that will
18914 + * raise a page fault (such as those set up by PaX, or the copy-on-write
18915 + * mechanism). in effect it means that we do *not* need to flush the TLBs
18916 + * for our target pages since their PTEs are simply not in the TLBs at all.
18917 +
18918 + * the best thing in omitting it is that we gain around 15-20% speed in the
18919 + * fast path of the page fault handler and can get rid of tracing since we
18920 + * can no longer flush unintended entries.
18921 + */
18922 + "invlpg (%0)\n"
18923 +#endif
18924 + __copyuser_seg"testb $0,(%0)\n"
18925 + "xorb %3,(%1)\n"
18926 + :
18927 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
18928 + : "memory", "cc");
18929 + pte_unmap_unlock(pte, ptl);
18930 + up_read(&mm->mmap_sem);
18931 + return 1;
18932 +}
18933 +#endif
18934 +
18935 /*
18936 * Handle a spurious fault caused by a stale TLB entry.
18937 *
18938 @@ -927,6 +1162,9 @@ int show_unhandled_signals = 1;
18939 static inline int
18940 access_error(unsigned long error_code, struct vm_area_struct *vma)
18941 {
18942 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
18943 + return 1;
18944 +
18945 if (error_code & PF_WRITE) {
18946 /* write, present and write, not present: */
18947 if (unlikely(!(vma->vm_flags & VM_WRITE)))
18948 @@ -960,19 +1198,33 @@ do_page_fault(struct pt_regs *regs, unsi
18949 {
18950 struct vm_area_struct *vma;
18951 struct task_struct *tsk;
18952 - unsigned long address;
18953 struct mm_struct *mm;
18954 int fault;
18955 int write = error_code & PF_WRITE;
18956 unsigned int flags = FAULT_FLAG_ALLOW_RETRY |
18957 (write ? FAULT_FLAG_WRITE : 0);
18958
18959 + /* Get the faulting address: */
18960 + unsigned long address = read_cr2();
18961 +
18962 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18963 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
18964 + if (!search_exception_tables(regs->ip)) {
18965 + bad_area_nosemaphore(regs, error_code, address);
18966 + return;
18967 + }
18968 + if (address < PAX_USER_SHADOW_BASE) {
18969 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
18970 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
18971 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
18972 + } else
18973 + address -= PAX_USER_SHADOW_BASE;
18974 + }
18975 +#endif
18976 +
18977 tsk = current;
18978 mm = tsk->mm;
18979
18980 - /* Get the faulting address: */
18981 - address = read_cr2();
18982 -
18983 /*
18984 * Detect and handle instructions that would cause a page fault for
18985 * both a tracked kernel page and a userspace page.
18986 @@ -1032,7 +1284,7 @@ do_page_fault(struct pt_regs *regs, unsi
18987 * User-mode registers count as a user access even for any
18988 * potential system fault or CPU buglet:
18989 */
18990 - if (user_mode_vm(regs)) {
18991 + if (user_mode(regs)) {
18992 local_irq_enable();
18993 error_code |= PF_USER;
18994 } else {
18995 @@ -1087,6 +1339,11 @@ retry:
18996 might_sleep();
18997 }
18998
18999 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
19000 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
19001 + return;
19002 +#endif
19003 +
19004 vma = find_vma(mm, address);
19005 if (unlikely(!vma)) {
19006 bad_area(regs, error_code, address);
19007 @@ -1098,18 +1355,24 @@ retry:
19008 bad_area(regs, error_code, address);
19009 return;
19010 }
19011 - if (error_code & PF_USER) {
19012 - /*
19013 - * Accessing the stack below %sp is always a bug.
19014 - * The large cushion allows instructions like enter
19015 - * and pusha to work. ("enter $65535, $31" pushes
19016 - * 32 pointers and then decrements %sp by 65535.)
19017 - */
19018 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
19019 - bad_area(regs, error_code, address);
19020 - return;
19021 - }
19022 + /*
19023 + * Accessing the stack below %sp is always a bug.
19024 + * The large cushion allows instructions like enter
19025 + * and pusha to work. ("enter $65535, $31" pushes
19026 + * 32 pointers and then decrements %sp by 65535.)
19027 + */
19028 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
19029 + bad_area(regs, error_code, address);
19030 + return;
19031 + }
19032 +
19033 +#ifdef CONFIG_PAX_SEGMEXEC
19034 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
19035 + bad_area(regs, error_code, address);
19036 + return;
19037 }
19038 +#endif
19039 +
19040 if (unlikely(expand_stack(vma, address))) {
19041 bad_area(regs, error_code, address);
19042 return;
19043 @@ -1164,3 +1427,199 @@ good_area:
19044
19045 up_read(&mm->mmap_sem);
19046 }
19047 +
19048 +#ifdef CONFIG_PAX_EMUTRAMP
19049 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
19050 +{
19051 + int err;
19052 +
19053 + do { /* PaX: gcc trampoline emulation #1 */
19054 + unsigned char mov1, mov2;
19055 + unsigned short jmp;
19056 + unsigned int addr1, addr2;
19057 +
19058 +#ifdef CONFIG_X86_64
19059 + if ((regs->ip + 11) >> 32)
19060 + break;
19061 +#endif
19062 +
19063 + err = get_user(mov1, (unsigned char __user *)regs->ip);
19064 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19065 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
19066 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19067 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
19068 +
19069 + if (err)
19070 + break;
19071 +
19072 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
19073 + regs->cx = addr1;
19074 + regs->ax = addr2;
19075 + regs->ip = addr2;
19076 + return 2;
19077 + }
19078 + } while (0);
19079 +
19080 + do { /* PaX: gcc trampoline emulation #2 */
19081 + unsigned char mov, jmp;
19082 + unsigned int addr1, addr2;
19083 +
19084 +#ifdef CONFIG_X86_64
19085 + if ((regs->ip + 9) >> 32)
19086 + break;
19087 +#endif
19088 +
19089 + err = get_user(mov, (unsigned char __user *)regs->ip);
19090 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19091 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
19092 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19093 +
19094 + if (err)
19095 + break;
19096 +
19097 + if (mov == 0xB9 && jmp == 0xE9) {
19098 + regs->cx = addr1;
19099 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
19100 + return 2;
19101 + }
19102 + } while (0);
19103 +
19104 + return 1; /* PaX in action */
19105 +}
19106 +
19107 +#ifdef CONFIG_X86_64
19108 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
19109 +{
19110 + int err;
19111 +
19112 + do { /* PaX: gcc trampoline emulation #1 */
19113 + unsigned short mov1, mov2, jmp1;
19114 + unsigned char jmp2;
19115 + unsigned int addr1;
19116 + unsigned long addr2;
19117 +
19118 + err = get_user(mov1, (unsigned short __user *)regs->ip);
19119 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
19120 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
19121 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
19122 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
19123 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
19124 +
19125 + if (err)
19126 + break;
19127 +
19128 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19129 + regs->r11 = addr1;
19130 + regs->r10 = addr2;
19131 + regs->ip = addr1;
19132 + return 2;
19133 + }
19134 + } while (0);
19135 +
19136 + do { /* PaX: gcc trampoline emulation #2 */
19137 + unsigned short mov1, mov2, jmp1;
19138 + unsigned char jmp2;
19139 + unsigned long addr1, addr2;
19140 +
19141 + err = get_user(mov1, (unsigned short __user *)regs->ip);
19142 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
19143 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
19144 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
19145 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
19146 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
19147 +
19148 + if (err)
19149 + break;
19150 +
19151 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19152 + regs->r11 = addr1;
19153 + regs->r10 = addr2;
19154 + regs->ip = addr1;
19155 + return 2;
19156 + }
19157 + } while (0);
19158 +
19159 + return 1; /* PaX in action */
19160 +}
19161 +#endif
19162 +
19163 +/*
19164 + * PaX: decide what to do with offenders (regs->ip = fault address)
19165 + *
19166 + * returns 1 when task should be killed
19167 + * 2 when gcc trampoline was detected
19168 + */
19169 +static int pax_handle_fetch_fault(struct pt_regs *regs)
19170 +{
19171 + if (v8086_mode(regs))
19172 + return 1;
19173 +
19174 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
19175 + return 1;
19176 +
19177 +#ifdef CONFIG_X86_32
19178 + return pax_handle_fetch_fault_32(regs);
19179 +#else
19180 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
19181 + return pax_handle_fetch_fault_32(regs);
19182 + else
19183 + return pax_handle_fetch_fault_64(regs);
19184 +#endif
19185 +}
19186 +#endif
19187 +
19188 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19189 +void pax_report_insns(void *pc, void *sp)
19190 +{
19191 + long i;
19192 +
19193 + printk(KERN_ERR "PAX: bytes at PC: ");
19194 + for (i = 0; i < 20; i++) {
19195 + unsigned char c;
19196 + if (get_user(c, (__force unsigned char __user *)pc+i))
19197 + printk(KERN_CONT "?? ");
19198 + else
19199 + printk(KERN_CONT "%02x ", c);
19200 + }
19201 + printk("\n");
19202 +
19203 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
19204 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
19205 + unsigned long c;
19206 + if (get_user(c, (__force unsigned long __user *)sp+i))
19207 +#ifdef CONFIG_X86_32
19208 + printk(KERN_CONT "???????? ");
19209 +#else
19210 + printk(KERN_CONT "???????????????? ");
19211 +#endif
19212 + else
19213 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
19214 + }
19215 + printk("\n");
19216 +}
19217 +#endif
19218 +
19219 +/**
19220 + * probe_kernel_write(): safely attempt to write to a location
19221 + * @dst: address to write to
19222 + * @src: pointer to the data that shall be written
19223 + * @size: size of the data chunk
19224 + *
19225 + * Safely write to address @dst from the buffer at @src. If a kernel fault
19226 + * happens, handle that and return -EFAULT.
19227 + */
19228 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
19229 +{
19230 + long ret;
19231 + mm_segment_t old_fs = get_fs();
19232 +
19233 + set_fs(KERNEL_DS);
19234 + pagefault_disable();
19235 + pax_open_kernel();
19236 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
19237 + pax_close_kernel();
19238 + pagefault_enable();
19239 + set_fs(old_fs);
19240 +
19241 + return ret ? -EFAULT : 0;
19242 +}
19243 diff -urNp linux-2.6.39.4/arch/x86/mm/gup.c linux-2.6.39.4/arch/x86/mm/gup.c
19244 --- linux-2.6.39.4/arch/x86/mm/gup.c 2011-05-19 00:06:34.000000000 -0400
19245 +++ linux-2.6.39.4/arch/x86/mm/gup.c 2011-08-05 19:44:35.000000000 -0400
19246 @@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
19247 addr = start;
19248 len = (unsigned long) nr_pages << PAGE_SHIFT;
19249 end = start + len;
19250 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19251 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19252 (void __user *)start, len)))
19253 return 0;
19254
19255 diff -urNp linux-2.6.39.4/arch/x86/mm/highmem_32.c linux-2.6.39.4/arch/x86/mm/highmem_32.c
19256 --- linux-2.6.39.4/arch/x86/mm/highmem_32.c 2011-05-19 00:06:34.000000000 -0400
19257 +++ linux-2.6.39.4/arch/x86/mm/highmem_32.c 2011-08-05 19:44:35.000000000 -0400
19258 @@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
19259 idx = type + KM_TYPE_NR*smp_processor_id();
19260 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19261 BUG_ON(!pte_none(*(kmap_pte-idx)));
19262 +
19263 + pax_open_kernel();
19264 set_pte(kmap_pte-idx, mk_pte(page, prot));
19265 + pax_close_kernel();
19266
19267 return (void *)vaddr;
19268 }
19269 diff -urNp linux-2.6.39.4/arch/x86/mm/hugetlbpage.c linux-2.6.39.4/arch/x86/mm/hugetlbpage.c
19270 --- linux-2.6.39.4/arch/x86/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400
19271 +++ linux-2.6.39.4/arch/x86/mm/hugetlbpage.c 2011-08-05 19:44:35.000000000 -0400
19272 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
19273 struct hstate *h = hstate_file(file);
19274 struct mm_struct *mm = current->mm;
19275 struct vm_area_struct *vma;
19276 - unsigned long start_addr;
19277 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19278 +
19279 +#ifdef CONFIG_PAX_SEGMEXEC
19280 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19281 + pax_task_size = SEGMEXEC_TASK_SIZE;
19282 +#endif
19283 +
19284 + pax_task_size -= PAGE_SIZE;
19285
19286 if (len > mm->cached_hole_size) {
19287 - start_addr = mm->free_area_cache;
19288 + start_addr = mm->free_area_cache;
19289 } else {
19290 - start_addr = TASK_UNMAPPED_BASE;
19291 - mm->cached_hole_size = 0;
19292 + start_addr = mm->mmap_base;
19293 + mm->cached_hole_size = 0;
19294 }
19295
19296 full_search:
19297 @@ -280,26 +287,27 @@ full_search:
19298
19299 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19300 /* At this point: (!vma || addr < vma->vm_end). */
19301 - if (TASK_SIZE - len < addr) {
19302 + if (pax_task_size - len < addr) {
19303 /*
19304 * Start a new search - just in case we missed
19305 * some holes.
19306 */
19307 - if (start_addr != TASK_UNMAPPED_BASE) {
19308 - start_addr = TASK_UNMAPPED_BASE;
19309 + if (start_addr != mm->mmap_base) {
19310 + start_addr = mm->mmap_base;
19311 mm->cached_hole_size = 0;
19312 goto full_search;
19313 }
19314 return -ENOMEM;
19315 }
19316 - if (!vma || addr + len <= vma->vm_start) {
19317 - mm->free_area_cache = addr + len;
19318 - return addr;
19319 - }
19320 + if (check_heap_stack_gap(vma, addr, len))
19321 + break;
19322 if (addr + mm->cached_hole_size < vma->vm_start)
19323 mm->cached_hole_size = vma->vm_start - addr;
19324 addr = ALIGN(vma->vm_end, huge_page_size(h));
19325 }
19326 +
19327 + mm->free_area_cache = addr + len;
19328 + return addr;
19329 }
19330
19331 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
19332 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
19333 {
19334 struct hstate *h = hstate_file(file);
19335 struct mm_struct *mm = current->mm;
19336 - struct vm_area_struct *vma, *prev_vma;
19337 - unsigned long base = mm->mmap_base, addr = addr0;
19338 + struct vm_area_struct *vma;
19339 + unsigned long base = mm->mmap_base, addr;
19340 unsigned long largest_hole = mm->cached_hole_size;
19341 - int first_time = 1;
19342
19343 /* don't allow allocations above current base */
19344 if (mm->free_area_cache > base)
19345 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
19346 largest_hole = 0;
19347 mm->free_area_cache = base;
19348 }
19349 -try_again:
19350 +
19351 /* make sure it can fit in the remaining address space */
19352 if (mm->free_area_cache < len)
19353 goto fail;
19354
19355 /* either no address requested or can't fit in requested address hole */
19356 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
19357 + addr = (mm->free_area_cache - len);
19358 do {
19359 + addr &= huge_page_mask(h);
19360 + vma = find_vma(mm, addr);
19361 /*
19362 * Lookup failure means no vma is above this address,
19363 * i.e. return with success:
19364 - */
19365 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
19366 - return addr;
19367 -
19368 - /*
19369 * new region fits between prev_vma->vm_end and
19370 * vma->vm_start, use it:
19371 */
19372 - if (addr + len <= vma->vm_start &&
19373 - (!prev_vma || (addr >= prev_vma->vm_end))) {
19374 + if (check_heap_stack_gap(vma, addr, len)) {
19375 /* remember the address as a hint for next time */
19376 - mm->cached_hole_size = largest_hole;
19377 - return (mm->free_area_cache = addr);
19378 - } else {
19379 - /* pull free_area_cache down to the first hole */
19380 - if (mm->free_area_cache == vma->vm_end) {
19381 - mm->free_area_cache = vma->vm_start;
19382 - mm->cached_hole_size = largest_hole;
19383 - }
19384 + mm->cached_hole_size = largest_hole;
19385 + return (mm->free_area_cache = addr);
19386 + }
19387 + /* pull free_area_cache down to the first hole */
19388 + if (mm->free_area_cache == vma->vm_end) {
19389 + mm->free_area_cache = vma->vm_start;
19390 + mm->cached_hole_size = largest_hole;
19391 }
19392
19393 /* remember the largest hole we saw so far */
19394 if (addr + largest_hole < vma->vm_start)
19395 - largest_hole = vma->vm_start - addr;
19396 + largest_hole = vma->vm_start - addr;
19397
19398 /* try just below the current vma->vm_start */
19399 - addr = (vma->vm_start - len) & huge_page_mask(h);
19400 - } while (len <= vma->vm_start);
19401 + addr = skip_heap_stack_gap(vma, len);
19402 + } while (!IS_ERR_VALUE(addr));
19403
19404 fail:
19405 /*
19406 - * if hint left us with no space for the requested
19407 - * mapping then try again:
19408 - */
19409 - if (first_time) {
19410 - mm->free_area_cache = base;
19411 - largest_hole = 0;
19412 - first_time = 0;
19413 - goto try_again;
19414 - }
19415 - /*
19416 * A failed mmap() very likely causes application failure,
19417 * so fall back to the bottom-up function here. This scenario
19418 * can happen with large stack limits and large mmap()
19419 * allocations.
19420 */
19421 - mm->free_area_cache = TASK_UNMAPPED_BASE;
19422 +
19423 +#ifdef CONFIG_PAX_SEGMEXEC
19424 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19425 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19426 + else
19427 +#endif
19428 +
19429 + mm->mmap_base = TASK_UNMAPPED_BASE;
19430 +
19431 +#ifdef CONFIG_PAX_RANDMMAP
19432 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19433 + mm->mmap_base += mm->delta_mmap;
19434 +#endif
19435 +
19436 + mm->free_area_cache = mm->mmap_base;
19437 mm->cached_hole_size = ~0UL;
19438 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
19439 len, pgoff, flags);
19440 @@ -386,6 +392,7 @@ fail:
19441 /*
19442 * Restore the topdown base:
19443 */
19444 + mm->mmap_base = base;
19445 mm->free_area_cache = base;
19446 mm->cached_hole_size = ~0UL;
19447
19448 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
19449 struct hstate *h = hstate_file(file);
19450 struct mm_struct *mm = current->mm;
19451 struct vm_area_struct *vma;
19452 + unsigned long pax_task_size = TASK_SIZE;
19453
19454 if (len & ~huge_page_mask(h))
19455 return -EINVAL;
19456 - if (len > TASK_SIZE)
19457 +
19458 +#ifdef CONFIG_PAX_SEGMEXEC
19459 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19460 + pax_task_size = SEGMEXEC_TASK_SIZE;
19461 +#endif
19462 +
19463 + pax_task_size -= PAGE_SIZE;
19464 +
19465 + if (len > pax_task_size)
19466 return -ENOMEM;
19467
19468 if (flags & MAP_FIXED) {
19469 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
19470 if (addr) {
19471 addr = ALIGN(addr, huge_page_size(h));
19472 vma = find_vma(mm, addr);
19473 - if (TASK_SIZE - len >= addr &&
19474 - (!vma || addr + len <= vma->vm_start))
19475 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
19476 return addr;
19477 }
19478 if (mm->get_unmapped_area == arch_get_unmapped_area)
19479 diff -urNp linux-2.6.39.4/arch/x86/mm/init_32.c linux-2.6.39.4/arch/x86/mm/init_32.c
19480 --- linux-2.6.39.4/arch/x86/mm/init_32.c 2011-05-19 00:06:34.000000000 -0400
19481 +++ linux-2.6.39.4/arch/x86/mm/init_32.c 2011-08-05 19:44:35.000000000 -0400
19482 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
19483 }
19484
19485 /*
19486 - * Creates a middle page table and puts a pointer to it in the
19487 - * given global directory entry. This only returns the gd entry
19488 - * in non-PAE compilation mode, since the middle layer is folded.
19489 - */
19490 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
19491 -{
19492 - pud_t *pud;
19493 - pmd_t *pmd_table;
19494 -
19495 -#ifdef CONFIG_X86_PAE
19496 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
19497 - if (after_bootmem)
19498 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
19499 - else
19500 - pmd_table = (pmd_t *)alloc_low_page();
19501 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
19502 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
19503 - pud = pud_offset(pgd, 0);
19504 - BUG_ON(pmd_table != pmd_offset(pud, 0));
19505 -
19506 - return pmd_table;
19507 - }
19508 -#endif
19509 - pud = pud_offset(pgd, 0);
19510 - pmd_table = pmd_offset(pud, 0);
19511 -
19512 - return pmd_table;
19513 -}
19514 -
19515 -/*
19516 * Create a page table and place a pointer to it in a middle page
19517 * directory entry:
19518 */
19519 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
19520 page_table = (pte_t *)alloc_low_page();
19521
19522 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
19523 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19524 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
19525 +#else
19526 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
19527 +#endif
19528 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
19529 }
19530
19531 return pte_offset_kernel(pmd, 0);
19532 }
19533
19534 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
19535 +{
19536 + pud_t *pud;
19537 + pmd_t *pmd_table;
19538 +
19539 + pud = pud_offset(pgd, 0);
19540 + pmd_table = pmd_offset(pud, 0);
19541 +
19542 + return pmd_table;
19543 +}
19544 +
19545 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
19546 {
19547 int pgd_idx = pgd_index(vaddr);
19548 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
19549 int pgd_idx, pmd_idx;
19550 unsigned long vaddr;
19551 pgd_t *pgd;
19552 + pud_t *pud;
19553 pmd_t *pmd;
19554 pte_t *pte = NULL;
19555
19556 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
19557 pgd = pgd_base + pgd_idx;
19558
19559 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
19560 - pmd = one_md_table_init(pgd);
19561 - pmd = pmd + pmd_index(vaddr);
19562 + pud = pud_offset(pgd, vaddr);
19563 + pmd = pmd_offset(pud, vaddr);
19564 +
19565 +#ifdef CONFIG_X86_PAE
19566 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19567 +#endif
19568 +
19569 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
19570 pmd++, pmd_idx++) {
19571 pte = page_table_kmap_check(one_page_table_init(pmd),
19572 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
19573 }
19574 }
19575
19576 -static inline int is_kernel_text(unsigned long addr)
19577 +static inline int is_kernel_text(unsigned long start, unsigned long end)
19578 {
19579 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
19580 - return 1;
19581 - return 0;
19582 + if ((start > ktla_ktva((unsigned long)_etext) ||
19583 + end <= ktla_ktva((unsigned long)_stext)) &&
19584 + (start > ktla_ktva((unsigned long)_einittext) ||
19585 + end <= ktla_ktva((unsigned long)_sinittext)) &&
19586 +
19587 +#ifdef CONFIG_ACPI_SLEEP
19588 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
19589 +#endif
19590 +
19591 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
19592 + return 0;
19593 + return 1;
19594 }
19595
19596 /*
19597 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
19598 unsigned long last_map_addr = end;
19599 unsigned long start_pfn, end_pfn;
19600 pgd_t *pgd_base = swapper_pg_dir;
19601 - int pgd_idx, pmd_idx, pte_ofs;
19602 + unsigned int pgd_idx, pmd_idx, pte_ofs;
19603 unsigned long pfn;
19604 pgd_t *pgd;
19605 + pud_t *pud;
19606 pmd_t *pmd;
19607 pte_t *pte;
19608 unsigned pages_2m, pages_4k;
19609 @@ -281,8 +282,13 @@ repeat:
19610 pfn = start_pfn;
19611 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19612 pgd = pgd_base + pgd_idx;
19613 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
19614 - pmd = one_md_table_init(pgd);
19615 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
19616 + pud = pud_offset(pgd, 0);
19617 + pmd = pmd_offset(pud, 0);
19618 +
19619 +#ifdef CONFIG_X86_PAE
19620 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19621 +#endif
19622
19623 if (pfn >= end_pfn)
19624 continue;
19625 @@ -294,14 +300,13 @@ repeat:
19626 #endif
19627 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
19628 pmd++, pmd_idx++) {
19629 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
19630 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
19631
19632 /*
19633 * Map with big pages if possible, otherwise
19634 * create normal page tables:
19635 */
19636 if (use_pse) {
19637 - unsigned int addr2;
19638 pgprot_t prot = PAGE_KERNEL_LARGE;
19639 /*
19640 * first pass will use the same initial
19641 @@ -311,11 +316,7 @@ repeat:
19642 __pgprot(PTE_IDENT_ATTR |
19643 _PAGE_PSE);
19644
19645 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
19646 - PAGE_OFFSET + PAGE_SIZE-1;
19647 -
19648 - if (is_kernel_text(addr) ||
19649 - is_kernel_text(addr2))
19650 + if (is_kernel_text(address, address + PMD_SIZE))
19651 prot = PAGE_KERNEL_LARGE_EXEC;
19652
19653 pages_2m++;
19654 @@ -332,7 +333,7 @@ repeat:
19655 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19656 pte += pte_ofs;
19657 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
19658 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
19659 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
19660 pgprot_t prot = PAGE_KERNEL;
19661 /*
19662 * first pass will use the same initial
19663 @@ -340,7 +341,7 @@ repeat:
19664 */
19665 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
19666
19667 - if (is_kernel_text(addr))
19668 + if (is_kernel_text(address, address + PAGE_SIZE))
19669 prot = PAGE_KERNEL_EXEC;
19670
19671 pages_4k++;
19672 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
19673
19674 pud = pud_offset(pgd, va);
19675 pmd = pmd_offset(pud, va);
19676 - if (!pmd_present(*pmd))
19677 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
19678 break;
19679
19680 pte = pte_offset_kernel(pmd, va);
19681 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
19682
19683 static void __init pagetable_init(void)
19684 {
19685 - pgd_t *pgd_base = swapper_pg_dir;
19686 -
19687 - permanent_kmaps_init(pgd_base);
19688 + permanent_kmaps_init(swapper_pg_dir);
19689 }
19690
19691 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19692 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19693 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19694
19695 /* user-defined highmem size */
19696 @@ -754,6 +753,12 @@ void __init mem_init(void)
19697
19698 pci_iommu_alloc();
19699
19700 +#ifdef CONFIG_PAX_PER_CPU_PGD
19701 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19702 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19703 + KERNEL_PGD_PTRS);
19704 +#endif
19705 +
19706 #ifdef CONFIG_FLATMEM
19707 BUG_ON(!mem_map);
19708 #endif
19709 @@ -771,7 +776,7 @@ void __init mem_init(void)
19710 set_highmem_pages_init();
19711
19712 codesize = (unsigned long) &_etext - (unsigned long) &_text;
19713 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
19714 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
19715 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
19716
19717 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
19718 @@ -812,10 +817,10 @@ void __init mem_init(void)
19719 ((unsigned long)&__init_end -
19720 (unsigned long)&__init_begin) >> 10,
19721
19722 - (unsigned long)&_etext, (unsigned long)&_edata,
19723 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
19724 + (unsigned long)&_sdata, (unsigned long)&_edata,
19725 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
19726
19727 - (unsigned long)&_text, (unsigned long)&_etext,
19728 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
19729 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
19730
19731 /*
19732 @@ -893,6 +898,7 @@ void set_kernel_text_rw(void)
19733 if (!kernel_set_to_readonly)
19734 return;
19735
19736 + start = ktla_ktva(start);
19737 pr_debug("Set kernel text: %lx - %lx for read write\n",
19738 start, start+size);
19739
19740 @@ -907,6 +913,7 @@ void set_kernel_text_ro(void)
19741 if (!kernel_set_to_readonly)
19742 return;
19743
19744 + start = ktla_ktva(start);
19745 pr_debug("Set kernel text: %lx - %lx for read only\n",
19746 start, start+size);
19747
19748 @@ -935,6 +942,7 @@ void mark_rodata_ro(void)
19749 unsigned long start = PFN_ALIGN(_text);
19750 unsigned long size = PFN_ALIGN(_etext) - start;
19751
19752 + start = ktla_ktva(start);
19753 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
19754 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
19755 size >> 10);
19756 diff -urNp linux-2.6.39.4/arch/x86/mm/init_64.c linux-2.6.39.4/arch/x86/mm/init_64.c
19757 --- linux-2.6.39.4/arch/x86/mm/init_64.c 2011-05-19 00:06:34.000000000 -0400
19758 +++ linux-2.6.39.4/arch/x86/mm/init_64.c 2011-08-05 19:44:35.000000000 -0400
19759 @@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpa
19760 * around without checking the pgd every time.
19761 */
19762
19763 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
19764 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
19765 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19766
19767 int force_personality32;
19768 @@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long star
19769
19770 for (address = start; address <= end; address += PGDIR_SIZE) {
19771 const pgd_t *pgd_ref = pgd_offset_k(address);
19772 +
19773 +#ifdef CONFIG_PAX_PER_CPU_PGD
19774 + unsigned long cpu;
19775 +#else
19776 struct page *page;
19777 +#endif
19778
19779 if (pgd_none(*pgd_ref))
19780 continue;
19781
19782 spin_lock(&pgd_lock);
19783 +
19784 +#ifdef CONFIG_PAX_PER_CPU_PGD
19785 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19786 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
19787 +#else
19788 list_for_each_entry(page, &pgd_list, lru) {
19789 pgd_t *pgd;
19790 spinlock_t *pgt_lock;
19791 @@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long star
19792 /* the pgt_lock only for Xen */
19793 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19794 spin_lock(pgt_lock);
19795 +#endif
19796
19797 if (pgd_none(*pgd))
19798 set_pgd(pgd, *pgd_ref);
19799 @@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long star
19800 BUG_ON(pgd_page_vaddr(*pgd)
19801 != pgd_page_vaddr(*pgd_ref));
19802
19803 +#ifndef CONFIG_PAX_PER_CPU_PGD
19804 spin_unlock(pgt_lock);
19805 +#endif
19806 +
19807 }
19808 spin_unlock(&pgd_lock);
19809 }
19810 @@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
19811 pmd = fill_pmd(pud, vaddr);
19812 pte = fill_pte(pmd, vaddr);
19813
19814 + pax_open_kernel();
19815 set_pte(pte, new_pte);
19816 + pax_close_kernel();
19817
19818 /*
19819 * It's enough to flush this one mapping.
19820 @@ -261,14 +277,12 @@ static void __init __init_extra_mapping(
19821 pgd = pgd_offset_k((unsigned long)__va(phys));
19822 if (pgd_none(*pgd)) {
19823 pud = (pud_t *) spp_getpage();
19824 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
19825 - _PAGE_USER));
19826 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
19827 }
19828 pud = pud_offset(pgd, (unsigned long)__va(phys));
19829 if (pud_none(*pud)) {
19830 pmd = (pmd_t *) spp_getpage();
19831 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
19832 - _PAGE_USER));
19833 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
19834 }
19835 pmd = pmd_offset(pud, phys);
19836 BUG_ON(!pmd_none(*pmd));
19837 @@ -698,6 +712,12 @@ void __init mem_init(void)
19838
19839 pci_iommu_alloc();
19840
19841 +#ifdef CONFIG_PAX_PER_CPU_PGD
19842 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19843 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19844 + KERNEL_PGD_PTRS);
19845 +#endif
19846 +
19847 /* clear_bss() already clear the empty_zero_page */
19848
19849 reservedpages = 0;
19850 @@ -858,8 +878,8 @@ int kern_addr_valid(unsigned long addr)
19851 static struct vm_area_struct gate_vma = {
19852 .vm_start = VSYSCALL_START,
19853 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
19854 - .vm_page_prot = PAGE_READONLY_EXEC,
19855 - .vm_flags = VM_READ | VM_EXEC
19856 + .vm_page_prot = PAGE_READONLY,
19857 + .vm_flags = VM_READ
19858 };
19859
19860 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
19861 @@ -893,7 +913,7 @@ int in_gate_area_no_mm(unsigned long add
19862
19863 const char *arch_vma_name(struct vm_area_struct *vma)
19864 {
19865 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
19866 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
19867 return "[vdso]";
19868 if (vma == &gate_vma)
19869 return "[vsyscall]";
19870 diff -urNp linux-2.6.39.4/arch/x86/mm/init.c linux-2.6.39.4/arch/x86/mm/init.c
19871 --- linux-2.6.39.4/arch/x86/mm/init.c 2011-05-19 00:06:34.000000000 -0400
19872 +++ linux-2.6.39.4/arch/x86/mm/init.c 2011-08-05 19:44:35.000000000 -0400
19873 @@ -33,7 +33,7 @@ int direct_gbpages
19874 static void __init find_early_table_space(unsigned long end, int use_pse,
19875 int use_gbpages)
19876 {
19877 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
19878 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
19879 phys_addr_t base;
19880
19881 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
19882 @@ -315,12 +315,34 @@ unsigned long __init_refok init_memory_m
19883 */
19884 int devmem_is_allowed(unsigned long pagenr)
19885 {
19886 - if (pagenr <= 256)
19887 +#ifdef CONFIG_GRKERNSEC_KMEM
19888 + /* allow BDA */
19889 + if (!pagenr)
19890 + return 1;
19891 + /* allow EBDA */
19892 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
19893 + return 1;
19894 +#else
19895 + if (!pagenr)
19896 + return 1;
19897 +#ifdef CONFIG_VM86
19898 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
19899 + return 1;
19900 +#endif
19901 +#endif
19902 +
19903 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
19904 return 1;
19905 +#ifdef CONFIG_GRKERNSEC_KMEM
19906 + /* throw out everything else below 1MB */
19907 + if (pagenr <= 256)
19908 + return 0;
19909 +#endif
19910 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
19911 return 0;
19912 if (!page_is_ram(pagenr))
19913 return 1;
19914 +
19915 return 0;
19916 }
19917
19918 @@ -375,6 +397,86 @@ void free_init_pages(char *what, unsigne
19919
19920 void free_initmem(void)
19921 {
19922 +
19923 +#ifdef CONFIG_PAX_KERNEXEC
19924 +#ifdef CONFIG_X86_32
19925 + /* PaX: limit KERNEL_CS to actual size */
19926 + unsigned long addr, limit;
19927 + struct desc_struct d;
19928 + int cpu;
19929 +
19930 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
19931 + limit = (limit - 1UL) >> PAGE_SHIFT;
19932 +
19933 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
19934 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
19935 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
19936 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
19937 + }
19938 +
19939 + /* PaX: make KERNEL_CS read-only */
19940 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
19941 + if (!paravirt_enabled())
19942 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
19943 +/*
19944 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
19945 + pgd = pgd_offset_k(addr);
19946 + pud = pud_offset(pgd, addr);
19947 + pmd = pmd_offset(pud, addr);
19948 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19949 + }
19950 +*/
19951 +#ifdef CONFIG_X86_PAE
19952 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
19953 +/*
19954 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
19955 + pgd = pgd_offset_k(addr);
19956 + pud = pud_offset(pgd, addr);
19957 + pmd = pmd_offset(pud, addr);
19958 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19959 + }
19960 +*/
19961 +#endif
19962 +
19963 +#ifdef CONFIG_MODULES
19964 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
19965 +#endif
19966 +
19967 +#else
19968 + pgd_t *pgd;
19969 + pud_t *pud;
19970 + pmd_t *pmd;
19971 + unsigned long addr, end;
19972 +
19973 + /* PaX: make kernel code/rodata read-only, rest non-executable */
19974 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
19975 + pgd = pgd_offset_k(addr);
19976 + pud = pud_offset(pgd, addr);
19977 + pmd = pmd_offset(pud, addr);
19978 + if (!pmd_present(*pmd))
19979 + continue;
19980 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
19981 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19982 + else
19983 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19984 + }
19985 +
19986 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
19987 + end = addr + KERNEL_IMAGE_SIZE;
19988 + for (; addr < end; addr += PMD_SIZE) {
19989 + pgd = pgd_offset_k(addr);
19990 + pud = pud_offset(pgd, addr);
19991 + pmd = pmd_offset(pud, addr);
19992 + if (!pmd_present(*pmd))
19993 + continue;
19994 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
19995 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19996 + }
19997 +#endif
19998 +
19999 + flush_tlb_all();
20000 +#endif
20001 +
20002 free_init_pages("unused kernel memory",
20003 (unsigned long)(&__init_begin),
20004 (unsigned long)(&__init_end));
20005 diff -urNp linux-2.6.39.4/arch/x86/mm/iomap_32.c linux-2.6.39.4/arch/x86/mm/iomap_32.c
20006 --- linux-2.6.39.4/arch/x86/mm/iomap_32.c 2011-05-19 00:06:34.000000000 -0400
20007 +++ linux-2.6.39.4/arch/x86/mm/iomap_32.c 2011-08-05 19:44:35.000000000 -0400
20008 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
20009 type = kmap_atomic_idx_push();
20010 idx = type + KM_TYPE_NR * smp_processor_id();
20011 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
20012 +
20013 + pax_open_kernel();
20014 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
20015 + pax_close_kernel();
20016 +
20017 arch_flush_lazy_mmu_mode();
20018
20019 return (void *)vaddr;
20020 diff -urNp linux-2.6.39.4/arch/x86/mm/ioremap.c linux-2.6.39.4/arch/x86/mm/ioremap.c
20021 --- linux-2.6.39.4/arch/x86/mm/ioremap.c 2011-05-19 00:06:34.000000000 -0400
20022 +++ linux-2.6.39.4/arch/x86/mm/ioremap.c 2011-08-05 19:44:35.000000000 -0400
20023 @@ -104,7 +104,7 @@ static void __iomem *__ioremap_caller(re
20024 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
20025 int is_ram = page_is_ram(pfn);
20026
20027 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
20028 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
20029 return NULL;
20030 WARN_ON_ONCE(is_ram);
20031 }
20032 @@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
20033 early_param("early_ioremap_debug", early_ioremap_debug_setup);
20034
20035 static __initdata int after_paging_init;
20036 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
20037 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
20038
20039 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
20040 {
20041 @@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
20042 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
20043
20044 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
20045 - memset(bm_pte, 0, sizeof(bm_pte));
20046 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
20047 + pmd_populate_user(&init_mm, pmd, bm_pte);
20048
20049 /*
20050 * The boot-ioremap range spans multiple pmds, for which
20051 diff -urNp linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c
20052 --- linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-05-19 00:06:34.000000000 -0400
20053 +++ linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-05 19:44:35.000000000 -0400
20054 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
20055 * memory (e.g. tracked pages)? For now, we need this to avoid
20056 * invoking kmemcheck for PnP BIOS calls.
20057 */
20058 - if (regs->flags & X86_VM_MASK)
20059 + if (v8086_mode(regs))
20060 return false;
20061 - if (regs->cs != __KERNEL_CS)
20062 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
20063 return false;
20064
20065 pte = kmemcheck_pte_lookup(address);
20066 diff -urNp linux-2.6.39.4/arch/x86/mm/mmap.c linux-2.6.39.4/arch/x86/mm/mmap.c
20067 --- linux-2.6.39.4/arch/x86/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
20068 +++ linux-2.6.39.4/arch/x86/mm/mmap.c 2011-08-05 19:44:35.000000000 -0400
20069 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
20070 * Leave an at least ~128 MB hole with possible stack randomization.
20071 */
20072 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
20073 -#define MAX_GAP (TASK_SIZE/6*5)
20074 +#define MAX_GAP (pax_task_size/6*5)
20075
20076 /*
20077 * True on X86_32 or when emulating IA32 on X86_64
20078 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
20079 return rnd << PAGE_SHIFT;
20080 }
20081
20082 -static unsigned long mmap_base(void)
20083 +static unsigned long mmap_base(struct mm_struct *mm)
20084 {
20085 unsigned long gap = rlimit(RLIMIT_STACK);
20086 + unsigned long pax_task_size = TASK_SIZE;
20087 +
20088 +#ifdef CONFIG_PAX_SEGMEXEC
20089 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20090 + pax_task_size = SEGMEXEC_TASK_SIZE;
20091 +#endif
20092
20093 if (gap < MIN_GAP)
20094 gap = MIN_GAP;
20095 else if (gap > MAX_GAP)
20096 gap = MAX_GAP;
20097
20098 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
20099 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
20100 }
20101
20102 /*
20103 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
20104 * does, but not when emulating X86_32
20105 */
20106 -static unsigned long mmap_legacy_base(void)
20107 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
20108 {
20109 - if (mmap_is_ia32())
20110 + if (mmap_is_ia32()) {
20111 +
20112 +#ifdef CONFIG_PAX_SEGMEXEC
20113 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20114 + return SEGMEXEC_TASK_UNMAPPED_BASE;
20115 + else
20116 +#endif
20117 +
20118 return TASK_UNMAPPED_BASE;
20119 - else
20120 + } else
20121 return TASK_UNMAPPED_BASE + mmap_rnd();
20122 }
20123
20124 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
20125 void arch_pick_mmap_layout(struct mm_struct *mm)
20126 {
20127 if (mmap_is_legacy()) {
20128 - mm->mmap_base = mmap_legacy_base();
20129 + mm->mmap_base = mmap_legacy_base(mm);
20130 +
20131 +#ifdef CONFIG_PAX_RANDMMAP
20132 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20133 + mm->mmap_base += mm->delta_mmap;
20134 +#endif
20135 +
20136 mm->get_unmapped_area = arch_get_unmapped_area;
20137 mm->unmap_area = arch_unmap_area;
20138 } else {
20139 - mm->mmap_base = mmap_base();
20140 + mm->mmap_base = mmap_base(mm);
20141 +
20142 +#ifdef CONFIG_PAX_RANDMMAP
20143 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20144 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
20145 +#endif
20146 +
20147 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
20148 mm->unmap_area = arch_unmap_area_topdown;
20149 }
20150 diff -urNp linux-2.6.39.4/arch/x86/mm/mmio-mod.c linux-2.6.39.4/arch/x86/mm/mmio-mod.c
20151 --- linux-2.6.39.4/arch/x86/mm/mmio-mod.c 2011-05-19 00:06:34.000000000 -0400
20152 +++ linux-2.6.39.4/arch/x86/mm/mmio-mod.c 2011-08-05 19:44:35.000000000 -0400
20153 @@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
20154 break;
20155 default:
20156 {
20157 - unsigned char *ip = (unsigned char *)instptr;
20158 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
20159 my_trace->opcode = MMIO_UNKNOWN_OP;
20160 my_trace->width = 0;
20161 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
20162 @@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
20163 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
20164 void __iomem *addr)
20165 {
20166 - static atomic_t next_id;
20167 + static atomic_unchecked_t next_id;
20168 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
20169 /* These are page-unaligned. */
20170 struct mmiotrace_map map = {
20171 @@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
20172 .private = trace
20173 },
20174 .phys = offset,
20175 - .id = atomic_inc_return(&next_id)
20176 + .id = atomic_inc_return_unchecked(&next_id)
20177 };
20178 map.map_id = trace->id;
20179
20180 diff -urNp linux-2.6.39.4/arch/x86/mm/numa_32.c linux-2.6.39.4/arch/x86/mm/numa_32.c
20181 --- linux-2.6.39.4/arch/x86/mm/numa_32.c 2011-05-19 00:06:34.000000000 -0400
20182 +++ linux-2.6.39.4/arch/x86/mm/numa_32.c 2011-08-05 19:44:35.000000000 -0400
20183 @@ -99,7 +99,6 @@ unsigned long node_memmap_size_bytes(int
20184 }
20185 #endif
20186
20187 -extern unsigned long find_max_low_pfn(void);
20188 extern unsigned long highend_pfn, highstart_pfn;
20189
20190 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
20191 diff -urNp linux-2.6.39.4/arch/x86/mm/pageattr.c linux-2.6.39.4/arch/x86/mm/pageattr.c
20192 --- linux-2.6.39.4/arch/x86/mm/pageattr.c 2011-05-19 00:06:34.000000000 -0400
20193 +++ linux-2.6.39.4/arch/x86/mm/pageattr.c 2011-08-05 19:44:35.000000000 -0400
20194 @@ -261,7 +261,7 @@ static inline pgprot_t static_protection
20195 */
20196 #ifdef CONFIG_PCI_BIOS
20197 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
20198 - pgprot_val(forbidden) |= _PAGE_NX;
20199 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20200 #endif
20201
20202 /*
20203 @@ -269,9 +269,10 @@ static inline pgprot_t static_protection
20204 * Does not cover __inittext since that is gone later on. On
20205 * 64bit we do not enforce !NX on the low mapping
20206 */
20207 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
20208 - pgprot_val(forbidden) |= _PAGE_NX;
20209 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
20210 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20211
20212 +#ifdef CONFIG_DEBUG_RODATA
20213 /*
20214 * The .rodata section needs to be read-only. Using the pfn
20215 * catches all aliases.
20216 @@ -279,6 +280,7 @@ static inline pgprot_t static_protection
20217 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
20218 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
20219 pgprot_val(forbidden) |= _PAGE_RW;
20220 +#endif
20221
20222 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
20223 /*
20224 @@ -317,6 +319,13 @@ static inline pgprot_t static_protection
20225 }
20226 #endif
20227
20228 +#ifdef CONFIG_PAX_KERNEXEC
20229 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
20230 + pgprot_val(forbidden) |= _PAGE_RW;
20231 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20232 + }
20233 +#endif
20234 +
20235 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
20236
20237 return prot;
20238 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
20239 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
20240 {
20241 /* change init_mm */
20242 + pax_open_kernel();
20243 set_pte_atomic(kpte, pte);
20244 +
20245 #ifdef CONFIG_X86_32
20246 if (!SHARED_KERNEL_PMD) {
20247 +
20248 +#ifdef CONFIG_PAX_PER_CPU_PGD
20249 + unsigned long cpu;
20250 +#else
20251 struct page *page;
20252 +#endif
20253
20254 +#ifdef CONFIG_PAX_PER_CPU_PGD
20255 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20256 + pgd_t *pgd = get_cpu_pgd(cpu);
20257 +#else
20258 list_for_each_entry(page, &pgd_list, lru) {
20259 - pgd_t *pgd;
20260 + pgd_t *pgd = (pgd_t *)page_address(page);
20261 +#endif
20262 +
20263 pud_t *pud;
20264 pmd_t *pmd;
20265
20266 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
20267 + pgd += pgd_index(address);
20268 pud = pud_offset(pgd, address);
20269 pmd = pmd_offset(pud, address);
20270 set_pte_atomic((pte_t *)pmd, pte);
20271 }
20272 }
20273 #endif
20274 + pax_close_kernel();
20275 }
20276
20277 static int
20278 diff -urNp linux-2.6.39.4/arch/x86/mm/pageattr-test.c linux-2.6.39.4/arch/x86/mm/pageattr-test.c
20279 --- linux-2.6.39.4/arch/x86/mm/pageattr-test.c 2011-05-19 00:06:34.000000000 -0400
20280 +++ linux-2.6.39.4/arch/x86/mm/pageattr-test.c 2011-08-05 19:44:35.000000000 -0400
20281 @@ -36,7 +36,7 @@ enum {
20282
20283 static int pte_testbit(pte_t pte)
20284 {
20285 - return pte_flags(pte) & _PAGE_UNUSED1;
20286 + return pte_flags(pte) & _PAGE_CPA_TEST;
20287 }
20288
20289 struct split_state {
20290 diff -urNp linux-2.6.39.4/arch/x86/mm/pat.c linux-2.6.39.4/arch/x86/mm/pat.c
20291 --- linux-2.6.39.4/arch/x86/mm/pat.c 2011-05-19 00:06:34.000000000 -0400
20292 +++ linux-2.6.39.4/arch/x86/mm/pat.c 2011-08-05 19:44:35.000000000 -0400
20293 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
20294
20295 if (!entry) {
20296 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
20297 - current->comm, current->pid, start, end);
20298 + current->comm, task_pid_nr(current), start, end);
20299 return -EINVAL;
20300 }
20301
20302 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
20303 while (cursor < to) {
20304 if (!devmem_is_allowed(pfn)) {
20305 printk(KERN_INFO
20306 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
20307 - current->comm, from, to);
20308 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
20309 + current->comm, from, to, cursor);
20310 return 0;
20311 }
20312 cursor += PAGE_SIZE;
20313 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
20314 printk(KERN_INFO
20315 "%s:%d ioremap_change_attr failed %s "
20316 "for %Lx-%Lx\n",
20317 - current->comm, current->pid,
20318 + current->comm, task_pid_nr(current),
20319 cattr_name(flags),
20320 base, (unsigned long long)(base + size));
20321 return -EINVAL;
20322 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
20323 if (want_flags != flags) {
20324 printk(KERN_WARNING
20325 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
20326 - current->comm, current->pid,
20327 + current->comm, task_pid_nr(current),
20328 cattr_name(want_flags),
20329 (unsigned long long)paddr,
20330 (unsigned long long)(paddr + size),
20331 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
20332 free_memtype(paddr, paddr + size);
20333 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
20334 " for %Lx-%Lx, got %s\n",
20335 - current->comm, current->pid,
20336 + current->comm, task_pid_nr(current),
20337 cattr_name(want_flags),
20338 (unsigned long long)paddr,
20339 (unsigned long long)(paddr + size),
20340 diff -urNp linux-2.6.39.4/arch/x86/mm/pf_in.c linux-2.6.39.4/arch/x86/mm/pf_in.c
20341 --- linux-2.6.39.4/arch/x86/mm/pf_in.c 2011-05-19 00:06:34.000000000 -0400
20342 +++ linux-2.6.39.4/arch/x86/mm/pf_in.c 2011-08-05 19:44:35.000000000 -0400
20343 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
20344 int i;
20345 enum reason_type rv = OTHERS;
20346
20347 - p = (unsigned char *)ins_addr;
20348 + p = (unsigned char *)ktla_ktva(ins_addr);
20349 p += skip_prefix(p, &prf);
20350 p += get_opcode(p, &opcode);
20351
20352 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
20353 struct prefix_bits prf;
20354 int i;
20355
20356 - p = (unsigned char *)ins_addr;
20357 + p = (unsigned char *)ktla_ktva(ins_addr);
20358 p += skip_prefix(p, &prf);
20359 p += get_opcode(p, &opcode);
20360
20361 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
20362 struct prefix_bits prf;
20363 int i;
20364
20365 - p = (unsigned char *)ins_addr;
20366 + p = (unsigned char *)ktla_ktva(ins_addr);
20367 p += skip_prefix(p, &prf);
20368 p += get_opcode(p, &opcode);
20369
20370 @@ -416,7 +416,7 @@ unsigned long get_ins_reg_val(unsigned l
20371 int i;
20372 unsigned long rv;
20373
20374 - p = (unsigned char *)ins_addr;
20375 + p = (unsigned char *)ktla_ktva(ins_addr);
20376 p += skip_prefix(p, &prf);
20377 p += get_opcode(p, &opcode);
20378 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
20379 @@ -476,7 +476,7 @@ unsigned long get_ins_imm_val(unsigned l
20380 int i;
20381 unsigned long rv;
20382
20383 - p = (unsigned char *)ins_addr;
20384 + p = (unsigned char *)ktla_ktva(ins_addr);
20385 p += skip_prefix(p, &prf);
20386 p += get_opcode(p, &opcode);
20387 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
20388 diff -urNp linux-2.6.39.4/arch/x86/mm/pgtable_32.c linux-2.6.39.4/arch/x86/mm/pgtable_32.c
20389 --- linux-2.6.39.4/arch/x86/mm/pgtable_32.c 2011-05-19 00:06:34.000000000 -0400
20390 +++ linux-2.6.39.4/arch/x86/mm/pgtable_32.c 2011-08-05 19:44:35.000000000 -0400
20391 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
20392 return;
20393 }
20394 pte = pte_offset_kernel(pmd, vaddr);
20395 +
20396 + pax_open_kernel();
20397 if (pte_val(pteval))
20398 set_pte_at(&init_mm, vaddr, pte, pteval);
20399 else
20400 pte_clear(&init_mm, vaddr, pte);
20401 + pax_close_kernel();
20402
20403 /*
20404 * It's enough to flush this one mapping.
20405 diff -urNp linux-2.6.39.4/arch/x86/mm/pgtable.c linux-2.6.39.4/arch/x86/mm/pgtable.c
20406 --- linux-2.6.39.4/arch/x86/mm/pgtable.c 2011-05-19 00:06:34.000000000 -0400
20407 +++ linux-2.6.39.4/arch/x86/mm/pgtable.c 2011-08-05 19:44:35.000000000 -0400
20408 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
20409 list_del(&page->lru);
20410 }
20411
20412 -#define UNSHARED_PTRS_PER_PGD \
20413 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20414 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20415 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
20416
20417 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20418 +{
20419 + while (count--)
20420 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
20421 +}
20422 +#endif
20423 +
20424 +#ifdef CONFIG_PAX_PER_CPU_PGD
20425 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20426 +{
20427 + while (count--)
20428 +
20429 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20430 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
20431 +#else
20432 + *dst++ = *src++;
20433 +#endif
20434
20435 +}
20436 +#endif
20437 +
20438 +#ifdef CONFIG_X86_64
20439 +#define pxd_t pud_t
20440 +#define pyd_t pgd_t
20441 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
20442 +#define pxd_free(mm, pud) pud_free((mm), (pud))
20443 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
20444 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
20445 +#define PYD_SIZE PGDIR_SIZE
20446 +#else
20447 +#define pxd_t pmd_t
20448 +#define pyd_t pud_t
20449 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
20450 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
20451 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
20452 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
20453 +#define PYD_SIZE PUD_SIZE
20454 +#endif
20455 +
20456 +#ifdef CONFIG_PAX_PER_CPU_PGD
20457 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
20458 +static inline void pgd_dtor(pgd_t *pgd) {}
20459 +#else
20460 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
20461 {
20462 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
20463 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
20464 pgd_list_del(pgd);
20465 spin_unlock(&pgd_lock);
20466 }
20467 +#endif
20468
20469 /*
20470 * List of all pgd's needed for non-PAE so it can invalidate entries
20471 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
20472 * -- wli
20473 */
20474
20475 -#ifdef CONFIG_X86_PAE
20476 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
20477 /*
20478 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
20479 * updating the top-level pagetable entries to guarantee the
20480 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
20481 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
20482 * and initialize the kernel pmds here.
20483 */
20484 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
20485 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20486
20487 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
20488 {
20489 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
20490 */
20491 flush_tlb_mm(mm);
20492 }
20493 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
20494 +#define PREALLOCATED_PXDS USER_PGD_PTRS
20495 #else /* !CONFIG_X86_PAE */
20496
20497 /* No need to prepopulate any pagetable entries in non-PAE modes. */
20498 -#define PREALLOCATED_PMDS 0
20499 +#define PREALLOCATED_PXDS 0
20500
20501 #endif /* CONFIG_X86_PAE */
20502
20503 -static void free_pmds(pmd_t *pmds[])
20504 +static void free_pxds(pxd_t *pxds[])
20505 {
20506 int i;
20507
20508 - for(i = 0; i < PREALLOCATED_PMDS; i++)
20509 - if (pmds[i])
20510 - free_page((unsigned long)pmds[i]);
20511 + for(i = 0; i < PREALLOCATED_PXDS; i++)
20512 + if (pxds[i])
20513 + free_page((unsigned long)pxds[i]);
20514 }
20515
20516 -static int preallocate_pmds(pmd_t *pmds[])
20517 +static int preallocate_pxds(pxd_t *pxds[])
20518 {
20519 int i;
20520 bool failed = false;
20521
20522 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
20523 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
20524 - if (pmd == NULL)
20525 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
20526 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
20527 + if (pxd == NULL)
20528 failed = true;
20529 - pmds[i] = pmd;
20530 + pxds[i] = pxd;
20531 }
20532
20533 if (failed) {
20534 - free_pmds(pmds);
20535 + free_pxds(pxds);
20536 return -ENOMEM;
20537 }
20538
20539 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
20540 * preallocate which never got a corresponding vma will need to be
20541 * freed manually.
20542 */
20543 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
20544 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
20545 {
20546 int i;
20547
20548 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
20549 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
20550 pgd_t pgd = pgdp[i];
20551
20552 if (pgd_val(pgd) != 0) {
20553 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
20554 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
20555
20556 - pgdp[i] = native_make_pgd(0);
20557 + set_pgd(pgdp + i, native_make_pgd(0));
20558
20559 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
20560 - pmd_free(mm, pmd);
20561 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
20562 + pxd_free(mm, pxd);
20563 }
20564 }
20565 }
20566
20567 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
20568 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
20569 {
20570 - pud_t *pud;
20571 + pyd_t *pyd;
20572 unsigned long addr;
20573 int i;
20574
20575 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
20576 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
20577 return;
20578
20579 - pud = pud_offset(pgd, 0);
20580 +#ifdef CONFIG_X86_64
20581 + pyd = pyd_offset(mm, 0L);
20582 +#else
20583 + pyd = pyd_offset(pgd, 0L);
20584 +#endif
20585
20586 - for (addr = i = 0; i < PREALLOCATED_PMDS;
20587 - i++, pud++, addr += PUD_SIZE) {
20588 - pmd_t *pmd = pmds[i];
20589 + for (addr = i = 0; i < PREALLOCATED_PXDS;
20590 + i++, pyd++, addr += PYD_SIZE) {
20591 + pxd_t *pxd = pxds[i];
20592
20593 if (i >= KERNEL_PGD_BOUNDARY)
20594 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20595 - sizeof(pmd_t) * PTRS_PER_PMD);
20596 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20597 + sizeof(pxd_t) * PTRS_PER_PMD);
20598
20599 - pud_populate(mm, pud, pmd);
20600 + pyd_populate(mm, pyd, pxd);
20601 }
20602 }
20603
20604 pgd_t *pgd_alloc(struct mm_struct *mm)
20605 {
20606 pgd_t *pgd;
20607 - pmd_t *pmds[PREALLOCATED_PMDS];
20608 + pxd_t *pxds[PREALLOCATED_PXDS];
20609
20610 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
20611
20612 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20613
20614 mm->pgd = pgd;
20615
20616 - if (preallocate_pmds(pmds) != 0)
20617 + if (preallocate_pxds(pxds) != 0)
20618 goto out_free_pgd;
20619
20620 if (paravirt_pgd_alloc(mm) != 0)
20621 - goto out_free_pmds;
20622 + goto out_free_pxds;
20623
20624 /*
20625 * Make sure that pre-populating the pmds is atomic with
20626 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20627 spin_lock(&pgd_lock);
20628
20629 pgd_ctor(mm, pgd);
20630 - pgd_prepopulate_pmd(mm, pgd, pmds);
20631 + pgd_prepopulate_pxd(mm, pgd, pxds);
20632
20633 spin_unlock(&pgd_lock);
20634
20635 return pgd;
20636
20637 -out_free_pmds:
20638 - free_pmds(pmds);
20639 +out_free_pxds:
20640 + free_pxds(pxds);
20641 out_free_pgd:
20642 free_page((unsigned long)pgd);
20643 out:
20644 @@ -295,7 +344,7 @@ out:
20645
20646 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
20647 {
20648 - pgd_mop_up_pmds(mm, pgd);
20649 + pgd_mop_up_pxds(mm, pgd);
20650 pgd_dtor(pgd);
20651 paravirt_pgd_free(mm, pgd);
20652 free_page((unsigned long)pgd);
20653 diff -urNp linux-2.6.39.4/arch/x86/mm/setup_nx.c linux-2.6.39.4/arch/x86/mm/setup_nx.c
20654 --- linux-2.6.39.4/arch/x86/mm/setup_nx.c 2011-05-19 00:06:34.000000000 -0400
20655 +++ linux-2.6.39.4/arch/x86/mm/setup_nx.c 2011-08-05 19:44:35.000000000 -0400
20656 @@ -5,8 +5,10 @@
20657 #include <asm/pgtable.h>
20658 #include <asm/proto.h>
20659
20660 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20661 static int disable_nx __cpuinitdata;
20662
20663 +#ifndef CONFIG_PAX_PAGEEXEC
20664 /*
20665 * noexec = on|off
20666 *
20667 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
20668 return 0;
20669 }
20670 early_param("noexec", noexec_setup);
20671 +#endif
20672 +
20673 +#endif
20674
20675 void __cpuinit x86_configure_nx(void)
20676 {
20677 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20678 if (cpu_has_nx && !disable_nx)
20679 __supported_pte_mask |= _PAGE_NX;
20680 else
20681 +#endif
20682 __supported_pte_mask &= ~_PAGE_NX;
20683 }
20684
20685 diff -urNp linux-2.6.39.4/arch/x86/mm/tlb.c linux-2.6.39.4/arch/x86/mm/tlb.c
20686 --- linux-2.6.39.4/arch/x86/mm/tlb.c 2011-05-19 00:06:34.000000000 -0400
20687 +++ linux-2.6.39.4/arch/x86/mm/tlb.c 2011-08-05 19:44:35.000000000 -0400
20688 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
20689 BUG();
20690 cpumask_clear_cpu(cpu,
20691 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
20692 +
20693 +#ifndef CONFIG_PAX_PER_CPU_PGD
20694 load_cr3(swapper_pg_dir);
20695 +#endif
20696 +
20697 }
20698 EXPORT_SYMBOL_GPL(leave_mm);
20699
20700 diff -urNp linux-2.6.39.4/arch/x86/oprofile/backtrace.c linux-2.6.39.4/arch/x86/oprofile/backtrace.c
20701 --- linux-2.6.39.4/arch/x86/oprofile/backtrace.c 2011-05-19 00:06:34.000000000 -0400
20702 +++ linux-2.6.39.4/arch/x86/oprofile/backtrace.c 2011-08-05 19:44:35.000000000 -0400
20703 @@ -57,7 +57,7 @@ dump_user_backtrace_32(struct stack_fram
20704 struct stack_frame_ia32 *fp;
20705
20706 /* Also check accessibility of one struct frame_head beyond */
20707 - if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
20708 + if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
20709 return NULL;
20710 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
20711 return NULL;
20712 @@ -123,7 +123,7 @@ x86_backtrace(struct pt_regs * const reg
20713 {
20714 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
20715
20716 - if (!user_mode_vm(regs)) {
20717 + if (!user_mode(regs)) {
20718 unsigned long stack = kernel_stack_pointer(regs);
20719 if (depth)
20720 dump_trace(NULL, regs, (unsigned long *)stack, 0,
20721 diff -urNp linux-2.6.39.4/arch/x86/pci/mrst.c linux-2.6.39.4/arch/x86/pci/mrst.c
20722 --- linux-2.6.39.4/arch/x86/pci/mrst.c 2011-05-19 00:06:34.000000000 -0400
20723 +++ linux-2.6.39.4/arch/x86/pci/mrst.c 2011-08-05 20:34:06.000000000 -0400
20724 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
20725 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
20726 pci_mmcfg_late_init();
20727 pcibios_enable_irq = mrst_pci_irq_enable;
20728 - pci_root_ops = pci_mrst_ops;
20729 + pax_open_kernel();
20730 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
20731 + pax_close_kernel();
20732 /* Continue with standard init */
20733 return 1;
20734 }
20735 diff -urNp linux-2.6.39.4/arch/x86/pci/pcbios.c linux-2.6.39.4/arch/x86/pci/pcbios.c
20736 --- linux-2.6.39.4/arch/x86/pci/pcbios.c 2011-05-19 00:06:34.000000000 -0400
20737 +++ linux-2.6.39.4/arch/x86/pci/pcbios.c 2011-08-05 20:34:06.000000000 -0400
20738 @@ -79,50 +79,93 @@ union bios32 {
20739 static struct {
20740 unsigned long address;
20741 unsigned short segment;
20742 -} bios32_indirect = { 0, __KERNEL_CS };
20743 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
20744
20745 /*
20746 * Returns the entry point for the given service, NULL on error
20747 */
20748
20749 -static unsigned long bios32_service(unsigned long service)
20750 +static unsigned long __devinit bios32_service(unsigned long service)
20751 {
20752 unsigned char return_code; /* %al */
20753 unsigned long address; /* %ebx */
20754 unsigned long length; /* %ecx */
20755 unsigned long entry; /* %edx */
20756 unsigned long flags;
20757 + struct desc_struct d, *gdt;
20758
20759 local_irq_save(flags);
20760 - __asm__("lcall *(%%edi); cld"
20761 +
20762 + gdt = get_cpu_gdt_table(smp_processor_id());
20763 +
20764 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
20765 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20766 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
20767 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20768 +
20769 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
20770 : "=a" (return_code),
20771 "=b" (address),
20772 "=c" (length),
20773 "=d" (entry)
20774 : "0" (service),
20775 "1" (0),
20776 - "D" (&bios32_indirect));
20777 + "D" (&bios32_indirect),
20778 + "r"(__PCIBIOS_DS)
20779 + : "memory");
20780 +
20781 + pax_open_kernel();
20782 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
20783 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
20784 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
20785 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
20786 + pax_close_kernel();
20787 +
20788 local_irq_restore(flags);
20789
20790 switch (return_code) {
20791 - case 0:
20792 - return address + entry;
20793 - case 0x80: /* Not present */
20794 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20795 - return 0;
20796 - default: /* Shouldn't happen */
20797 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20798 - service, return_code);
20799 + case 0: {
20800 + int cpu;
20801 + unsigned char flags;
20802 +
20803 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
20804 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
20805 + printk(KERN_WARNING "bios32_service: not valid\n");
20806 return 0;
20807 + }
20808 + address = address + PAGE_OFFSET;
20809 + length += 16UL; /* some BIOSs underreport this... */
20810 + flags = 4;
20811 + if (length >= 64*1024*1024) {
20812 + length >>= PAGE_SHIFT;
20813 + flags |= 8;
20814 + }
20815 +
20816 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
20817 + gdt = get_cpu_gdt_table(cpu);
20818 + pack_descriptor(&d, address, length, 0x9b, flags);
20819 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20820 + pack_descriptor(&d, address, length, 0x93, flags);
20821 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20822 + }
20823 + return entry;
20824 + }
20825 + case 0x80: /* Not present */
20826 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20827 + return 0;
20828 + default: /* Shouldn't happen */
20829 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20830 + service, return_code);
20831 + return 0;
20832 }
20833 }
20834
20835 static struct {
20836 unsigned long address;
20837 unsigned short segment;
20838 -} pci_indirect = { 0, __KERNEL_CS };
20839 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
20840
20841 -static int pci_bios_present;
20842 +static int pci_bios_present __read_only;
20843
20844 static int __devinit check_pcibios(void)
20845 {
20846 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
20847 unsigned long flags, pcibios_entry;
20848
20849 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
20850 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
20851 + pci_indirect.address = pcibios_entry;
20852
20853 local_irq_save(flags);
20854 - __asm__(
20855 - "lcall *(%%edi); cld\n\t"
20856 + __asm__("movw %w6, %%ds\n\t"
20857 + "lcall *%%ss:(%%edi); cld\n\t"
20858 + "push %%ss\n\t"
20859 + "pop %%ds\n\t"
20860 "jc 1f\n\t"
20861 "xor %%ah, %%ah\n"
20862 "1:"
20863 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
20864 "=b" (ebx),
20865 "=c" (ecx)
20866 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
20867 - "D" (&pci_indirect)
20868 + "D" (&pci_indirect),
20869 + "r" (__PCIBIOS_DS)
20870 : "memory");
20871 local_irq_restore(flags);
20872
20873 @@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
20874
20875 switch (len) {
20876 case 1:
20877 - __asm__("lcall *(%%esi); cld\n\t"
20878 + __asm__("movw %w6, %%ds\n\t"
20879 + "lcall *%%ss:(%%esi); cld\n\t"
20880 + "push %%ss\n\t"
20881 + "pop %%ds\n\t"
20882 "jc 1f\n\t"
20883 "xor %%ah, %%ah\n"
20884 "1:"
20885 @@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
20886 : "1" (PCIBIOS_READ_CONFIG_BYTE),
20887 "b" (bx),
20888 "D" ((long)reg),
20889 - "S" (&pci_indirect));
20890 + "S" (&pci_indirect),
20891 + "r" (__PCIBIOS_DS));
20892 /*
20893 * Zero-extend the result beyond 8 bits, do not trust the
20894 * BIOS having done it:
20895 @@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
20896 *value &= 0xff;
20897 break;
20898 case 2:
20899 - __asm__("lcall *(%%esi); cld\n\t"
20900 + __asm__("movw %w6, %%ds\n\t"
20901 + "lcall *%%ss:(%%esi); cld\n\t"
20902 + "push %%ss\n\t"
20903 + "pop %%ds\n\t"
20904 "jc 1f\n\t"
20905 "xor %%ah, %%ah\n"
20906 "1:"
20907 @@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
20908 : "1" (PCIBIOS_READ_CONFIG_WORD),
20909 "b" (bx),
20910 "D" ((long)reg),
20911 - "S" (&pci_indirect));
20912 + "S" (&pci_indirect),
20913 + "r" (__PCIBIOS_DS));
20914 /*
20915 * Zero-extend the result beyond 16 bits, do not trust the
20916 * BIOS having done it:
20917 @@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
20918 *value &= 0xffff;
20919 break;
20920 case 4:
20921 - __asm__("lcall *(%%esi); cld\n\t"
20922 + __asm__("movw %w6, %%ds\n\t"
20923 + "lcall *%%ss:(%%esi); cld\n\t"
20924 + "push %%ss\n\t"
20925 + "pop %%ds\n\t"
20926 "jc 1f\n\t"
20927 "xor %%ah, %%ah\n"
20928 "1:"
20929 @@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
20930 : "1" (PCIBIOS_READ_CONFIG_DWORD),
20931 "b" (bx),
20932 "D" ((long)reg),
20933 - "S" (&pci_indirect));
20934 + "S" (&pci_indirect),
20935 + "r" (__PCIBIOS_DS));
20936 break;
20937 }
20938
20939 @@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
20940
20941 switch (len) {
20942 case 1:
20943 - __asm__("lcall *(%%esi); cld\n\t"
20944 + __asm__("movw %w6, %%ds\n\t"
20945 + "lcall *%%ss:(%%esi); cld\n\t"
20946 + "push %%ss\n\t"
20947 + "pop %%ds\n\t"
20948 "jc 1f\n\t"
20949 "xor %%ah, %%ah\n"
20950 "1:"
20951 @@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
20952 "c" (value),
20953 "b" (bx),
20954 "D" ((long)reg),
20955 - "S" (&pci_indirect));
20956 + "S" (&pci_indirect),
20957 + "r" (__PCIBIOS_DS));
20958 break;
20959 case 2:
20960 - __asm__("lcall *(%%esi); cld\n\t"
20961 + __asm__("movw %w6, %%ds\n\t"
20962 + "lcall *%%ss:(%%esi); cld\n\t"
20963 + "push %%ss\n\t"
20964 + "pop %%ds\n\t"
20965 "jc 1f\n\t"
20966 "xor %%ah, %%ah\n"
20967 "1:"
20968 @@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
20969 "c" (value),
20970 "b" (bx),
20971 "D" ((long)reg),
20972 - "S" (&pci_indirect));
20973 + "S" (&pci_indirect),
20974 + "r" (__PCIBIOS_DS));
20975 break;
20976 case 4:
20977 - __asm__("lcall *(%%esi); cld\n\t"
20978 + __asm__("movw %w6, %%ds\n\t"
20979 + "lcall *%%ss:(%%esi); cld\n\t"
20980 + "push %%ss\n\t"
20981 + "pop %%ds\n\t"
20982 "jc 1f\n\t"
20983 "xor %%ah, %%ah\n"
20984 "1:"
20985 @@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
20986 "c" (value),
20987 "b" (bx),
20988 "D" ((long)reg),
20989 - "S" (&pci_indirect));
20990 + "S" (&pci_indirect),
20991 + "r" (__PCIBIOS_DS));
20992 break;
20993 }
20994
20995 @@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
20996
20997 DBG("PCI: Fetching IRQ routing table... ");
20998 __asm__("push %%es\n\t"
20999 + "movw %w8, %%ds\n\t"
21000 "push %%ds\n\t"
21001 "pop %%es\n\t"
21002 - "lcall *(%%esi); cld\n\t"
21003 + "lcall *%%ss:(%%esi); cld\n\t"
21004 "pop %%es\n\t"
21005 + "push %%ss\n\t"
21006 + "pop %%ds\n"
21007 "jc 1f\n\t"
21008 "xor %%ah, %%ah\n"
21009 "1:"
21010 @@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
21011 "1" (0),
21012 "D" ((long) &opt),
21013 "S" (&pci_indirect),
21014 - "m" (opt)
21015 + "m" (opt),
21016 + "r" (__PCIBIOS_DS)
21017 : "memory");
21018 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
21019 if (ret & 0xff00)
21020 @@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
21021 {
21022 int ret;
21023
21024 - __asm__("lcall *(%%esi); cld\n\t"
21025 + __asm__("movw %w5, %%ds\n\t"
21026 + "lcall *%%ss:(%%esi); cld\n\t"
21027 + "push %%ss\n\t"
21028 + "pop %%ds\n"
21029 "jc 1f\n\t"
21030 "xor %%ah, %%ah\n"
21031 "1:"
21032 @@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
21033 : "0" (PCIBIOS_SET_PCI_HW_INT),
21034 "b" ((dev->bus->number << 8) | dev->devfn),
21035 "c" ((irq << 8) | (pin + 10)),
21036 - "S" (&pci_indirect));
21037 + "S" (&pci_indirect),
21038 + "r" (__PCIBIOS_DS));
21039 return !(ret & 0xff00);
21040 }
21041 EXPORT_SYMBOL(pcibios_set_irq_routing);
21042 diff -urNp linux-2.6.39.4/arch/x86/platform/efi/efi_32.c linux-2.6.39.4/arch/x86/platform/efi/efi_32.c
21043 --- linux-2.6.39.4/arch/x86/platform/efi/efi_32.c 2011-05-19 00:06:34.000000000 -0400
21044 +++ linux-2.6.39.4/arch/x86/platform/efi/efi_32.c 2011-08-05 19:44:35.000000000 -0400
21045 @@ -38,70 +38,37 @@
21046 */
21047
21048 static unsigned long efi_rt_eflags;
21049 -static pgd_t efi_bak_pg_dir_pointer[2];
21050 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
21051
21052 -void efi_call_phys_prelog(void)
21053 +void __init efi_call_phys_prelog(void)
21054 {
21055 - unsigned long cr4;
21056 - unsigned long temp;
21057 struct desc_ptr gdt_descr;
21058
21059 local_irq_save(efi_rt_eflags);
21060
21061 - /*
21062 - * If I don't have PAE, I should just duplicate two entries in page
21063 - * directory. If I have PAE, I just need to duplicate one entry in
21064 - * page directory.
21065 - */
21066 - cr4 = read_cr4_safe();
21067 -
21068 - if (cr4 & X86_CR4_PAE) {
21069 - efi_bak_pg_dir_pointer[0].pgd =
21070 - swapper_pg_dir[pgd_index(0)].pgd;
21071 - swapper_pg_dir[0].pgd =
21072 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21073 - } else {
21074 - efi_bak_pg_dir_pointer[0].pgd =
21075 - swapper_pg_dir[pgd_index(0)].pgd;
21076 - efi_bak_pg_dir_pointer[1].pgd =
21077 - swapper_pg_dir[pgd_index(0x400000)].pgd;
21078 - swapper_pg_dir[pgd_index(0)].pgd =
21079 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21080 - temp = PAGE_OFFSET + 0x400000;
21081 - swapper_pg_dir[pgd_index(0x400000)].pgd =
21082 - swapper_pg_dir[pgd_index(temp)].pgd;
21083 - }
21084 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
21085 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21086 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
21087
21088 /*
21089 * After the lock is released, the original page table is restored.
21090 */
21091 __flush_tlb_all();
21092
21093 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
21094 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
21095 gdt_descr.size = GDT_SIZE - 1;
21096 load_gdt(&gdt_descr);
21097 }
21098
21099 -void efi_call_phys_epilog(void)
21100 +void __init efi_call_phys_epilog(void)
21101 {
21102 - unsigned long cr4;
21103 struct desc_ptr gdt_descr;
21104
21105 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
21106 + gdt_descr.address = get_cpu_gdt_table(0);
21107 gdt_descr.size = GDT_SIZE - 1;
21108 load_gdt(&gdt_descr);
21109
21110 - cr4 = read_cr4_safe();
21111 -
21112 - if (cr4 & X86_CR4_PAE) {
21113 - swapper_pg_dir[pgd_index(0)].pgd =
21114 - efi_bak_pg_dir_pointer[0].pgd;
21115 - } else {
21116 - swapper_pg_dir[pgd_index(0)].pgd =
21117 - efi_bak_pg_dir_pointer[0].pgd;
21118 - swapper_pg_dir[pgd_index(0x400000)].pgd =
21119 - efi_bak_pg_dir_pointer[1].pgd;
21120 - }
21121 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
21122
21123 /*
21124 * After the lock is released, the original page table is restored.
21125 diff -urNp linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S
21126 --- linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S 2011-05-19 00:06:34.000000000 -0400
21127 +++ linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S 2011-08-05 19:44:35.000000000 -0400
21128 @@ -6,6 +6,7 @@
21129 */
21130
21131 #include <linux/linkage.h>
21132 +#include <linux/init.h>
21133 #include <asm/page_types.h>
21134
21135 /*
21136 @@ -20,7 +21,7 @@
21137 * service functions will comply with gcc calling convention, too.
21138 */
21139
21140 -.text
21141 +__INIT
21142 ENTRY(efi_call_phys)
21143 /*
21144 * 0. The function can only be called in Linux kernel. So CS has been
21145 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
21146 * The mapping of lower virtual memory has been created in prelog and
21147 * epilog.
21148 */
21149 - movl $1f, %edx
21150 - subl $__PAGE_OFFSET, %edx
21151 - jmp *%edx
21152 + jmp 1f-__PAGE_OFFSET
21153 1:
21154
21155 /*
21156 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
21157 * parameter 2, ..., param n. To make things easy, we save the return
21158 * address of efi_call_phys in a global variable.
21159 */
21160 - popl %edx
21161 - movl %edx, saved_return_addr
21162 - /* get the function pointer into ECX*/
21163 - popl %ecx
21164 - movl %ecx, efi_rt_function_ptr
21165 - movl $2f, %edx
21166 - subl $__PAGE_OFFSET, %edx
21167 - pushl %edx
21168 + popl (saved_return_addr)
21169 + popl (efi_rt_function_ptr)
21170
21171 /*
21172 * 3. Clear PG bit in %CR0.
21173 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
21174 /*
21175 * 5. Call the physical function.
21176 */
21177 - jmp *%ecx
21178 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
21179
21180 -2:
21181 /*
21182 * 6. After EFI runtime service returns, control will return to
21183 * following instruction. We'd better readjust stack pointer first.
21184 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
21185 movl %cr0, %edx
21186 orl $0x80000000, %edx
21187 movl %edx, %cr0
21188 - jmp 1f
21189 -1:
21190 +
21191 /*
21192 * 8. Now restore the virtual mode from flat mode by
21193 * adding EIP with PAGE_OFFSET.
21194 */
21195 - movl $1f, %edx
21196 - jmp *%edx
21197 + jmp 1f+__PAGE_OFFSET
21198 1:
21199
21200 /*
21201 * 9. Balance the stack. And because EAX contain the return value,
21202 * we'd better not clobber it.
21203 */
21204 - leal efi_rt_function_ptr, %edx
21205 - movl (%edx), %ecx
21206 - pushl %ecx
21207 + pushl (efi_rt_function_ptr)
21208
21209 /*
21210 - * 10. Push the saved return address onto the stack and return.
21211 + * 10. Return to the saved return address.
21212 */
21213 - leal saved_return_addr, %edx
21214 - movl (%edx), %ecx
21215 - pushl %ecx
21216 - ret
21217 + jmpl *(saved_return_addr)
21218 ENDPROC(efi_call_phys)
21219 .previous
21220
21221 -.data
21222 +__INITDATA
21223 saved_return_addr:
21224 .long 0
21225 efi_rt_function_ptr:
21226 diff -urNp linux-2.6.39.4/arch/x86/platform/mrst/mrst.c linux-2.6.39.4/arch/x86/platform/mrst/mrst.c
21227 --- linux-2.6.39.4/arch/x86/platform/mrst/mrst.c 2011-05-19 00:06:34.000000000 -0400
21228 +++ linux-2.6.39.4/arch/x86/platform/mrst/mrst.c 2011-08-05 20:34:06.000000000 -0400
21229 @@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
21230 }
21231
21232 /* Reboot and power off are handled by the SCU on a MID device */
21233 -static void mrst_power_off(void)
21234 +static __noreturn void mrst_power_off(void)
21235 {
21236 intel_scu_ipc_simple_command(0xf1, 1);
21237 + BUG();
21238 }
21239
21240 -static void mrst_reboot(void)
21241 +static __noreturn void mrst_reboot(void)
21242 {
21243 intel_scu_ipc_simple_command(0xf1, 0);
21244 + BUG();
21245 }
21246
21247 /*
21248 diff -urNp linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c
21249 --- linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c 2011-05-19 00:06:34.000000000 -0400
21250 +++ linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c 2011-08-05 19:44:35.000000000 -0400
21251 @@ -342,6 +342,8 @@ static void uv_reset_with_ipi(struct bau
21252 cpumask_t mask;
21253 struct reset_args reset_args;
21254
21255 + pax_track_stack();
21256 +
21257 reset_args.sender = sender;
21258
21259 cpus_clear(mask);
21260 diff -urNp linux-2.6.39.4/arch/x86/power/cpu.c linux-2.6.39.4/arch/x86/power/cpu.c
21261 --- linux-2.6.39.4/arch/x86/power/cpu.c 2011-05-19 00:06:34.000000000 -0400
21262 +++ linux-2.6.39.4/arch/x86/power/cpu.c 2011-08-05 19:44:35.000000000 -0400
21263 @@ -130,7 +130,7 @@ static void do_fpu_end(void)
21264 static void fix_processor_context(void)
21265 {
21266 int cpu = smp_processor_id();
21267 - struct tss_struct *t = &per_cpu(init_tss, cpu);
21268 + struct tss_struct *t = init_tss + cpu;
21269
21270 set_tss_desc(cpu, t); /*
21271 * This just modifies memory; should not be
21272 @@ -140,7 +140,9 @@ static void fix_processor_context(void)
21273 */
21274
21275 #ifdef CONFIG_X86_64
21276 + pax_open_kernel();
21277 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
21278 + pax_close_kernel();
21279
21280 syscall_init(); /* This sets MSR_*STAR and related */
21281 #endif
21282 Binary files linux-2.6.39.4/arch/x86/tools/test_get_len and linux-2.6.39.4/arch/x86/tools/test_get_len differ
21283 diff -urNp linux-2.6.39.4/arch/x86/vdso/Makefile linux-2.6.39.4/arch/x86/vdso/Makefile
21284 --- linux-2.6.39.4/arch/x86/vdso/Makefile 2011-05-19 00:06:34.000000000 -0400
21285 +++ linux-2.6.39.4/arch/x86/vdso/Makefile 2011-08-05 19:44:35.000000000 -0400
21286 @@ -123,7 +123,7 @@ quiet_cmd_vdso = VDSO $@
21287 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
21288 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
21289
21290 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21291 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21292 GCOV_PROFILE := n
21293
21294 #
21295 diff -urNp linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c
21296 --- linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c 2011-05-19 00:06:34.000000000 -0400
21297 +++ linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c 2011-08-05 19:44:35.000000000 -0400
21298 @@ -22,24 +22,48 @@
21299 #include <asm/hpet.h>
21300 #include <asm/unistd.h>
21301 #include <asm/io.h>
21302 +#include <asm/fixmap.h>
21303 #include "vextern.h"
21304
21305 #define gtod vdso_vsyscall_gtod_data
21306
21307 +notrace noinline long __vdso_fallback_time(long *t)
21308 +{
21309 + long secs;
21310 + asm volatile("syscall"
21311 + : "=a" (secs)
21312 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
21313 + return secs;
21314 +}
21315 +
21316 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
21317 {
21318 long ret;
21319 asm("syscall" : "=a" (ret) :
21320 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
21321 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
21322 return ret;
21323 }
21324
21325 +notrace static inline cycle_t __vdso_vread_hpet(void)
21326 +{
21327 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
21328 +}
21329 +
21330 +notrace static inline cycle_t __vdso_vread_tsc(void)
21331 +{
21332 + cycle_t ret = (cycle_t)vget_cycles();
21333 +
21334 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
21335 +}
21336 +
21337 notrace static inline long vgetns(void)
21338 {
21339 long v;
21340 - cycles_t (*vread)(void);
21341 - vread = gtod->clock.vread;
21342 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
21343 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
21344 + v = __vdso_vread_tsc();
21345 + else
21346 + v = __vdso_vread_hpet();
21347 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
21348 return (v * gtod->clock.mult) >> gtod->clock.shift;
21349 }
21350
21351 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
21352
21353 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
21354 {
21355 - if (likely(gtod->sysctl_enabled))
21356 + if (likely(gtod->sysctl_enabled &&
21357 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
21358 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
21359 switch (clock) {
21360 case CLOCK_REALTIME:
21361 if (likely(gtod->clock.vread))
21362 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
21363 int clock_gettime(clockid_t, struct timespec *)
21364 __attribute__((weak, alias("__vdso_clock_gettime")));
21365
21366 -notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
21367 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
21368 {
21369 long ret;
21370 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
21371 + asm("syscall" : "=a" (ret) :
21372 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
21373 + return ret;
21374 +}
21375 +
21376 +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
21377 +{
21378 + if (likely(gtod->sysctl_enabled &&
21379 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
21380 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
21381 + {
21382 if (likely(tv != NULL)) {
21383 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
21384 offsetof(struct timespec, tv_nsec) ||
21385 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
21386 }
21387 return 0;
21388 }
21389 - asm("syscall" : "=a" (ret) :
21390 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
21391 - return ret;
21392 + return __vdso_fallback_gettimeofday(tv, tz);
21393 }
21394 int gettimeofday(struct timeval *, struct timezone *)
21395 __attribute__((weak, alias("__vdso_gettimeofday")));
21396 diff -urNp linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c
21397 --- linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c 2011-05-19 00:06:34.000000000 -0400
21398 +++ linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c 2011-08-05 19:44:35.000000000 -0400
21399 @@ -25,6 +25,7 @@
21400 #include <asm/tlbflush.h>
21401 #include <asm/vdso.h>
21402 #include <asm/proto.h>
21403 +#include <asm/mman.h>
21404
21405 enum {
21406 VDSO_DISABLED = 0,
21407 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
21408 void enable_sep_cpu(void)
21409 {
21410 int cpu = get_cpu();
21411 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
21412 + struct tss_struct *tss = init_tss + cpu;
21413
21414 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21415 put_cpu();
21416 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
21417 gate_vma.vm_start = FIXADDR_USER_START;
21418 gate_vma.vm_end = FIXADDR_USER_END;
21419 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
21420 - gate_vma.vm_page_prot = __P101;
21421 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
21422 /*
21423 * Make sure the vDSO gets into every core dump.
21424 * Dumping its contents makes post-mortem fully interpretable later
21425 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
21426 if (compat)
21427 addr = VDSO_HIGH_BASE;
21428 else {
21429 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
21430 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
21431 if (IS_ERR_VALUE(addr)) {
21432 ret = addr;
21433 goto up_fail;
21434 }
21435 }
21436
21437 - current->mm->context.vdso = (void *)addr;
21438 + current->mm->context.vdso = addr;
21439
21440 if (compat_uses_vma || !compat) {
21441 /*
21442 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
21443 }
21444
21445 current_thread_info()->sysenter_return =
21446 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21447 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21448
21449 up_fail:
21450 if (ret)
21451 - current->mm->context.vdso = NULL;
21452 + current->mm->context.vdso = 0;
21453
21454 up_write(&mm->mmap_sem);
21455
21456 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
21457
21458 const char *arch_vma_name(struct vm_area_struct *vma)
21459 {
21460 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21461 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21462 return "[vdso]";
21463 +
21464 +#ifdef CONFIG_PAX_SEGMEXEC
21465 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
21466 + return "[vdso]";
21467 +#endif
21468 +
21469 return NULL;
21470 }
21471
21472 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
21473 * Check to see if the corresponding task was created in compat vdso
21474 * mode.
21475 */
21476 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
21477 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
21478 return &gate_vma;
21479 return NULL;
21480 }
21481 diff -urNp linux-2.6.39.4/arch/x86/vdso/vdso.lds.S linux-2.6.39.4/arch/x86/vdso/vdso.lds.S
21482 --- linux-2.6.39.4/arch/x86/vdso/vdso.lds.S 2011-05-19 00:06:34.000000000 -0400
21483 +++ linux-2.6.39.4/arch/x86/vdso/vdso.lds.S 2011-08-05 19:44:35.000000000 -0400
21484 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
21485 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
21486 #include "vextern.h"
21487 #undef VEXTERN
21488 +
21489 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
21490 +VEXTERN(fallback_gettimeofday)
21491 +VEXTERN(fallback_time)
21492 +VEXTERN(getcpu)
21493 +#undef VEXTERN
21494 diff -urNp linux-2.6.39.4/arch/x86/vdso/vextern.h linux-2.6.39.4/arch/x86/vdso/vextern.h
21495 --- linux-2.6.39.4/arch/x86/vdso/vextern.h 2011-05-19 00:06:34.000000000 -0400
21496 +++ linux-2.6.39.4/arch/x86/vdso/vextern.h 2011-08-05 19:44:35.000000000 -0400
21497 @@ -11,6 +11,5 @@
21498 put into vextern.h and be referenced as a pointer with vdso prefix.
21499 The main kernel later fills in the values. */
21500
21501 -VEXTERN(jiffies)
21502 VEXTERN(vgetcpu_mode)
21503 VEXTERN(vsyscall_gtod_data)
21504 diff -urNp linux-2.6.39.4/arch/x86/vdso/vma.c linux-2.6.39.4/arch/x86/vdso/vma.c
21505 --- linux-2.6.39.4/arch/x86/vdso/vma.c 2011-05-19 00:06:34.000000000 -0400
21506 +++ linux-2.6.39.4/arch/x86/vdso/vma.c 2011-08-05 19:44:35.000000000 -0400
21507 @@ -58,7 +58,7 @@ static int __init init_vdso_vars(void)
21508 if (!vbase)
21509 goto oom;
21510
21511 - if (memcmp(vbase, "\177ELF", 4)) {
21512 + if (memcmp(vbase, ELFMAG, SELFMAG)) {
21513 printk("VDSO: I'm broken; not ELF\n");
21514 vdso_enabled = 0;
21515 }
21516 @@ -118,7 +118,7 @@ int arch_setup_additional_pages(struct l
21517 goto up_fail;
21518 }
21519
21520 - current->mm->context.vdso = (void *)addr;
21521 + current->mm->context.vdso = addr;
21522
21523 ret = install_special_mapping(mm, addr, vdso_size,
21524 VM_READ|VM_EXEC|
21525 @@ -126,7 +126,7 @@ int arch_setup_additional_pages(struct l
21526 VM_ALWAYSDUMP,
21527 vdso_pages);
21528 if (ret) {
21529 - current->mm->context.vdso = NULL;
21530 + current->mm->context.vdso = 0;
21531 goto up_fail;
21532 }
21533
21534 @@ -134,10 +134,3 @@ up_fail:
21535 up_write(&mm->mmap_sem);
21536 return ret;
21537 }
21538 -
21539 -static __init int vdso_setup(char *s)
21540 -{
21541 - vdso_enabled = simple_strtoul(s, NULL, 0);
21542 - return 0;
21543 -}
21544 -__setup("vdso=", vdso_setup);
21545 diff -urNp linux-2.6.39.4/arch/x86/xen/enlighten.c linux-2.6.39.4/arch/x86/xen/enlighten.c
21546 --- linux-2.6.39.4/arch/x86/xen/enlighten.c 2011-05-19 00:06:34.000000000 -0400
21547 +++ linux-2.6.39.4/arch/x86/xen/enlighten.c 2011-08-05 19:44:35.000000000 -0400
21548 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
21549
21550 struct shared_info xen_dummy_shared_info;
21551
21552 -void *xen_initial_gdt;
21553 -
21554 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
21555 __read_mostly int xen_have_vector_callback;
21556 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
21557 @@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
21558 #endif
21559 };
21560
21561 -static void xen_reboot(int reason)
21562 +static __noreturn void xen_reboot(int reason)
21563 {
21564 struct sched_shutdown r = { .reason = reason };
21565
21566 @@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
21567 BUG();
21568 }
21569
21570 -static void xen_restart(char *msg)
21571 +static __noreturn void xen_restart(char *msg)
21572 {
21573 xen_reboot(SHUTDOWN_reboot);
21574 }
21575
21576 -static void xen_emergency_restart(void)
21577 +static __noreturn void xen_emergency_restart(void)
21578 {
21579 xen_reboot(SHUTDOWN_reboot);
21580 }
21581
21582 -static void xen_machine_halt(void)
21583 +static __noreturn void xen_machine_halt(void)
21584 {
21585 xen_reboot(SHUTDOWN_poweroff);
21586 }
21587 @@ -1127,7 +1125,17 @@ asmlinkage void __init xen_start_kernel(
21588 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
21589
21590 /* Work out if we support NX */
21591 - x86_configure_nx();
21592 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21593 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
21594 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
21595 + unsigned l, h;
21596 +
21597 + __supported_pte_mask |= _PAGE_NX;
21598 + rdmsr(MSR_EFER, l, h);
21599 + l |= EFER_NX;
21600 + wrmsr(MSR_EFER, l, h);
21601 + }
21602 +#endif
21603
21604 xen_setup_features();
21605
21606 @@ -1158,13 +1166,6 @@ asmlinkage void __init xen_start_kernel(
21607
21608 machine_ops = xen_machine_ops;
21609
21610 - /*
21611 - * The only reliable way to retain the initial address of the
21612 - * percpu gdt_page is to remember it here, so we can go and
21613 - * mark it RW later, when the initial percpu area is freed.
21614 - */
21615 - xen_initial_gdt = &per_cpu(gdt_page, 0);
21616 -
21617 xen_smp_init();
21618
21619 #ifdef CONFIG_ACPI_NUMA
21620 diff -urNp linux-2.6.39.4/arch/x86/xen/mmu.c linux-2.6.39.4/arch/x86/xen/mmu.c
21621 --- linux-2.6.39.4/arch/x86/xen/mmu.c 2011-07-09 09:18:51.000000000 -0400
21622 +++ linux-2.6.39.4/arch/x86/xen/mmu.c 2011-08-05 19:44:35.000000000 -0400
21623 @@ -1801,6 +1801,8 @@ __init pgd_t *xen_setup_kernel_pagetable
21624 convert_pfn_mfn(init_level4_pgt);
21625 convert_pfn_mfn(level3_ident_pgt);
21626 convert_pfn_mfn(level3_kernel_pgt);
21627 + convert_pfn_mfn(level3_vmalloc_pgt);
21628 + convert_pfn_mfn(level3_vmemmap_pgt);
21629
21630 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
21631 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
21632 @@ -1819,7 +1821,10 @@ __init pgd_t *xen_setup_kernel_pagetable
21633 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
21634 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
21635 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
21636 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
21637 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
21638 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
21639 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
21640 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
21641 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
21642
21643 diff -urNp linux-2.6.39.4/arch/x86/xen/smp.c linux-2.6.39.4/arch/x86/xen/smp.c
21644 --- linux-2.6.39.4/arch/x86/xen/smp.c 2011-07-09 09:18:51.000000000 -0400
21645 +++ linux-2.6.39.4/arch/x86/xen/smp.c 2011-08-05 19:44:35.000000000 -0400
21646 @@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_
21647 {
21648 BUG_ON(smp_processor_id() != 0);
21649 native_smp_prepare_boot_cpu();
21650 -
21651 - /* We've switched to the "real" per-cpu gdt, so make sure the
21652 - old memory can be recycled */
21653 - make_lowmem_page_readwrite(xen_initial_gdt);
21654 -
21655 xen_filter_cpu_maps();
21656 xen_setup_vcpu_info_placement();
21657 }
21658 @@ -266,12 +261,12 @@ cpu_initialize_context(unsigned int cpu,
21659 gdt = get_cpu_gdt_table(cpu);
21660
21661 ctxt->flags = VGCF_IN_KERNEL;
21662 - ctxt->user_regs.ds = __USER_DS;
21663 - ctxt->user_regs.es = __USER_DS;
21664 + ctxt->user_regs.ds = __KERNEL_DS;
21665 + ctxt->user_regs.es = __KERNEL_DS;
21666 ctxt->user_regs.ss = __KERNEL_DS;
21667 #ifdef CONFIG_X86_32
21668 ctxt->user_regs.fs = __KERNEL_PERCPU;
21669 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
21670 + savesegment(gs, ctxt->user_regs.gs);
21671 #else
21672 ctxt->gs_base_kernel = per_cpu_offset(cpu);
21673 #endif
21674 @@ -322,13 +317,12 @@ static int __cpuinit xen_cpu_up(unsigned
21675 int rc;
21676
21677 per_cpu(current_task, cpu) = idle;
21678 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
21679 #ifdef CONFIG_X86_32
21680 irq_ctx_init(cpu);
21681 #else
21682 clear_tsk_thread_flag(idle, TIF_FORK);
21683 - per_cpu(kernel_stack, cpu) =
21684 - (unsigned long)task_stack_page(idle) -
21685 - KERNEL_STACK_OFFSET + THREAD_SIZE;
21686 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
21687 #endif
21688 xen_setup_runstate_info(cpu);
21689 xen_setup_timer(cpu);
21690 diff -urNp linux-2.6.39.4/arch/x86/xen/xen-asm_32.S linux-2.6.39.4/arch/x86/xen/xen-asm_32.S
21691 --- linux-2.6.39.4/arch/x86/xen/xen-asm_32.S 2011-05-19 00:06:34.000000000 -0400
21692 +++ linux-2.6.39.4/arch/x86/xen/xen-asm_32.S 2011-08-05 19:44:35.000000000 -0400
21693 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
21694 ESP_OFFSET=4 # bytes pushed onto stack
21695
21696 /*
21697 - * Store vcpu_info pointer for easy access. Do it this way to
21698 - * avoid having to reload %fs
21699 + * Store vcpu_info pointer for easy access.
21700 */
21701 #ifdef CONFIG_SMP
21702 - GET_THREAD_INFO(%eax)
21703 - movl TI_cpu(%eax), %eax
21704 - movl __per_cpu_offset(,%eax,4), %eax
21705 - mov xen_vcpu(%eax), %eax
21706 + push %fs
21707 + mov $(__KERNEL_PERCPU), %eax
21708 + mov %eax, %fs
21709 + mov PER_CPU_VAR(xen_vcpu), %eax
21710 + pop %fs
21711 #else
21712 movl xen_vcpu, %eax
21713 #endif
21714 diff -urNp linux-2.6.39.4/arch/x86/xen/xen-head.S linux-2.6.39.4/arch/x86/xen/xen-head.S
21715 --- linux-2.6.39.4/arch/x86/xen/xen-head.S 2011-05-19 00:06:34.000000000 -0400
21716 +++ linux-2.6.39.4/arch/x86/xen/xen-head.S 2011-08-05 19:44:35.000000000 -0400
21717 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
21718 #ifdef CONFIG_X86_32
21719 mov %esi,xen_start_info
21720 mov $init_thread_union+THREAD_SIZE,%esp
21721 +#ifdef CONFIG_SMP
21722 + movl $cpu_gdt_table,%edi
21723 + movl $__per_cpu_load,%eax
21724 + movw %ax,__KERNEL_PERCPU + 2(%edi)
21725 + rorl $16,%eax
21726 + movb %al,__KERNEL_PERCPU + 4(%edi)
21727 + movb %ah,__KERNEL_PERCPU + 7(%edi)
21728 + movl $__per_cpu_end - 1,%eax
21729 + subl $__per_cpu_start,%eax
21730 + movw %ax,__KERNEL_PERCPU + 0(%edi)
21731 +#endif
21732 #else
21733 mov %rsi,xen_start_info
21734 mov $init_thread_union+THREAD_SIZE,%rsp
21735 diff -urNp linux-2.6.39.4/arch/x86/xen/xen-ops.h linux-2.6.39.4/arch/x86/xen/xen-ops.h
21736 --- linux-2.6.39.4/arch/x86/xen/xen-ops.h 2011-05-19 00:06:34.000000000 -0400
21737 +++ linux-2.6.39.4/arch/x86/xen/xen-ops.h 2011-08-05 19:44:35.000000000 -0400
21738 @@ -10,8 +10,6 @@
21739 extern const char xen_hypervisor_callback[];
21740 extern const char xen_failsafe_callback[];
21741
21742 -extern void *xen_initial_gdt;
21743 -
21744 struct trap_info;
21745 void xen_copy_trap_info(struct trap_info *traps);
21746
21747 diff -urNp linux-2.6.39.4/block/blk-iopoll.c linux-2.6.39.4/block/blk-iopoll.c
21748 --- linux-2.6.39.4/block/blk-iopoll.c 2011-05-19 00:06:34.000000000 -0400
21749 +++ linux-2.6.39.4/block/blk-iopoll.c 2011-08-05 19:44:35.000000000 -0400
21750 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
21751 }
21752 EXPORT_SYMBOL(blk_iopoll_complete);
21753
21754 -static void blk_iopoll_softirq(struct softirq_action *h)
21755 +static void blk_iopoll_softirq(void)
21756 {
21757 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
21758 int rearm = 0, budget = blk_iopoll_budget;
21759 diff -urNp linux-2.6.39.4/block/blk-map.c linux-2.6.39.4/block/blk-map.c
21760 --- linux-2.6.39.4/block/blk-map.c 2011-05-19 00:06:34.000000000 -0400
21761 +++ linux-2.6.39.4/block/blk-map.c 2011-08-05 19:44:35.000000000 -0400
21762 @@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
21763 if (!len || !kbuf)
21764 return -EINVAL;
21765
21766 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
21767 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
21768 if (do_copy)
21769 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
21770 else
21771 diff -urNp linux-2.6.39.4/block/blk-softirq.c linux-2.6.39.4/block/blk-softirq.c
21772 --- linux-2.6.39.4/block/blk-softirq.c 2011-05-19 00:06:34.000000000 -0400
21773 +++ linux-2.6.39.4/block/blk-softirq.c 2011-08-05 19:44:35.000000000 -0400
21774 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
21775 * Softirq action handler - move entries to local list and loop over them
21776 * while passing them to the queue registered handler.
21777 */
21778 -static void blk_done_softirq(struct softirq_action *h)
21779 +static void blk_done_softirq(void)
21780 {
21781 struct list_head *cpu_list, local_list;
21782
21783 diff -urNp linux-2.6.39.4/block/bsg.c linux-2.6.39.4/block/bsg.c
21784 --- linux-2.6.39.4/block/bsg.c 2011-05-19 00:06:34.000000000 -0400
21785 +++ linux-2.6.39.4/block/bsg.c 2011-08-05 19:44:35.000000000 -0400
21786 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
21787 struct sg_io_v4 *hdr, struct bsg_device *bd,
21788 fmode_t has_write_perm)
21789 {
21790 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21791 + unsigned char *cmdptr;
21792 +
21793 if (hdr->request_len > BLK_MAX_CDB) {
21794 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
21795 if (!rq->cmd)
21796 return -ENOMEM;
21797 - }
21798 + cmdptr = rq->cmd;
21799 + } else
21800 + cmdptr = tmpcmd;
21801
21802 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
21803 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
21804 hdr->request_len))
21805 return -EFAULT;
21806
21807 + if (cmdptr != rq->cmd)
21808 + memcpy(rq->cmd, cmdptr, hdr->request_len);
21809 +
21810 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
21811 if (blk_verify_command(rq->cmd, has_write_perm))
21812 return -EPERM;
21813 diff -urNp linux-2.6.39.4/block/scsi_ioctl.c linux-2.6.39.4/block/scsi_ioctl.c
21814 --- linux-2.6.39.4/block/scsi_ioctl.c 2011-05-19 00:06:34.000000000 -0400
21815 +++ linux-2.6.39.4/block/scsi_ioctl.c 2011-08-05 19:44:35.000000000 -0400
21816 @@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
21817 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
21818 struct sg_io_hdr *hdr, fmode_t mode)
21819 {
21820 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
21821 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21822 + unsigned char *cmdptr;
21823 +
21824 + if (rq->cmd != rq->__cmd)
21825 + cmdptr = rq->cmd;
21826 + else
21827 + cmdptr = tmpcmd;
21828 +
21829 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
21830 return -EFAULT;
21831 +
21832 + if (cmdptr != rq->cmd)
21833 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
21834 +
21835 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
21836 return -EPERM;
21837
21838 @@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
21839 int err;
21840 unsigned int in_len, out_len, bytes, opcode, cmdlen;
21841 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
21842 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21843 + unsigned char *cmdptr;
21844
21845 if (!sic)
21846 return -EINVAL;
21847 @@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
21848 */
21849 err = -EFAULT;
21850 rq->cmd_len = cmdlen;
21851 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
21852 +
21853 + if (rq->cmd != rq->__cmd)
21854 + cmdptr = rq->cmd;
21855 + else
21856 + cmdptr = tmpcmd;
21857 +
21858 + if (copy_from_user(cmdptr, sic->data, cmdlen))
21859 goto error;
21860
21861 + if (rq->cmd != cmdptr)
21862 + memcpy(rq->cmd, cmdptr, cmdlen);
21863 +
21864 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
21865 goto error;
21866
21867 diff -urNp linux-2.6.39.4/crypto/cryptd.c linux-2.6.39.4/crypto/cryptd.c
21868 --- linux-2.6.39.4/crypto/cryptd.c 2011-05-19 00:06:34.000000000 -0400
21869 +++ linux-2.6.39.4/crypto/cryptd.c 2011-08-05 20:34:06.000000000 -0400
21870 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
21871
21872 struct cryptd_blkcipher_request_ctx {
21873 crypto_completion_t complete;
21874 -};
21875 +} __no_const;
21876
21877 struct cryptd_hash_ctx {
21878 struct crypto_shash *child;
21879 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
21880
21881 struct cryptd_aead_request_ctx {
21882 crypto_completion_t complete;
21883 -};
21884 +} __no_const;
21885
21886 static void cryptd_queue_worker(struct work_struct *work);
21887
21888 diff -urNp linux-2.6.39.4/crypto/gf128mul.c linux-2.6.39.4/crypto/gf128mul.c
21889 --- linux-2.6.39.4/crypto/gf128mul.c 2011-05-19 00:06:34.000000000 -0400
21890 +++ linux-2.6.39.4/crypto/gf128mul.c 2011-08-05 19:44:35.000000000 -0400
21891 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
21892 for (i = 0; i < 7; ++i)
21893 gf128mul_x_lle(&p[i + 1], &p[i]);
21894
21895 - memset(r, 0, sizeof(r));
21896 + memset(r, 0, sizeof(*r));
21897 for (i = 0;;) {
21898 u8 ch = ((u8 *)b)[15 - i];
21899
21900 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
21901 for (i = 0; i < 7; ++i)
21902 gf128mul_x_bbe(&p[i + 1], &p[i]);
21903
21904 - memset(r, 0, sizeof(r));
21905 + memset(r, 0, sizeof(*r));
21906 for (i = 0;;) {
21907 u8 ch = ((u8 *)b)[i];
21908
21909 diff -urNp linux-2.6.39.4/crypto/serpent.c linux-2.6.39.4/crypto/serpent.c
21910 --- linux-2.6.39.4/crypto/serpent.c 2011-05-19 00:06:34.000000000 -0400
21911 +++ linux-2.6.39.4/crypto/serpent.c 2011-08-05 19:44:35.000000000 -0400
21912 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
21913 u32 r0,r1,r2,r3,r4;
21914 int i;
21915
21916 + pax_track_stack();
21917 +
21918 /* Copy key, add padding */
21919
21920 for (i = 0; i < keylen; ++i)
21921 diff -urNp linux-2.6.39.4/Documentation/dontdiff linux-2.6.39.4/Documentation/dontdiff
21922 --- linux-2.6.39.4/Documentation/dontdiff 2011-05-19 00:06:34.000000000 -0400
21923 +++ linux-2.6.39.4/Documentation/dontdiff 2011-08-05 19:44:35.000000000 -0400
21924 @@ -1,13 +1,16 @@
21925 *.a
21926 *.aux
21927 *.bin
21928 +*.cis
21929 *.cpio
21930 *.csp
21931 +*.dbg
21932 *.dsp
21933 *.dvi
21934 *.elf
21935 *.eps
21936 *.fw
21937 +*.gcno
21938 *.gen.S
21939 *.gif
21940 *.grep
21941 @@ -38,8 +41,10 @@
21942 *.tab.h
21943 *.tex
21944 *.ver
21945 +*.vim
21946 *.xml
21947 *_MODULES
21948 +*_reg_safe.h
21949 *_vga16.c
21950 *~
21951 *.9
21952 @@ -49,11 +54,16 @@
21953 53c700_d.h
21954 CVS
21955 ChangeSet
21956 +GPATH
21957 +GRTAGS
21958 +GSYMS
21959 +GTAGS
21960 Image
21961 Kerntypes
21962 Module.markers
21963 Module.symvers
21964 PENDING
21965 +PERF*
21966 SCCS
21967 System.map*
21968 TAGS
21969 @@ -80,8 +90,11 @@ btfixupprep
21970 build
21971 bvmlinux
21972 bzImage*
21973 +capability_names.h
21974 capflags.c
21975 classlist.h*
21976 +clut_vga16.c
21977 +common-cmds.h
21978 comp*.log
21979 compile.h*
21980 conf
21981 @@ -106,16 +119,19 @@ fore200e_mkfirm
21982 fore200e_pca_fw.c*
21983 gconf
21984 gen-devlist
21985 +gen-kdb_cmds.c
21986 gen_crc32table
21987 gen_init_cpio
21988 generated
21989 genheaders
21990 genksyms
21991 *_gray256.c
21992 +hash
21993 ihex2fw
21994 ikconfig.h*
21995 inat-tables.c
21996 initramfs_data.cpio
21997 +initramfs_data.cpio.bz2
21998 initramfs_data.cpio.gz
21999 initramfs_list
22000 int16.c
22001 @@ -125,7 +141,6 @@ int32.c
22002 int4.c
22003 int8.c
22004 kallsyms
22005 -kconfig
22006 keywords.c
22007 ksym.c*
22008 ksym.h*
22009 @@ -149,7 +164,9 @@ mkboot
22010 mkbugboot
22011 mkcpustr
22012 mkdep
22013 +mkpiggy
22014 mkprep
22015 +mkregtable
22016 mktables
22017 mktree
22018 modpost
22019 @@ -165,6 +182,7 @@ parse.h
22020 patches*
22021 pca200e.bin
22022 pca200e_ecd.bin2
22023 +perf-archive
22024 piggy.gz
22025 piggyback
22026 piggy.S
22027 @@ -180,7 +198,9 @@ r600_reg_safe.h
22028 raid6altivec*.c
22029 raid6int*.c
22030 raid6tables.c
22031 +regdb.c
22032 relocs
22033 +rlim_names.h
22034 rn50_reg_safe.h
22035 rs600_reg_safe.h
22036 rv515_reg_safe.h
22037 @@ -189,6 +209,7 @@ setup
22038 setup.bin
22039 setup.elf
22040 sImage
22041 +slabinfo
22042 sm_tbl*
22043 split-include
22044 syscalltab.h
22045 @@ -213,13 +234,17 @@ version.h*
22046 vmlinux
22047 vmlinux-*
22048 vmlinux.aout
22049 +vmlinux.bin.all
22050 +vmlinux.bin.bz2
22051 vmlinux.lds
22052 +vmlinux.relocs
22053 voffset.h
22054 vsyscall.lds
22055 vsyscall_32.lds
22056 wanxlfw.inc
22057 uImage
22058 unifdef
22059 +utsrelease.h
22060 wakeup.bin
22061 wakeup.elf
22062 wakeup.lds
22063 diff -urNp linux-2.6.39.4/Documentation/kernel-parameters.txt linux-2.6.39.4/Documentation/kernel-parameters.txt
22064 --- linux-2.6.39.4/Documentation/kernel-parameters.txt 2011-06-25 12:55:22.000000000 -0400
22065 +++ linux-2.6.39.4/Documentation/kernel-parameters.txt 2011-08-05 19:44:35.000000000 -0400
22066 @@ -1879,6 +1879,13 @@ bytes respectively. Such letter suffixes
22067 the specified number of seconds. This is to be used if
22068 your oopses keep scrolling off the screen.
22069
22070 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
22071 + virtualization environments that don't cope well with the
22072 + expand down segment used by UDEREF on X86-32 or the frequent
22073 + page table updates on X86-64.
22074 +
22075 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
22076 +
22077 pcbit= [HW,ISDN]
22078
22079 pcd. [PARIDE]
22080 diff -urNp linux-2.6.39.4/drivers/acpi/apei/cper.c linux-2.6.39.4/drivers/acpi/apei/cper.c
22081 --- linux-2.6.39.4/drivers/acpi/apei/cper.c 2011-05-19 00:06:34.000000000 -0400
22082 +++ linux-2.6.39.4/drivers/acpi/apei/cper.c 2011-08-05 19:44:35.000000000 -0400
22083 @@ -38,12 +38,12 @@
22084 */
22085 u64 cper_next_record_id(void)
22086 {
22087 - static atomic64_t seq;
22088 + static atomic64_unchecked_t seq;
22089
22090 - if (!atomic64_read(&seq))
22091 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
22092 + if (!atomic64_read_unchecked(&seq))
22093 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
22094
22095 - return atomic64_inc_return(&seq);
22096 + return atomic64_inc_return_unchecked(&seq);
22097 }
22098 EXPORT_SYMBOL_GPL(cper_next_record_id);
22099
22100 diff -urNp linux-2.6.39.4/drivers/acpi/power_meter.c linux-2.6.39.4/drivers/acpi/power_meter.c
22101 --- linux-2.6.39.4/drivers/acpi/power_meter.c 2011-05-19 00:06:34.000000000 -0400
22102 +++ linux-2.6.39.4/drivers/acpi/power_meter.c 2011-08-05 19:44:35.000000000 -0400
22103 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
22104 return res;
22105
22106 temp /= 1000;
22107 - if (temp < 0)
22108 - return -EINVAL;
22109
22110 mutex_lock(&resource->lock);
22111 resource->trip[attr->index - 7] = temp;
22112 diff -urNp linux-2.6.39.4/drivers/acpi/proc.c linux-2.6.39.4/drivers/acpi/proc.c
22113 --- linux-2.6.39.4/drivers/acpi/proc.c 2011-05-19 00:06:34.000000000 -0400
22114 +++ linux-2.6.39.4/drivers/acpi/proc.c 2011-08-05 19:44:35.000000000 -0400
22115 @@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
22116 size_t count, loff_t * ppos)
22117 {
22118 struct list_head *node, *next;
22119 - char strbuf[5];
22120 - char str[5] = "";
22121 - unsigned int len = count;
22122 -
22123 - if (len > 4)
22124 - len = 4;
22125 - if (len < 0)
22126 - return -EFAULT;
22127 + char strbuf[5] = {0};
22128
22129 - if (copy_from_user(strbuf, buffer, len))
22130 + if (count > 4)
22131 + count = 4;
22132 + if (copy_from_user(strbuf, buffer, count))
22133 return -EFAULT;
22134 - strbuf[len] = '\0';
22135 - sscanf(strbuf, "%s", str);
22136 + strbuf[count] = '\0';
22137
22138 mutex_lock(&acpi_device_lock);
22139 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
22140 @@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
22141 if (!dev->wakeup.flags.valid)
22142 continue;
22143
22144 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
22145 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
22146 if (device_can_wakeup(&dev->dev)) {
22147 bool enable = !device_may_wakeup(&dev->dev);
22148 device_set_wakeup_enable(&dev->dev, enable);
22149 diff -urNp linux-2.6.39.4/drivers/acpi/processor_driver.c linux-2.6.39.4/drivers/acpi/processor_driver.c
22150 --- linux-2.6.39.4/drivers/acpi/processor_driver.c 2011-05-19 00:06:34.000000000 -0400
22151 +++ linux-2.6.39.4/drivers/acpi/processor_driver.c 2011-08-05 19:44:35.000000000 -0400
22152 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
22153 return 0;
22154 #endif
22155
22156 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
22157 + BUG_ON(pr->id >= nr_cpu_ids);
22158
22159 /*
22160 * Buggy BIOS check
22161 diff -urNp linux-2.6.39.4/drivers/ata/libata-core.c linux-2.6.39.4/drivers/ata/libata-core.c
22162 --- linux-2.6.39.4/drivers/ata/libata-core.c 2011-05-19 00:06:34.000000000 -0400
22163 +++ linux-2.6.39.4/drivers/ata/libata-core.c 2011-08-05 20:34:06.000000000 -0400
22164 @@ -4747,7 +4747,7 @@ void ata_qc_free(struct ata_queued_cmd *
22165 struct ata_port *ap;
22166 unsigned int tag;
22167
22168 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22169 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22170 ap = qc->ap;
22171
22172 qc->flags = 0;
22173 @@ -4763,7 +4763,7 @@ void __ata_qc_complete(struct ata_queued
22174 struct ata_port *ap;
22175 struct ata_link *link;
22176
22177 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22178 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22179 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
22180 ap = qc->ap;
22181 link = qc->dev->link;
22182 @@ -5768,6 +5768,7 @@ static void ata_finalize_port_ops(struct
22183 return;
22184
22185 spin_lock(&lock);
22186 + pax_open_kernel();
22187
22188 for (cur = ops->inherits; cur; cur = cur->inherits) {
22189 void **inherit = (void **)cur;
22190 @@ -5781,8 +5782,9 @@ static void ata_finalize_port_ops(struct
22191 if (IS_ERR(*pp))
22192 *pp = NULL;
22193
22194 - ops->inherits = NULL;
22195 + *(struct ata_port_operations **)&ops->inherits = NULL;
22196
22197 + pax_close_kernel();
22198 spin_unlock(&lock);
22199 }
22200
22201 diff -urNp linux-2.6.39.4/drivers/ata/libata-eh.c linux-2.6.39.4/drivers/ata/libata-eh.c
22202 --- linux-2.6.39.4/drivers/ata/libata-eh.c 2011-08-05 21:11:51.000000000 -0400
22203 +++ linux-2.6.39.4/drivers/ata/libata-eh.c 2011-08-05 21:12:20.000000000 -0400
22204 @@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
22205 {
22206 struct ata_link *link;
22207
22208 + pax_track_stack();
22209 +
22210 ata_for_each_link(link, ap, HOST_FIRST)
22211 ata_eh_link_report(link);
22212 }
22213 diff -urNp linux-2.6.39.4/drivers/ata/pata_arasan_cf.c linux-2.6.39.4/drivers/ata/pata_arasan_cf.c
22214 --- linux-2.6.39.4/drivers/ata/pata_arasan_cf.c 2011-05-19 00:06:34.000000000 -0400
22215 +++ linux-2.6.39.4/drivers/ata/pata_arasan_cf.c 2011-08-05 20:34:06.000000000 -0400
22216 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
22217 /* Handle platform specific quirks */
22218 if (pdata->quirk) {
22219 if (pdata->quirk & CF_BROKEN_PIO) {
22220 - ap->ops->set_piomode = NULL;
22221 + pax_open_kernel();
22222 + *(void **)&ap->ops->set_piomode = NULL;
22223 + pax_close_kernel();
22224 ap->pio_mask = 0;
22225 }
22226 if (pdata->quirk & CF_BROKEN_MWDMA)
22227 diff -urNp linux-2.6.39.4/drivers/atm/adummy.c linux-2.6.39.4/drivers/atm/adummy.c
22228 --- linux-2.6.39.4/drivers/atm/adummy.c 2011-05-19 00:06:34.000000000 -0400
22229 +++ linux-2.6.39.4/drivers/atm/adummy.c 2011-08-05 19:44:36.000000000 -0400
22230 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
22231 vcc->pop(vcc, skb);
22232 else
22233 dev_kfree_skb_any(skb);
22234 - atomic_inc(&vcc->stats->tx);
22235 + atomic_inc_unchecked(&vcc->stats->tx);
22236
22237 return 0;
22238 }
22239 diff -urNp linux-2.6.39.4/drivers/atm/ambassador.c linux-2.6.39.4/drivers/atm/ambassador.c
22240 --- linux-2.6.39.4/drivers/atm/ambassador.c 2011-05-19 00:06:34.000000000 -0400
22241 +++ linux-2.6.39.4/drivers/atm/ambassador.c 2011-08-05 19:44:36.000000000 -0400
22242 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
22243 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
22244
22245 // VC layer stats
22246 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22247 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22248
22249 // free the descriptor
22250 kfree (tx_descr);
22251 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
22252 dump_skb ("<<<", vc, skb);
22253
22254 // VC layer stats
22255 - atomic_inc(&atm_vcc->stats->rx);
22256 + atomic_inc_unchecked(&atm_vcc->stats->rx);
22257 __net_timestamp(skb);
22258 // end of our responsibility
22259 atm_vcc->push (atm_vcc, skb);
22260 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
22261 } else {
22262 PRINTK (KERN_INFO, "dropped over-size frame");
22263 // should we count this?
22264 - atomic_inc(&atm_vcc->stats->rx_drop);
22265 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22266 }
22267
22268 } else {
22269 @@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
22270 }
22271
22272 if (check_area (skb->data, skb->len)) {
22273 - atomic_inc(&atm_vcc->stats->tx_err);
22274 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
22275 return -ENOMEM; // ?
22276 }
22277
22278 diff -urNp linux-2.6.39.4/drivers/atm/atmtcp.c linux-2.6.39.4/drivers/atm/atmtcp.c
22279 --- linux-2.6.39.4/drivers/atm/atmtcp.c 2011-05-19 00:06:34.000000000 -0400
22280 +++ linux-2.6.39.4/drivers/atm/atmtcp.c 2011-08-05 19:44:36.000000000 -0400
22281 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
22282 if (vcc->pop) vcc->pop(vcc,skb);
22283 else dev_kfree_skb(skb);
22284 if (dev_data) return 0;
22285 - atomic_inc(&vcc->stats->tx_err);
22286 + atomic_inc_unchecked(&vcc->stats->tx_err);
22287 return -ENOLINK;
22288 }
22289 size = skb->len+sizeof(struct atmtcp_hdr);
22290 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
22291 if (!new_skb) {
22292 if (vcc->pop) vcc->pop(vcc,skb);
22293 else dev_kfree_skb(skb);
22294 - atomic_inc(&vcc->stats->tx_err);
22295 + atomic_inc_unchecked(&vcc->stats->tx_err);
22296 return -ENOBUFS;
22297 }
22298 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
22299 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
22300 if (vcc->pop) vcc->pop(vcc,skb);
22301 else dev_kfree_skb(skb);
22302 out_vcc->push(out_vcc,new_skb);
22303 - atomic_inc(&vcc->stats->tx);
22304 - atomic_inc(&out_vcc->stats->rx);
22305 + atomic_inc_unchecked(&vcc->stats->tx);
22306 + atomic_inc_unchecked(&out_vcc->stats->rx);
22307 return 0;
22308 }
22309
22310 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
22311 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
22312 read_unlock(&vcc_sklist_lock);
22313 if (!out_vcc) {
22314 - atomic_inc(&vcc->stats->tx_err);
22315 + atomic_inc_unchecked(&vcc->stats->tx_err);
22316 goto done;
22317 }
22318 skb_pull(skb,sizeof(struct atmtcp_hdr));
22319 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
22320 __net_timestamp(new_skb);
22321 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
22322 out_vcc->push(out_vcc,new_skb);
22323 - atomic_inc(&vcc->stats->tx);
22324 - atomic_inc(&out_vcc->stats->rx);
22325 + atomic_inc_unchecked(&vcc->stats->tx);
22326 + atomic_inc_unchecked(&out_vcc->stats->rx);
22327 done:
22328 if (vcc->pop) vcc->pop(vcc,skb);
22329 else dev_kfree_skb(skb);
22330 diff -urNp linux-2.6.39.4/drivers/atm/eni.c linux-2.6.39.4/drivers/atm/eni.c
22331 --- linux-2.6.39.4/drivers/atm/eni.c 2011-05-19 00:06:34.000000000 -0400
22332 +++ linux-2.6.39.4/drivers/atm/eni.c 2011-08-05 19:44:36.000000000 -0400
22333 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
22334 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
22335 vcc->dev->number);
22336 length = 0;
22337 - atomic_inc(&vcc->stats->rx_err);
22338 + atomic_inc_unchecked(&vcc->stats->rx_err);
22339 }
22340 else {
22341 length = ATM_CELL_SIZE-1; /* no HEC */
22342 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22343 size);
22344 }
22345 eff = length = 0;
22346 - atomic_inc(&vcc->stats->rx_err);
22347 + atomic_inc_unchecked(&vcc->stats->rx_err);
22348 }
22349 else {
22350 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
22351 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22352 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
22353 vcc->dev->number,vcc->vci,length,size << 2,descr);
22354 length = eff = 0;
22355 - atomic_inc(&vcc->stats->rx_err);
22356 + atomic_inc_unchecked(&vcc->stats->rx_err);
22357 }
22358 }
22359 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
22360 @@ -771,7 +771,7 @@ rx_dequeued++;
22361 vcc->push(vcc,skb);
22362 pushed++;
22363 }
22364 - atomic_inc(&vcc->stats->rx);
22365 + atomic_inc_unchecked(&vcc->stats->rx);
22366 }
22367 wake_up(&eni_dev->rx_wait);
22368 }
22369 @@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
22370 PCI_DMA_TODEVICE);
22371 if (vcc->pop) vcc->pop(vcc,skb);
22372 else dev_kfree_skb_irq(skb);
22373 - atomic_inc(&vcc->stats->tx);
22374 + atomic_inc_unchecked(&vcc->stats->tx);
22375 wake_up(&eni_dev->tx_wait);
22376 dma_complete++;
22377 }
22378 diff -urNp linux-2.6.39.4/drivers/atm/firestream.c linux-2.6.39.4/drivers/atm/firestream.c
22379 --- linux-2.6.39.4/drivers/atm/firestream.c 2011-05-19 00:06:34.000000000 -0400
22380 +++ linux-2.6.39.4/drivers/atm/firestream.c 2011-08-05 19:44:36.000000000 -0400
22381 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct
22382 }
22383 }
22384
22385 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22386 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22387
22388 fs_dprintk (FS_DEBUG_TXMEM, "i");
22389 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
22390 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_
22391 #endif
22392 skb_put (skb, qe->p1 & 0xffff);
22393 ATM_SKB(skb)->vcc = atm_vcc;
22394 - atomic_inc(&atm_vcc->stats->rx);
22395 + atomic_inc_unchecked(&atm_vcc->stats->rx);
22396 __net_timestamp(skb);
22397 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
22398 atm_vcc->push (atm_vcc, skb);
22399 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_
22400 kfree (pe);
22401 }
22402 if (atm_vcc)
22403 - atomic_inc(&atm_vcc->stats->rx_drop);
22404 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22405 break;
22406 case 0x1f: /* Reassembly abort: no buffers. */
22407 /* Silently increment error counter. */
22408 if (atm_vcc)
22409 - atomic_inc(&atm_vcc->stats->rx_drop);
22410 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22411 break;
22412 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
22413 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
22414 diff -urNp linux-2.6.39.4/drivers/atm/fore200e.c linux-2.6.39.4/drivers/atm/fore200e.c
22415 --- linux-2.6.39.4/drivers/atm/fore200e.c 2011-05-19 00:06:34.000000000 -0400
22416 +++ linux-2.6.39.4/drivers/atm/fore200e.c 2011-08-05 19:44:36.000000000 -0400
22417 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
22418 #endif
22419 /* check error condition */
22420 if (*entry->status & STATUS_ERROR)
22421 - atomic_inc(&vcc->stats->tx_err);
22422 + atomic_inc_unchecked(&vcc->stats->tx_err);
22423 else
22424 - atomic_inc(&vcc->stats->tx);
22425 + atomic_inc_unchecked(&vcc->stats->tx);
22426 }
22427 }
22428
22429 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
22430 if (skb == NULL) {
22431 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
22432
22433 - atomic_inc(&vcc->stats->rx_drop);
22434 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22435 return -ENOMEM;
22436 }
22437
22438 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
22439
22440 dev_kfree_skb_any(skb);
22441
22442 - atomic_inc(&vcc->stats->rx_drop);
22443 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22444 return -ENOMEM;
22445 }
22446
22447 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22448
22449 vcc->push(vcc, skb);
22450 - atomic_inc(&vcc->stats->rx);
22451 + atomic_inc_unchecked(&vcc->stats->rx);
22452
22453 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22454
22455 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
22456 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
22457 fore200e->atm_dev->number,
22458 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
22459 - atomic_inc(&vcc->stats->rx_err);
22460 + atomic_inc_unchecked(&vcc->stats->rx_err);
22461 }
22462 }
22463
22464 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
22465 goto retry_here;
22466 }
22467
22468 - atomic_inc(&vcc->stats->tx_err);
22469 + atomic_inc_unchecked(&vcc->stats->tx_err);
22470
22471 fore200e->tx_sat++;
22472 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
22473 diff -urNp linux-2.6.39.4/drivers/atm/he.c linux-2.6.39.4/drivers/atm/he.c
22474 --- linux-2.6.39.4/drivers/atm/he.c 2011-05-19 00:06:34.000000000 -0400
22475 +++ linux-2.6.39.4/drivers/atm/he.c 2011-08-05 19:44:36.000000000 -0400
22476 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22477
22478 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
22479 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
22480 - atomic_inc(&vcc->stats->rx_drop);
22481 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22482 goto return_host_buffers;
22483 }
22484
22485 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22486 RBRQ_LEN_ERR(he_dev->rbrq_head)
22487 ? "LEN_ERR" : "",
22488 vcc->vpi, vcc->vci);
22489 - atomic_inc(&vcc->stats->rx_err);
22490 + atomic_inc_unchecked(&vcc->stats->rx_err);
22491 goto return_host_buffers;
22492 }
22493
22494 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22495 vcc->push(vcc, skb);
22496 spin_lock(&he_dev->global_lock);
22497
22498 - atomic_inc(&vcc->stats->rx);
22499 + atomic_inc_unchecked(&vcc->stats->rx);
22500
22501 return_host_buffers:
22502 ++pdus_assembled;
22503 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
22504 tpd->vcc->pop(tpd->vcc, tpd->skb);
22505 else
22506 dev_kfree_skb_any(tpd->skb);
22507 - atomic_inc(&tpd->vcc->stats->tx_err);
22508 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
22509 }
22510 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
22511 return;
22512 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22513 vcc->pop(vcc, skb);
22514 else
22515 dev_kfree_skb_any(skb);
22516 - atomic_inc(&vcc->stats->tx_err);
22517 + atomic_inc_unchecked(&vcc->stats->tx_err);
22518 return -EINVAL;
22519 }
22520
22521 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22522 vcc->pop(vcc, skb);
22523 else
22524 dev_kfree_skb_any(skb);
22525 - atomic_inc(&vcc->stats->tx_err);
22526 + atomic_inc_unchecked(&vcc->stats->tx_err);
22527 return -EINVAL;
22528 }
22529 #endif
22530 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22531 vcc->pop(vcc, skb);
22532 else
22533 dev_kfree_skb_any(skb);
22534 - atomic_inc(&vcc->stats->tx_err);
22535 + atomic_inc_unchecked(&vcc->stats->tx_err);
22536 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22537 return -ENOMEM;
22538 }
22539 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22540 vcc->pop(vcc, skb);
22541 else
22542 dev_kfree_skb_any(skb);
22543 - atomic_inc(&vcc->stats->tx_err);
22544 + atomic_inc_unchecked(&vcc->stats->tx_err);
22545 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22546 return -ENOMEM;
22547 }
22548 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22549 __enqueue_tpd(he_dev, tpd, cid);
22550 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22551
22552 - atomic_inc(&vcc->stats->tx);
22553 + atomic_inc_unchecked(&vcc->stats->tx);
22554
22555 return 0;
22556 }
22557 diff -urNp linux-2.6.39.4/drivers/atm/horizon.c linux-2.6.39.4/drivers/atm/horizon.c
22558 --- linux-2.6.39.4/drivers/atm/horizon.c 2011-05-19 00:06:34.000000000 -0400
22559 +++ linux-2.6.39.4/drivers/atm/horizon.c 2011-08-05 19:44:36.000000000 -0400
22560 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
22561 {
22562 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
22563 // VC layer stats
22564 - atomic_inc(&vcc->stats->rx);
22565 + atomic_inc_unchecked(&vcc->stats->rx);
22566 __net_timestamp(skb);
22567 // end of our responsibility
22568 vcc->push (vcc, skb);
22569 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
22570 dev->tx_iovec = NULL;
22571
22572 // VC layer stats
22573 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22574 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22575
22576 // free the skb
22577 hrz_kfree_skb (skb);
22578 diff -urNp linux-2.6.39.4/drivers/atm/idt77252.c linux-2.6.39.4/drivers/atm/idt77252.c
22579 --- linux-2.6.39.4/drivers/atm/idt77252.c 2011-05-19 00:06:34.000000000 -0400
22580 +++ linux-2.6.39.4/drivers/atm/idt77252.c 2011-08-05 19:44:36.000000000 -0400
22581 @@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
22582 else
22583 dev_kfree_skb(skb);
22584
22585 - atomic_inc(&vcc->stats->tx);
22586 + atomic_inc_unchecked(&vcc->stats->tx);
22587 }
22588
22589 atomic_dec(&scq->used);
22590 @@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
22591 if ((sb = dev_alloc_skb(64)) == NULL) {
22592 printk("%s: Can't allocate buffers for aal0.\n",
22593 card->name);
22594 - atomic_add(i, &vcc->stats->rx_drop);
22595 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
22596 break;
22597 }
22598 if (!atm_charge(vcc, sb->truesize)) {
22599 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
22600 card->name);
22601 - atomic_add(i - 1, &vcc->stats->rx_drop);
22602 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
22603 dev_kfree_skb(sb);
22604 break;
22605 }
22606 @@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
22607 ATM_SKB(sb)->vcc = vcc;
22608 __net_timestamp(sb);
22609 vcc->push(vcc, sb);
22610 - atomic_inc(&vcc->stats->rx);
22611 + atomic_inc_unchecked(&vcc->stats->rx);
22612
22613 cell += ATM_CELL_PAYLOAD;
22614 }
22615 @@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
22616 "(CDC: %08x)\n",
22617 card->name, len, rpp->len, readl(SAR_REG_CDC));
22618 recycle_rx_pool_skb(card, rpp);
22619 - atomic_inc(&vcc->stats->rx_err);
22620 + atomic_inc_unchecked(&vcc->stats->rx_err);
22621 return;
22622 }
22623 if (stat & SAR_RSQE_CRC) {
22624 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
22625 recycle_rx_pool_skb(card, rpp);
22626 - atomic_inc(&vcc->stats->rx_err);
22627 + atomic_inc_unchecked(&vcc->stats->rx_err);
22628 return;
22629 }
22630 if (skb_queue_len(&rpp->queue) > 1) {
22631 @@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
22632 RXPRINTK("%s: Can't alloc RX skb.\n",
22633 card->name);
22634 recycle_rx_pool_skb(card, rpp);
22635 - atomic_inc(&vcc->stats->rx_err);
22636 + atomic_inc_unchecked(&vcc->stats->rx_err);
22637 return;
22638 }
22639 if (!atm_charge(vcc, skb->truesize)) {
22640 @@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
22641 __net_timestamp(skb);
22642
22643 vcc->push(vcc, skb);
22644 - atomic_inc(&vcc->stats->rx);
22645 + atomic_inc_unchecked(&vcc->stats->rx);
22646
22647 return;
22648 }
22649 @@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
22650 __net_timestamp(skb);
22651
22652 vcc->push(vcc, skb);
22653 - atomic_inc(&vcc->stats->rx);
22654 + atomic_inc_unchecked(&vcc->stats->rx);
22655
22656 if (skb->truesize > SAR_FB_SIZE_3)
22657 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
22658 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *car
22659 if (vcc->qos.aal != ATM_AAL0) {
22660 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
22661 card->name, vpi, vci);
22662 - atomic_inc(&vcc->stats->rx_drop);
22663 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22664 goto drop;
22665 }
22666
22667 if ((sb = dev_alloc_skb(64)) == NULL) {
22668 printk("%s: Can't allocate buffers for AAL0.\n",
22669 card->name);
22670 - atomic_inc(&vcc->stats->rx_err);
22671 + atomic_inc_unchecked(&vcc->stats->rx_err);
22672 goto drop;
22673 }
22674
22675 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *car
22676 ATM_SKB(sb)->vcc = vcc;
22677 __net_timestamp(sb);
22678 vcc->push(vcc, sb);
22679 - atomic_inc(&vcc->stats->rx);
22680 + atomic_inc_unchecked(&vcc->stats->rx);
22681
22682 drop:
22683 skb_pull(queue, 64);
22684 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22685
22686 if (vc == NULL) {
22687 printk("%s: NULL connection in send().\n", card->name);
22688 - atomic_inc(&vcc->stats->tx_err);
22689 + atomic_inc_unchecked(&vcc->stats->tx_err);
22690 dev_kfree_skb(skb);
22691 return -EINVAL;
22692 }
22693 if (!test_bit(VCF_TX, &vc->flags)) {
22694 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
22695 - atomic_inc(&vcc->stats->tx_err);
22696 + atomic_inc_unchecked(&vcc->stats->tx_err);
22697 dev_kfree_skb(skb);
22698 return -EINVAL;
22699 }
22700 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22701 break;
22702 default:
22703 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
22704 - atomic_inc(&vcc->stats->tx_err);
22705 + atomic_inc_unchecked(&vcc->stats->tx_err);
22706 dev_kfree_skb(skb);
22707 return -EINVAL;
22708 }
22709
22710 if (skb_shinfo(skb)->nr_frags != 0) {
22711 printk("%s: No scatter-gather yet.\n", card->name);
22712 - atomic_inc(&vcc->stats->tx_err);
22713 + atomic_inc_unchecked(&vcc->stats->tx_err);
22714 dev_kfree_skb(skb);
22715 return -EINVAL;
22716 }
22717 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22718
22719 err = queue_skb(card, vc, skb, oam);
22720 if (err) {
22721 - atomic_inc(&vcc->stats->tx_err);
22722 + atomic_inc_unchecked(&vcc->stats->tx_err);
22723 dev_kfree_skb(skb);
22724 return err;
22725 }
22726 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
22727 skb = dev_alloc_skb(64);
22728 if (!skb) {
22729 printk("%s: Out of memory in send_oam().\n", card->name);
22730 - atomic_inc(&vcc->stats->tx_err);
22731 + atomic_inc_unchecked(&vcc->stats->tx_err);
22732 return -ENOMEM;
22733 }
22734 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
22735 diff -urNp linux-2.6.39.4/drivers/atm/iphase.c linux-2.6.39.4/drivers/atm/iphase.c
22736 --- linux-2.6.39.4/drivers/atm/iphase.c 2011-05-19 00:06:34.000000000 -0400
22737 +++ linux-2.6.39.4/drivers/atm/iphase.c 2011-08-05 19:44:36.000000000 -0400
22738 @@ -1124,7 +1124,7 @@ static int rx_pkt(struct atm_dev *dev)
22739 status = (u_short) (buf_desc_ptr->desc_mode);
22740 if (status & (RX_CER | RX_PTE | RX_OFL))
22741 {
22742 - atomic_inc(&vcc->stats->rx_err);
22743 + atomic_inc_unchecked(&vcc->stats->rx_err);
22744 IF_ERR(printk("IA: bad packet, dropping it");)
22745 if (status & RX_CER) {
22746 IF_ERR(printk(" cause: packet CRC error\n");)
22747 @@ -1147,7 +1147,7 @@ static int rx_pkt(struct atm_dev *dev)
22748 len = dma_addr - buf_addr;
22749 if (len > iadev->rx_buf_sz) {
22750 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
22751 - atomic_inc(&vcc->stats->rx_err);
22752 + atomic_inc_unchecked(&vcc->stats->rx_err);
22753 goto out_free_desc;
22754 }
22755
22756 @@ -1297,7 +1297,7 @@ static void rx_dle_intr(struct atm_dev *
22757 ia_vcc = INPH_IA_VCC(vcc);
22758 if (ia_vcc == NULL)
22759 {
22760 - atomic_inc(&vcc->stats->rx_err);
22761 + atomic_inc_unchecked(&vcc->stats->rx_err);
22762 dev_kfree_skb_any(skb);
22763 atm_return(vcc, atm_guess_pdu2truesize(len));
22764 goto INCR_DLE;
22765 @@ -1309,7 +1309,7 @@ static void rx_dle_intr(struct atm_dev *
22766 if ((length > iadev->rx_buf_sz) || (length >
22767 (skb->len - sizeof(struct cpcs_trailer))))
22768 {
22769 - atomic_inc(&vcc->stats->rx_err);
22770 + atomic_inc_unchecked(&vcc->stats->rx_err);
22771 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
22772 length, skb->len);)
22773 dev_kfree_skb_any(skb);
22774 @@ -1325,7 +1325,7 @@ static void rx_dle_intr(struct atm_dev *
22775
22776 IF_RX(printk("rx_dle_intr: skb push");)
22777 vcc->push(vcc,skb);
22778 - atomic_inc(&vcc->stats->rx);
22779 + atomic_inc_unchecked(&vcc->stats->rx);
22780 iadev->rx_pkt_cnt++;
22781 }
22782 INCR_DLE:
22783 @@ -2807,15 +2807,15 @@ static int ia_ioctl(struct atm_dev *dev,
22784 {
22785 struct k_sonet_stats *stats;
22786 stats = &PRIV(_ia_dev[board])->sonet_stats;
22787 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
22788 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
22789 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
22790 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
22791 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
22792 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
22793 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
22794 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
22795 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
22796 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
22797 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
22798 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
22799 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
22800 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
22801 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
22802 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
22803 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
22804 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
22805 }
22806 ia_cmds.status = 0;
22807 break;
22808 @@ -2920,7 +2920,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
22809 if ((desc == 0) || (desc > iadev->num_tx_desc))
22810 {
22811 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
22812 - atomic_inc(&vcc->stats->tx);
22813 + atomic_inc_unchecked(&vcc->stats->tx);
22814 if (vcc->pop)
22815 vcc->pop(vcc, skb);
22816 else
22817 @@ -3025,14 +3025,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
22818 ATM_DESC(skb) = vcc->vci;
22819 skb_queue_tail(&iadev->tx_dma_q, skb);
22820
22821 - atomic_inc(&vcc->stats->tx);
22822 + atomic_inc_unchecked(&vcc->stats->tx);
22823 iadev->tx_pkt_cnt++;
22824 /* Increment transaction counter */
22825 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
22826
22827 #if 0
22828 /* add flow control logic */
22829 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
22830 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
22831 if (iavcc->vc_desc_cnt > 10) {
22832 vcc->tx_quota = vcc->tx_quota * 3 / 4;
22833 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
22834 diff -urNp linux-2.6.39.4/drivers/atm/lanai.c linux-2.6.39.4/drivers/atm/lanai.c
22835 --- linux-2.6.39.4/drivers/atm/lanai.c 2011-05-19 00:06:34.000000000 -0400
22836 +++ linux-2.6.39.4/drivers/atm/lanai.c 2011-08-05 19:44:36.000000000 -0400
22837 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
22838 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
22839 lanai_endtx(lanai, lvcc);
22840 lanai_free_skb(lvcc->tx.atmvcc, skb);
22841 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
22842 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
22843 }
22844
22845 /* Try to fill the buffer - don't call unless there is backlog */
22846 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
22847 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
22848 __net_timestamp(skb);
22849 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
22850 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
22851 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
22852 out:
22853 lvcc->rx.buf.ptr = end;
22854 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
22855 @@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
22856 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
22857 "vcc %d\n", lanai->number, (unsigned int) s, vci);
22858 lanai->stats.service_rxnotaal5++;
22859 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22860 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22861 return 0;
22862 }
22863 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
22864 @@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
22865 int bytes;
22866 read_unlock(&vcc_sklist_lock);
22867 DPRINTK("got trashed rx pdu on vci %d\n", vci);
22868 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22869 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22870 lvcc->stats.x.aal5.service_trash++;
22871 bytes = (SERVICE_GET_END(s) * 16) -
22872 (((unsigned long) lvcc->rx.buf.ptr) -
22873 @@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
22874 }
22875 if (s & SERVICE_STREAM) {
22876 read_unlock(&vcc_sklist_lock);
22877 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22878 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22879 lvcc->stats.x.aal5.service_stream++;
22880 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
22881 "PDU on VCI %d!\n", lanai->number, vci);
22882 @@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
22883 return 0;
22884 }
22885 DPRINTK("got rx crc error on vci %d\n", vci);
22886 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22887 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22888 lvcc->stats.x.aal5.service_rxcrc++;
22889 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
22890 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
22891 diff -urNp linux-2.6.39.4/drivers/atm/nicstar.c linux-2.6.39.4/drivers/atm/nicstar.c
22892 --- linux-2.6.39.4/drivers/atm/nicstar.c 2011-05-19 00:06:34.000000000 -0400
22893 +++ linux-2.6.39.4/drivers/atm/nicstar.c 2011-08-05 19:44:36.000000000 -0400
22894 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
22895 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
22896 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
22897 card->index);
22898 - atomic_inc(&vcc->stats->tx_err);
22899 + atomic_inc_unchecked(&vcc->stats->tx_err);
22900 dev_kfree_skb_any(skb);
22901 return -EINVAL;
22902 }
22903 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
22904 if (!vc->tx) {
22905 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
22906 card->index);
22907 - atomic_inc(&vcc->stats->tx_err);
22908 + atomic_inc_unchecked(&vcc->stats->tx_err);
22909 dev_kfree_skb_any(skb);
22910 return -EINVAL;
22911 }
22912 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
22913 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
22914 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
22915 card->index);
22916 - atomic_inc(&vcc->stats->tx_err);
22917 + atomic_inc_unchecked(&vcc->stats->tx_err);
22918 dev_kfree_skb_any(skb);
22919 return -EINVAL;
22920 }
22921
22922 if (skb_shinfo(skb)->nr_frags != 0) {
22923 printk("nicstar%d: No scatter-gather yet.\n", card->index);
22924 - atomic_inc(&vcc->stats->tx_err);
22925 + atomic_inc_unchecked(&vcc->stats->tx_err);
22926 dev_kfree_skb_any(skb);
22927 return -EINVAL;
22928 }
22929 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
22930 }
22931
22932 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
22933 - atomic_inc(&vcc->stats->tx_err);
22934 + atomic_inc_unchecked(&vcc->stats->tx_err);
22935 dev_kfree_skb_any(skb);
22936 return -EIO;
22937 }
22938 - atomic_inc(&vcc->stats->tx);
22939 + atomic_inc_unchecked(&vcc->stats->tx);
22940
22941 return 0;
22942 }
22943 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
22944 printk
22945 ("nicstar%d: Can't allocate buffers for aal0.\n",
22946 card->index);
22947 - atomic_add(i, &vcc->stats->rx_drop);
22948 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
22949 break;
22950 }
22951 if (!atm_charge(vcc, sb->truesize)) {
22952 RXPRINTK
22953 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
22954 card->index);
22955 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22956 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22957 dev_kfree_skb_any(sb);
22958 break;
22959 }
22960 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
22961 ATM_SKB(sb)->vcc = vcc;
22962 __net_timestamp(sb);
22963 vcc->push(vcc, sb);
22964 - atomic_inc(&vcc->stats->rx);
22965 + atomic_inc_unchecked(&vcc->stats->rx);
22966 cell += ATM_CELL_PAYLOAD;
22967 }
22968
22969 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
22970 if (iovb == NULL) {
22971 printk("nicstar%d: Out of iovec buffers.\n",
22972 card->index);
22973 - atomic_inc(&vcc->stats->rx_drop);
22974 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22975 recycle_rx_buf(card, skb);
22976 return;
22977 }
22978 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
22979 small or large buffer itself. */
22980 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
22981 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
22982 - atomic_inc(&vcc->stats->rx_err);
22983 + atomic_inc_unchecked(&vcc->stats->rx_err);
22984 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22985 NS_MAX_IOVECS);
22986 NS_PRV_IOVCNT(iovb) = 0;
22987 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
22988 ("nicstar%d: Expected a small buffer, and this is not one.\n",
22989 card->index);
22990 which_list(card, skb);
22991 - atomic_inc(&vcc->stats->rx_err);
22992 + atomic_inc_unchecked(&vcc->stats->rx_err);
22993 recycle_rx_buf(card, skb);
22994 vc->rx_iov = NULL;
22995 recycle_iov_buf(card, iovb);
22996 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
22997 ("nicstar%d: Expected a large buffer, and this is not one.\n",
22998 card->index);
22999 which_list(card, skb);
23000 - atomic_inc(&vcc->stats->rx_err);
23001 + atomic_inc_unchecked(&vcc->stats->rx_err);
23002 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23003 NS_PRV_IOVCNT(iovb));
23004 vc->rx_iov = NULL;
23005 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
23006 printk(" - PDU size mismatch.\n");
23007 else
23008 printk(".\n");
23009 - atomic_inc(&vcc->stats->rx_err);
23010 + atomic_inc_unchecked(&vcc->stats->rx_err);
23011 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23012 NS_PRV_IOVCNT(iovb));
23013 vc->rx_iov = NULL;
23014 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
23015 /* skb points to a small buffer */
23016 if (!atm_charge(vcc, skb->truesize)) {
23017 push_rxbufs(card, skb);
23018 - atomic_inc(&vcc->stats->rx_drop);
23019 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23020 } else {
23021 skb_put(skb, len);
23022 dequeue_sm_buf(card, skb);
23023 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
23024 ATM_SKB(skb)->vcc = vcc;
23025 __net_timestamp(skb);
23026 vcc->push(vcc, skb);
23027 - atomic_inc(&vcc->stats->rx);
23028 + atomic_inc_unchecked(&vcc->stats->rx);
23029 }
23030 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
23031 struct sk_buff *sb;
23032 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
23033 if (len <= NS_SMBUFSIZE) {
23034 if (!atm_charge(vcc, sb->truesize)) {
23035 push_rxbufs(card, sb);
23036 - atomic_inc(&vcc->stats->rx_drop);
23037 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23038 } else {
23039 skb_put(sb, len);
23040 dequeue_sm_buf(card, sb);
23041 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
23042 ATM_SKB(sb)->vcc = vcc;
23043 __net_timestamp(sb);
23044 vcc->push(vcc, sb);
23045 - atomic_inc(&vcc->stats->rx);
23046 + atomic_inc_unchecked(&vcc->stats->rx);
23047 }
23048
23049 push_rxbufs(card, skb);
23050 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
23051
23052 if (!atm_charge(vcc, skb->truesize)) {
23053 push_rxbufs(card, skb);
23054 - atomic_inc(&vcc->stats->rx_drop);
23055 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23056 } else {
23057 dequeue_lg_buf(card, skb);
23058 #ifdef NS_USE_DESTRUCTORS
23059 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
23060 ATM_SKB(skb)->vcc = vcc;
23061 __net_timestamp(skb);
23062 vcc->push(vcc, skb);
23063 - atomic_inc(&vcc->stats->rx);
23064 + atomic_inc_unchecked(&vcc->stats->rx);
23065 }
23066
23067 push_rxbufs(card, sb);
23068 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
23069 printk
23070 ("nicstar%d: Out of huge buffers.\n",
23071 card->index);
23072 - atomic_inc(&vcc->stats->rx_drop);
23073 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23074 recycle_iovec_rx_bufs(card,
23075 (struct iovec *)
23076 iovb->data,
23077 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
23078 card->hbpool.count++;
23079 } else
23080 dev_kfree_skb_any(hb);
23081 - atomic_inc(&vcc->stats->rx_drop);
23082 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23083 } else {
23084 /* Copy the small buffer to the huge buffer */
23085 sb = (struct sk_buff *)iov->iov_base;
23086 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
23087 #endif /* NS_USE_DESTRUCTORS */
23088 __net_timestamp(hb);
23089 vcc->push(vcc, hb);
23090 - atomic_inc(&vcc->stats->rx);
23091 + atomic_inc_unchecked(&vcc->stats->rx);
23092 }
23093 }
23094
23095 diff -urNp linux-2.6.39.4/drivers/atm/solos-pci.c linux-2.6.39.4/drivers/atm/solos-pci.c
23096 --- linux-2.6.39.4/drivers/atm/solos-pci.c 2011-05-19 00:06:34.000000000 -0400
23097 +++ linux-2.6.39.4/drivers/atm/solos-pci.c 2011-08-05 19:44:36.000000000 -0400
23098 @@ -715,7 +715,7 @@ void solos_bh(unsigned long card_arg)
23099 }
23100 atm_charge(vcc, skb->truesize);
23101 vcc->push(vcc, skb);
23102 - atomic_inc(&vcc->stats->rx);
23103 + atomic_inc_unchecked(&vcc->stats->rx);
23104 break;
23105
23106 case PKT_STATUS:
23107 @@ -900,6 +900,8 @@ static int print_buffer(struct sk_buff *
23108 char msg[500];
23109 char item[10];
23110
23111 + pax_track_stack();
23112 +
23113 len = buf->len;
23114 for (i = 0; i < len; i++){
23115 if(i % 8 == 0)
23116 @@ -1009,7 +1011,7 @@ static uint32_t fpga_tx(struct solos_car
23117 vcc = SKB_CB(oldskb)->vcc;
23118
23119 if (vcc) {
23120 - atomic_inc(&vcc->stats->tx);
23121 + atomic_inc_unchecked(&vcc->stats->tx);
23122 solos_pop(vcc, oldskb);
23123 } else
23124 dev_kfree_skb_irq(oldskb);
23125 diff -urNp linux-2.6.39.4/drivers/atm/suni.c linux-2.6.39.4/drivers/atm/suni.c
23126 --- linux-2.6.39.4/drivers/atm/suni.c 2011-05-19 00:06:34.000000000 -0400
23127 +++ linux-2.6.39.4/drivers/atm/suni.c 2011-08-05 19:44:36.000000000 -0400
23128 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
23129
23130
23131 #define ADD_LIMITED(s,v) \
23132 - atomic_add((v),&stats->s); \
23133 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
23134 + atomic_add_unchecked((v),&stats->s); \
23135 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
23136
23137
23138 static void suni_hz(unsigned long from_timer)
23139 diff -urNp linux-2.6.39.4/drivers/atm/uPD98402.c linux-2.6.39.4/drivers/atm/uPD98402.c
23140 --- linux-2.6.39.4/drivers/atm/uPD98402.c 2011-05-19 00:06:34.000000000 -0400
23141 +++ linux-2.6.39.4/drivers/atm/uPD98402.c 2011-08-05 19:44:36.000000000 -0400
23142 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
23143 struct sonet_stats tmp;
23144 int error = 0;
23145
23146 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
23147 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
23148 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
23149 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
23150 if (zero && !error) {
23151 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
23152
23153
23154 #define ADD_LIMITED(s,v) \
23155 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
23156 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
23157 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
23158 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
23159 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
23160 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
23161
23162
23163 static void stat_event(struct atm_dev *dev)
23164 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
23165 if (reason & uPD98402_INT_PFM) stat_event(dev);
23166 if (reason & uPD98402_INT_PCO) {
23167 (void) GET(PCOCR); /* clear interrupt cause */
23168 - atomic_add(GET(HECCT),
23169 + atomic_add_unchecked(GET(HECCT),
23170 &PRIV(dev)->sonet_stats.uncorr_hcs);
23171 }
23172 if ((reason & uPD98402_INT_RFO) &&
23173 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
23174 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
23175 uPD98402_INT_LOS),PIMR); /* enable them */
23176 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
23177 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23178 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
23179 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
23180 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23181 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
23182 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
23183 return 0;
23184 }
23185
23186 diff -urNp linux-2.6.39.4/drivers/atm/zatm.c linux-2.6.39.4/drivers/atm/zatm.c
23187 --- linux-2.6.39.4/drivers/atm/zatm.c 2011-05-19 00:06:34.000000000 -0400
23188 +++ linux-2.6.39.4/drivers/atm/zatm.c 2011-08-05 19:44:36.000000000 -0400
23189 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23190 }
23191 if (!size) {
23192 dev_kfree_skb_irq(skb);
23193 - if (vcc) atomic_inc(&vcc->stats->rx_err);
23194 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
23195 continue;
23196 }
23197 if (!atm_charge(vcc,skb->truesize)) {
23198 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23199 skb->len = size;
23200 ATM_SKB(skb)->vcc = vcc;
23201 vcc->push(vcc,skb);
23202 - atomic_inc(&vcc->stats->rx);
23203 + atomic_inc_unchecked(&vcc->stats->rx);
23204 }
23205 zout(pos & 0xffff,MTA(mbx));
23206 #if 0 /* probably a stupid idea */
23207 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
23208 skb_queue_head(&zatm_vcc->backlog,skb);
23209 break;
23210 }
23211 - atomic_inc(&vcc->stats->tx);
23212 + atomic_inc_unchecked(&vcc->stats->tx);
23213 wake_up(&zatm_vcc->tx_wait);
23214 }
23215
23216 diff -urNp linux-2.6.39.4/drivers/base/power/wakeup.c linux-2.6.39.4/drivers/base/power/wakeup.c
23217 --- linux-2.6.39.4/drivers/base/power/wakeup.c 2011-05-19 00:06:34.000000000 -0400
23218 +++ linux-2.6.39.4/drivers/base/power/wakeup.c 2011-08-05 19:44:36.000000000 -0400
23219 @@ -29,14 +29,14 @@ bool events_check_enabled;
23220 * They need to be modified together atomically, so it's better to use one
23221 * atomic variable to hold them both.
23222 */
23223 -static atomic_t combined_event_count = ATOMIC_INIT(0);
23224 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
23225
23226 #define IN_PROGRESS_BITS (sizeof(int) * 4)
23227 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
23228
23229 static void split_counters(unsigned int *cnt, unsigned int *inpr)
23230 {
23231 - unsigned int comb = atomic_read(&combined_event_count);
23232 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
23233
23234 *cnt = (comb >> IN_PROGRESS_BITS);
23235 *inpr = comb & MAX_IN_PROGRESS;
23236 @@ -351,7 +351,7 @@ static void wakeup_source_activate(struc
23237 ws->last_time = ktime_get();
23238
23239 /* Increment the counter of events in progress. */
23240 - atomic_inc(&combined_event_count);
23241 + atomic_inc_unchecked(&combined_event_count);
23242 }
23243
23244 /**
23245 @@ -441,7 +441,7 @@ static void wakeup_source_deactivate(str
23246 * Increment the counter of registered wakeup events and decrement the
23247 * couter of wakeup events in progress simultaneously.
23248 */
23249 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
23250 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
23251 }
23252
23253 /**
23254 diff -urNp linux-2.6.39.4/drivers/block/cciss.c linux-2.6.39.4/drivers/block/cciss.c
23255 --- linux-2.6.39.4/drivers/block/cciss.c 2011-05-19 00:06:34.000000000 -0400
23256 +++ linux-2.6.39.4/drivers/block/cciss.c 2011-08-05 20:34:06.000000000 -0400
23257 @@ -1151,6 +1151,8 @@ static int cciss_ioctl32_passthru(struct
23258 int err;
23259 u32 cp;
23260
23261 + memset(&arg64, 0, sizeof(arg64));
23262 +
23263 err = 0;
23264 err |=
23265 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
23266 @@ -2933,7 +2935,7 @@ static void start_io(ctlr_info_t *h)
23267 while (!list_empty(&h->reqQ)) {
23268 c = list_entry(h->reqQ.next, CommandList_struct, list);
23269 /* can't do anything if fifo is full */
23270 - if ((h->access.fifo_full(h))) {
23271 + if ((h->access->fifo_full(h))) {
23272 dev_warn(&h->pdev->dev, "fifo full\n");
23273 break;
23274 }
23275 @@ -2943,7 +2945,7 @@ static void start_io(ctlr_info_t *h)
23276 h->Qdepth--;
23277
23278 /* Tell the controller execute command */
23279 - h->access.submit_command(h, c);
23280 + h->access->submit_command(h, c);
23281
23282 /* Put job onto the completed Q */
23283 addQ(&h->cmpQ, c);
23284 @@ -3369,17 +3371,17 @@ startio:
23285
23286 static inline unsigned long get_next_completion(ctlr_info_t *h)
23287 {
23288 - return h->access.command_completed(h);
23289 + return h->access->command_completed(h);
23290 }
23291
23292 static inline int interrupt_pending(ctlr_info_t *h)
23293 {
23294 - return h->access.intr_pending(h);
23295 + return h->access->intr_pending(h);
23296 }
23297
23298 static inline long interrupt_not_for_us(ctlr_info_t *h)
23299 {
23300 - return ((h->access.intr_pending(h) == 0) ||
23301 + return ((h->access->intr_pending(h) == 0) ||
23302 (h->interrupts_enabled == 0));
23303 }
23304
23305 @@ -3412,7 +3414,7 @@ static inline u32 next_command(ctlr_info
23306 u32 a;
23307
23308 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
23309 - return h->access.command_completed(h);
23310 + return h->access->command_completed(h);
23311
23312 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
23313 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
23314 @@ -3910,7 +3912,7 @@ static void __devinit cciss_put_controll
23315 trans_support & CFGTBL_Trans_use_short_tags);
23316
23317 /* Change the access methods to the performant access methods */
23318 - h->access = SA5_performant_access;
23319 + h->access = &SA5_performant_access;
23320 h->transMethod = CFGTBL_Trans_Performant;
23321
23322 return;
23323 @@ -4179,7 +4181,7 @@ static int __devinit cciss_pci_init(ctlr
23324 if (prod_index < 0)
23325 return -ENODEV;
23326 h->product_name = products[prod_index].product_name;
23327 - h->access = *(products[prod_index].access);
23328 + h->access = products[prod_index].access;
23329
23330 if (cciss_board_disabled(h)) {
23331 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
23332 @@ -4661,7 +4663,7 @@ static int __devinit cciss_init_one(stru
23333 }
23334
23335 /* make sure the board interrupts are off */
23336 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23337 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23338 if (h->msi_vector || h->msix_vector) {
23339 if (request_irq(h->intr[PERF_MODE_INT],
23340 do_cciss_msix_intr,
23341 @@ -4744,7 +4746,7 @@ static int __devinit cciss_init_one(stru
23342 cciss_scsi_setup(h);
23343
23344 /* Turn the interrupts on so we can service requests */
23345 - h->access.set_intr_mask(h, CCISS_INTR_ON);
23346 + h->access->set_intr_mask(h, CCISS_INTR_ON);
23347
23348 /* Get the firmware version */
23349 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
23350 @@ -4828,7 +4830,7 @@ static void cciss_shutdown(struct pci_de
23351 kfree(flush_buf);
23352 if (return_code != IO_OK)
23353 dev_warn(&h->pdev->dev, "Error flushing cache\n");
23354 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23355 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23356 free_irq(h->intr[PERF_MODE_INT], h);
23357 }
23358
23359 diff -urNp linux-2.6.39.4/drivers/block/cciss.h linux-2.6.39.4/drivers/block/cciss.h
23360 --- linux-2.6.39.4/drivers/block/cciss.h 2011-05-19 00:06:34.000000000 -0400
23361 +++ linux-2.6.39.4/drivers/block/cciss.h 2011-08-05 20:34:06.000000000 -0400
23362 @@ -100,7 +100,7 @@ struct ctlr_info
23363 /* information about each logical volume */
23364 drive_info_struct *drv[CISS_MAX_LUN];
23365
23366 - struct access_method access;
23367 + struct access_method *access;
23368
23369 /* queue and queue Info */
23370 struct list_head reqQ;
23371 diff -urNp linux-2.6.39.4/drivers/block/cpqarray.c linux-2.6.39.4/drivers/block/cpqarray.c
23372 --- linux-2.6.39.4/drivers/block/cpqarray.c 2011-05-19 00:06:34.000000000 -0400
23373 +++ linux-2.6.39.4/drivers/block/cpqarray.c 2011-08-05 20:34:06.000000000 -0400
23374 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
23375 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
23376 goto Enomem4;
23377 }
23378 - hba[i]->access.set_intr_mask(hba[i], 0);
23379 + hba[i]->access->set_intr_mask(hba[i], 0);
23380 if (request_irq(hba[i]->intr, do_ida_intr,
23381 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
23382 {
23383 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
23384 add_timer(&hba[i]->timer);
23385
23386 /* Enable IRQ now that spinlock and rate limit timer are set up */
23387 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23388 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23389
23390 for(j=0; j<NWD; j++) {
23391 struct gendisk *disk = ida_gendisk[i][j];
23392 @@ -694,7 +694,7 @@ DBGINFO(
23393 for(i=0; i<NR_PRODUCTS; i++) {
23394 if (board_id == products[i].board_id) {
23395 c->product_name = products[i].product_name;
23396 - c->access = *(products[i].access);
23397 + c->access = products[i].access;
23398 break;
23399 }
23400 }
23401 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
23402 hba[ctlr]->intr = intr;
23403 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
23404 hba[ctlr]->product_name = products[j].product_name;
23405 - hba[ctlr]->access = *(products[j].access);
23406 + hba[ctlr]->access = products[j].access;
23407 hba[ctlr]->ctlr = ctlr;
23408 hba[ctlr]->board_id = board_id;
23409 hba[ctlr]->pci_dev = NULL; /* not PCI */
23410 @@ -911,6 +911,8 @@ static void do_ida_request(struct reques
23411 struct scatterlist tmp_sg[SG_MAX];
23412 int i, dir, seg;
23413
23414 + pax_track_stack();
23415 +
23416 queue_next:
23417 creq = blk_peek_request(q);
23418 if (!creq)
23419 @@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
23420
23421 while((c = h->reqQ) != NULL) {
23422 /* Can't do anything if we're busy */
23423 - if (h->access.fifo_full(h) == 0)
23424 + if (h->access->fifo_full(h) == 0)
23425 return;
23426
23427 /* Get the first entry from the request Q */
23428 @@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
23429 h->Qdepth--;
23430
23431 /* Tell the controller to do our bidding */
23432 - h->access.submit_command(h, c);
23433 + h->access->submit_command(h, c);
23434
23435 /* Get onto the completion Q */
23436 addQ(&h->cmpQ, c);
23437 @@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
23438 unsigned long flags;
23439 __u32 a,a1;
23440
23441 - istat = h->access.intr_pending(h);
23442 + istat = h->access->intr_pending(h);
23443 /* Is this interrupt for us? */
23444 if (istat == 0)
23445 return IRQ_NONE;
23446 @@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
23447 */
23448 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
23449 if (istat & FIFO_NOT_EMPTY) {
23450 - while((a = h->access.command_completed(h))) {
23451 + while((a = h->access->command_completed(h))) {
23452 a1 = a; a &= ~3;
23453 if ((c = h->cmpQ) == NULL)
23454 {
23455 @@ -1449,11 +1451,11 @@ static int sendcmd(
23456 /*
23457 * Disable interrupt
23458 */
23459 - info_p->access.set_intr_mask(info_p, 0);
23460 + info_p->access->set_intr_mask(info_p, 0);
23461 /* Make sure there is room in the command FIFO */
23462 /* Actually it should be completely empty at this time. */
23463 for (i = 200000; i > 0; i--) {
23464 - temp = info_p->access.fifo_full(info_p);
23465 + temp = info_p->access->fifo_full(info_p);
23466 if (temp != 0) {
23467 break;
23468 }
23469 @@ -1466,7 +1468,7 @@ DBG(
23470 /*
23471 * Send the cmd
23472 */
23473 - info_p->access.submit_command(info_p, c);
23474 + info_p->access->submit_command(info_p, c);
23475 complete = pollcomplete(ctlr);
23476
23477 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
23478 @@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
23479 * we check the new geometry. Then turn interrupts back on when
23480 * we're done.
23481 */
23482 - host->access.set_intr_mask(host, 0);
23483 + host->access->set_intr_mask(host, 0);
23484 getgeometry(ctlr);
23485 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
23486 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
23487
23488 for(i=0; i<NWD; i++) {
23489 struct gendisk *disk = ida_gendisk[ctlr][i];
23490 @@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
23491 /* Wait (up to 2 seconds) for a command to complete */
23492
23493 for (i = 200000; i > 0; i--) {
23494 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
23495 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
23496 if (done == 0) {
23497 udelay(10); /* a short fixed delay */
23498 } else
23499 diff -urNp linux-2.6.39.4/drivers/block/cpqarray.h linux-2.6.39.4/drivers/block/cpqarray.h
23500 --- linux-2.6.39.4/drivers/block/cpqarray.h 2011-05-19 00:06:34.000000000 -0400
23501 +++ linux-2.6.39.4/drivers/block/cpqarray.h 2011-08-05 20:34:06.000000000 -0400
23502 @@ -99,7 +99,7 @@ struct ctlr_info {
23503 drv_info_t drv[NWD];
23504 struct proc_dir_entry *proc;
23505
23506 - struct access_method access;
23507 + struct access_method *access;
23508
23509 cmdlist_t *reqQ;
23510 cmdlist_t *cmpQ;
23511 diff -urNp linux-2.6.39.4/drivers/block/DAC960.c linux-2.6.39.4/drivers/block/DAC960.c
23512 --- linux-2.6.39.4/drivers/block/DAC960.c 2011-05-19 00:06:34.000000000 -0400
23513 +++ linux-2.6.39.4/drivers/block/DAC960.c 2011-08-05 19:44:36.000000000 -0400
23514 @@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
23515 unsigned long flags;
23516 int Channel, TargetID;
23517
23518 + pax_track_stack();
23519 +
23520 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
23521 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
23522 sizeof(DAC960_SCSI_Inquiry_T) +
23523 diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_int.h linux-2.6.39.4/drivers/block/drbd/drbd_int.h
23524 --- linux-2.6.39.4/drivers/block/drbd/drbd_int.h 2011-05-19 00:06:34.000000000 -0400
23525 +++ linux-2.6.39.4/drivers/block/drbd/drbd_int.h 2011-08-05 19:44:36.000000000 -0400
23526 @@ -736,7 +736,7 @@ struct drbd_request;
23527 struct drbd_epoch {
23528 struct list_head list;
23529 unsigned int barrier_nr;
23530 - atomic_t epoch_size; /* increased on every request added. */
23531 + atomic_unchecked_t epoch_size; /* increased on every request added. */
23532 atomic_t active; /* increased on every req. added, and dec on every finished. */
23533 unsigned long flags;
23534 };
23535 @@ -1108,7 +1108,7 @@ struct drbd_conf {
23536 void *int_dig_in;
23537 void *int_dig_vv;
23538 wait_queue_head_t seq_wait;
23539 - atomic_t packet_seq;
23540 + atomic_unchecked_t packet_seq;
23541 unsigned int peer_seq;
23542 spinlock_t peer_seq_lock;
23543 unsigned int minor;
23544 diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_main.c linux-2.6.39.4/drivers/block/drbd/drbd_main.c
23545 --- linux-2.6.39.4/drivers/block/drbd/drbd_main.c 2011-05-19 00:06:34.000000000 -0400
23546 +++ linux-2.6.39.4/drivers/block/drbd/drbd_main.c 2011-08-05 19:44:36.000000000 -0400
23547 @@ -2387,7 +2387,7 @@ static int _drbd_send_ack(struct drbd_co
23548 p.sector = sector;
23549 p.block_id = block_id;
23550 p.blksize = blksize;
23551 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
23552 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
23553
23554 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
23555 return false;
23556 @@ -2686,7 +2686,7 @@ int drbd_send_dblock(struct drbd_conf *m
23557 p.sector = cpu_to_be64(req->sector);
23558 p.block_id = (unsigned long)req;
23559 p.seq_num = cpu_to_be32(req->seq_num =
23560 - atomic_add_return(1, &mdev->packet_seq));
23561 + atomic_add_return_unchecked(1, &mdev->packet_seq));
23562
23563 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
23564
23565 @@ -2971,7 +2971,7 @@ void drbd_init_set_defaults(struct drbd_
23566 atomic_set(&mdev->unacked_cnt, 0);
23567 atomic_set(&mdev->local_cnt, 0);
23568 atomic_set(&mdev->net_cnt, 0);
23569 - atomic_set(&mdev->packet_seq, 0);
23570 + atomic_set_unchecked(&mdev->packet_seq, 0);
23571 atomic_set(&mdev->pp_in_use, 0);
23572 atomic_set(&mdev->pp_in_use_by_net, 0);
23573 atomic_set(&mdev->rs_sect_in, 0);
23574 @@ -3051,8 +3051,8 @@ void drbd_mdev_cleanup(struct drbd_conf
23575 mdev->receiver.t_state);
23576
23577 /* no need to lock it, I'm the only thread alive */
23578 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
23579 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
23580 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
23581 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
23582 mdev->al_writ_cnt =
23583 mdev->bm_writ_cnt =
23584 mdev->read_cnt =
23585 diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_nl.c linux-2.6.39.4/drivers/block/drbd/drbd_nl.c
23586 --- linux-2.6.39.4/drivers/block/drbd/drbd_nl.c 2011-05-19 00:06:34.000000000 -0400
23587 +++ linux-2.6.39.4/drivers/block/drbd/drbd_nl.c 2011-08-05 19:44:36.000000000 -0400
23588 @@ -2298,7 +2298,7 @@ static void drbd_connector_callback(stru
23589 module_put(THIS_MODULE);
23590 }
23591
23592 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23593 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23594
23595 static unsigned short *
23596 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
23597 @@ -2369,7 +2369,7 @@ void drbd_bcast_state(struct drbd_conf *
23598 cn_reply->id.idx = CN_IDX_DRBD;
23599 cn_reply->id.val = CN_VAL_DRBD;
23600
23601 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23602 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23603 cn_reply->ack = 0; /* not used here. */
23604 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23605 (int)((char *)tl - (char *)reply->tag_list);
23606 @@ -2401,7 +2401,7 @@ void drbd_bcast_ev_helper(struct drbd_co
23607 cn_reply->id.idx = CN_IDX_DRBD;
23608 cn_reply->id.val = CN_VAL_DRBD;
23609
23610 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23611 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23612 cn_reply->ack = 0; /* not used here. */
23613 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23614 (int)((char *)tl - (char *)reply->tag_list);
23615 @@ -2479,7 +2479,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
23616 cn_reply->id.idx = CN_IDX_DRBD;
23617 cn_reply->id.val = CN_VAL_DRBD;
23618
23619 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
23620 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
23621 cn_reply->ack = 0; // not used here.
23622 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23623 (int)((char*)tl - (char*)reply->tag_list);
23624 @@ -2518,7 +2518,7 @@ void drbd_bcast_sync_progress(struct drb
23625 cn_reply->id.idx = CN_IDX_DRBD;
23626 cn_reply->id.val = CN_VAL_DRBD;
23627
23628 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23629 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23630 cn_reply->ack = 0; /* not used here. */
23631 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23632 (int)((char *)tl - (char *)reply->tag_list);
23633 diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c
23634 --- linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c 2011-05-19 00:06:34.000000000 -0400
23635 +++ linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c 2011-08-05 19:44:36.000000000 -0400
23636 @@ -894,7 +894,7 @@ retry:
23637 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
23638 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
23639
23640 - atomic_set(&mdev->packet_seq, 0);
23641 + atomic_set_unchecked(&mdev->packet_seq, 0);
23642 mdev->peer_seq = 0;
23643
23644 drbd_thread_start(&mdev->asender);
23645 @@ -990,7 +990,7 @@ static enum finish_epoch drbd_may_finish
23646 do {
23647 next_epoch = NULL;
23648
23649 - epoch_size = atomic_read(&epoch->epoch_size);
23650 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
23651
23652 switch (ev & ~EV_CLEANUP) {
23653 case EV_PUT:
23654 @@ -1025,7 +1025,7 @@ static enum finish_epoch drbd_may_finish
23655 rv = FE_DESTROYED;
23656 } else {
23657 epoch->flags = 0;
23658 - atomic_set(&epoch->epoch_size, 0);
23659 + atomic_set_unchecked(&epoch->epoch_size, 0);
23660 /* atomic_set(&epoch->active, 0); is already zero */
23661 if (rv == FE_STILL_LIVE)
23662 rv = FE_RECYCLED;
23663 @@ -1196,14 +1196,14 @@ static int receive_Barrier(struct drbd_c
23664 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
23665 drbd_flush(mdev);
23666
23667 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
23668 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23669 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
23670 if (epoch)
23671 break;
23672 }
23673
23674 epoch = mdev->current_epoch;
23675 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
23676 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
23677
23678 D_ASSERT(atomic_read(&epoch->active) == 0);
23679 D_ASSERT(epoch->flags == 0);
23680 @@ -1215,11 +1215,11 @@ static int receive_Barrier(struct drbd_c
23681 }
23682
23683 epoch->flags = 0;
23684 - atomic_set(&epoch->epoch_size, 0);
23685 + atomic_set_unchecked(&epoch->epoch_size, 0);
23686 atomic_set(&epoch->active, 0);
23687
23688 spin_lock(&mdev->epoch_lock);
23689 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
23690 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23691 list_add(&epoch->list, &mdev->current_epoch->list);
23692 mdev->current_epoch = epoch;
23693 mdev->epochs++;
23694 @@ -1668,7 +1668,7 @@ static int receive_Data(struct drbd_conf
23695 spin_unlock(&mdev->peer_seq_lock);
23696
23697 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
23698 - atomic_inc(&mdev->current_epoch->epoch_size);
23699 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
23700 return drbd_drain_block(mdev, data_size);
23701 }
23702
23703 @@ -1694,7 +1694,7 @@ static int receive_Data(struct drbd_conf
23704
23705 spin_lock(&mdev->epoch_lock);
23706 e->epoch = mdev->current_epoch;
23707 - atomic_inc(&e->epoch->epoch_size);
23708 + atomic_inc_unchecked(&e->epoch->epoch_size);
23709 atomic_inc(&e->epoch->active);
23710 spin_unlock(&mdev->epoch_lock);
23711
23712 @@ -3905,7 +3905,7 @@ static void drbd_disconnect(struct drbd_
23713 D_ASSERT(list_empty(&mdev->done_ee));
23714
23715 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
23716 - atomic_set(&mdev->current_epoch->epoch_size, 0);
23717 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
23718 D_ASSERT(list_empty(&mdev->current_epoch->list));
23719 }
23720
23721 diff -urNp linux-2.6.39.4/drivers/block/nbd.c linux-2.6.39.4/drivers/block/nbd.c
23722 --- linux-2.6.39.4/drivers/block/nbd.c 2011-06-25 12:55:22.000000000 -0400
23723 +++ linux-2.6.39.4/drivers/block/nbd.c 2011-08-05 19:44:36.000000000 -0400
23724 @@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
23725 struct kvec iov;
23726 sigset_t blocked, oldset;
23727
23728 + pax_track_stack();
23729 +
23730 if (unlikely(!sock)) {
23731 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
23732 lo->disk->disk_name, (send ? "send" : "recv"));
23733 @@ -571,6 +573,8 @@ static void do_nbd_request(struct reques
23734 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
23735 unsigned int cmd, unsigned long arg)
23736 {
23737 + pax_track_stack();
23738 +
23739 switch (cmd) {
23740 case NBD_DISCONNECT: {
23741 struct request sreq;
23742 diff -urNp linux-2.6.39.4/drivers/char/agp/frontend.c linux-2.6.39.4/drivers/char/agp/frontend.c
23743 --- linux-2.6.39.4/drivers/char/agp/frontend.c 2011-05-19 00:06:34.000000000 -0400
23744 +++ linux-2.6.39.4/drivers/char/agp/frontend.c 2011-08-05 19:44:36.000000000 -0400
23745 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
23746 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
23747 return -EFAULT;
23748
23749 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
23750 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
23751 return -EFAULT;
23752
23753 client = agp_find_client_by_pid(reserve.pid);
23754 diff -urNp linux-2.6.39.4/drivers/char/briq_panel.c linux-2.6.39.4/drivers/char/briq_panel.c
23755 --- linux-2.6.39.4/drivers/char/briq_panel.c 2011-05-19 00:06:34.000000000 -0400
23756 +++ linux-2.6.39.4/drivers/char/briq_panel.c 2011-08-05 19:44:36.000000000 -0400
23757 @@ -9,6 +9,7 @@
23758 #include <linux/types.h>
23759 #include <linux/errno.h>
23760 #include <linux/tty.h>
23761 +#include <linux/mutex.h>
23762 #include <linux/timer.h>
23763 #include <linux/kernel.h>
23764 #include <linux/wait.h>
23765 @@ -34,6 +35,7 @@ static int vfd_is_open;
23766 static unsigned char vfd[40];
23767 static int vfd_cursor;
23768 static unsigned char ledpb, led;
23769 +static DEFINE_MUTEX(vfd_mutex);
23770
23771 static void update_vfd(void)
23772 {
23773 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
23774 if (!vfd_is_open)
23775 return -EBUSY;
23776
23777 + mutex_lock(&vfd_mutex);
23778 for (;;) {
23779 char c;
23780 if (!indx)
23781 break;
23782 - if (get_user(c, buf))
23783 + if (get_user(c, buf)) {
23784 + mutex_unlock(&vfd_mutex);
23785 return -EFAULT;
23786 + }
23787 if (esc) {
23788 set_led(c);
23789 esc = 0;
23790 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
23791 buf++;
23792 }
23793 update_vfd();
23794 + mutex_unlock(&vfd_mutex);
23795
23796 return len;
23797 }
23798 diff -urNp linux-2.6.39.4/drivers/char/genrtc.c linux-2.6.39.4/drivers/char/genrtc.c
23799 --- linux-2.6.39.4/drivers/char/genrtc.c 2011-05-19 00:06:34.000000000 -0400
23800 +++ linux-2.6.39.4/drivers/char/genrtc.c 2011-08-05 19:44:36.000000000 -0400
23801 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
23802 switch (cmd) {
23803
23804 case RTC_PLL_GET:
23805 + memset(&pll, 0, sizeof(pll));
23806 if (get_rtc_pll(&pll))
23807 return -EINVAL;
23808 else
23809 diff -urNp linux-2.6.39.4/drivers/char/hpet.c linux-2.6.39.4/drivers/char/hpet.c
23810 --- linux-2.6.39.4/drivers/char/hpet.c 2011-05-19 00:06:34.000000000 -0400
23811 +++ linux-2.6.39.4/drivers/char/hpet.c 2011-08-05 19:44:36.000000000 -0400
23812 @@ -553,7 +553,7 @@ static inline unsigned long hpet_time_di
23813 }
23814
23815 static int
23816 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
23817 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
23818 struct hpet_info *info)
23819 {
23820 struct hpet_timer __iomem *timer;
23821 diff -urNp linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c
23822 --- linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c 2011-05-19 00:06:34.000000000 -0400
23823 +++ linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c 2011-08-05 20:34:06.000000000 -0400
23824 @@ -414,7 +414,7 @@ struct ipmi_smi {
23825 struct proc_dir_entry *proc_dir;
23826 char proc_dir_name[10];
23827
23828 - atomic_t stats[IPMI_NUM_STATS];
23829 + atomic_unchecked_t stats[IPMI_NUM_STATS];
23830
23831 /*
23832 * run_to_completion duplicate of smb_info, smi_info
23833 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
23834
23835
23836 #define ipmi_inc_stat(intf, stat) \
23837 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
23838 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
23839 #define ipmi_get_stat(intf, stat) \
23840 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
23841 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
23842
23843 static int is_lan_addr(struct ipmi_addr *addr)
23844 {
23845 @@ -2844,7 +2844,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
23846 INIT_LIST_HEAD(&intf->cmd_rcvrs);
23847 init_waitqueue_head(&intf->waitq);
23848 for (i = 0; i < IPMI_NUM_STATS; i++)
23849 - atomic_set(&intf->stats[i], 0);
23850 + atomic_set_unchecked(&intf->stats[i], 0);
23851
23852 intf->proc_dir = NULL;
23853
23854 @@ -4196,6 +4196,8 @@ static void send_panic_events(char *str)
23855 struct ipmi_smi_msg smi_msg;
23856 struct ipmi_recv_msg recv_msg;
23857
23858 + pax_track_stack();
23859 +
23860 si = (struct ipmi_system_interface_addr *) &addr;
23861 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
23862 si->channel = IPMI_BMC_CHANNEL;
23863 diff -urNp linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c
23864 --- linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c 2011-05-19 00:06:34.000000000 -0400
23865 +++ linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c 2011-08-05 19:44:36.000000000 -0400
23866 @@ -276,7 +276,7 @@ struct smi_info {
23867 unsigned char slave_addr;
23868
23869 /* Counters and things for the proc filesystem. */
23870 - atomic_t stats[SI_NUM_STATS];
23871 + atomic_unchecked_t stats[SI_NUM_STATS];
23872
23873 struct task_struct *thread;
23874
23875 @@ -285,9 +285,9 @@ struct smi_info {
23876 };
23877
23878 #define smi_inc_stat(smi, stat) \
23879 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
23880 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
23881 #define smi_get_stat(smi, stat) \
23882 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
23883 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
23884
23885 #define SI_MAX_PARMS 4
23886
23887 @@ -3198,7 +3198,7 @@ static int try_smi_init(struct smi_info
23888 atomic_set(&new_smi->req_events, 0);
23889 new_smi->run_to_completion = 0;
23890 for (i = 0; i < SI_NUM_STATS; i++)
23891 - atomic_set(&new_smi->stats[i], 0);
23892 + atomic_set_unchecked(&new_smi->stats[i], 0);
23893
23894 new_smi->interrupt_disabled = 1;
23895 atomic_set(&new_smi->stop_operation, 0);
23896 diff -urNp linux-2.6.39.4/drivers/char/Kconfig linux-2.6.39.4/drivers/char/Kconfig
23897 --- linux-2.6.39.4/drivers/char/Kconfig 2011-05-19 00:06:34.000000000 -0400
23898 +++ linux-2.6.39.4/drivers/char/Kconfig 2011-08-05 19:44:36.000000000 -0400
23899 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
23900
23901 config DEVKMEM
23902 bool "/dev/kmem virtual device support"
23903 - default y
23904 + default n
23905 + depends on !GRKERNSEC_KMEM
23906 help
23907 Say Y here if you want to support the /dev/kmem device. The
23908 /dev/kmem device is rarely used, but can be used for certain
23909 @@ -596,6 +597,7 @@ config DEVPORT
23910 bool
23911 depends on !M68K
23912 depends on ISA || PCI
23913 + depends on !GRKERNSEC_KMEM
23914 default y
23915
23916 source "drivers/s390/char/Kconfig"
23917 diff -urNp linux-2.6.39.4/drivers/char/mem.c linux-2.6.39.4/drivers/char/mem.c
23918 --- linux-2.6.39.4/drivers/char/mem.c 2011-05-19 00:06:34.000000000 -0400
23919 +++ linux-2.6.39.4/drivers/char/mem.c 2011-08-05 19:44:36.000000000 -0400
23920 @@ -18,6 +18,7 @@
23921 #include <linux/raw.h>
23922 #include <linux/tty.h>
23923 #include <linux/capability.h>
23924 +#include <linux/security.h>
23925 #include <linux/ptrace.h>
23926 #include <linux/device.h>
23927 #include <linux/highmem.h>
23928 @@ -34,6 +35,10 @@
23929 # include <linux/efi.h>
23930 #endif
23931
23932 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23933 +extern struct file_operations grsec_fops;
23934 +#endif
23935 +
23936 static inline unsigned long size_inside_page(unsigned long start,
23937 unsigned long size)
23938 {
23939 @@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
23940
23941 while (cursor < to) {
23942 if (!devmem_is_allowed(pfn)) {
23943 +#ifdef CONFIG_GRKERNSEC_KMEM
23944 + gr_handle_mem_readwrite(from, to);
23945 +#else
23946 printk(KERN_INFO
23947 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23948 current->comm, from, to);
23949 +#endif
23950 return 0;
23951 }
23952 cursor += PAGE_SIZE;
23953 @@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
23954 }
23955 return 1;
23956 }
23957 +#elif defined(CONFIG_GRKERNSEC_KMEM)
23958 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23959 +{
23960 + return 0;
23961 +}
23962 #else
23963 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23964 {
23965 @@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
23966
23967 while (count > 0) {
23968 unsigned long remaining;
23969 + char *temp;
23970
23971 sz = size_inside_page(p, count);
23972
23973 @@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
23974 if (!ptr)
23975 return -EFAULT;
23976
23977 - remaining = copy_to_user(buf, ptr, sz);
23978 +#ifdef CONFIG_PAX_USERCOPY
23979 + temp = kmalloc(sz, GFP_KERNEL);
23980 + if (!temp) {
23981 + unxlate_dev_mem_ptr(p, ptr);
23982 + return -ENOMEM;
23983 + }
23984 + memcpy(temp, ptr, sz);
23985 +#else
23986 + temp = ptr;
23987 +#endif
23988 +
23989 + remaining = copy_to_user(buf, temp, sz);
23990 +
23991 +#ifdef CONFIG_PAX_USERCOPY
23992 + kfree(temp);
23993 +#endif
23994 +
23995 unxlate_dev_mem_ptr(p, ptr);
23996 if (remaining)
23997 return -EFAULT;
23998 @@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
23999 size_t count, loff_t *ppos)
24000 {
24001 unsigned long p = *ppos;
24002 - ssize_t low_count, read, sz;
24003 + ssize_t low_count, read, sz, err = 0;
24004 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
24005 - int err = 0;
24006
24007 read = 0;
24008 if (p < (unsigned long) high_memory) {
24009 @@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
24010 }
24011 #endif
24012 while (low_count > 0) {
24013 + char *temp;
24014 +
24015 sz = size_inside_page(p, low_count);
24016
24017 /*
24018 @@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
24019 */
24020 kbuf = xlate_dev_kmem_ptr((char *)p);
24021
24022 - if (copy_to_user(buf, kbuf, sz))
24023 +#ifdef CONFIG_PAX_USERCOPY
24024 + temp = kmalloc(sz, GFP_KERNEL);
24025 + if (!temp)
24026 + return -ENOMEM;
24027 + memcpy(temp, kbuf, sz);
24028 +#else
24029 + temp = kbuf;
24030 +#endif
24031 +
24032 + err = copy_to_user(buf, temp, sz);
24033 +
24034 +#ifdef CONFIG_PAX_USERCOPY
24035 + kfree(temp);
24036 +#endif
24037 +
24038 + if (err)
24039 return -EFAULT;
24040 buf += sz;
24041 p += sz;
24042 @@ -854,6 +901,9 @@ static const struct memdev {
24043 #ifdef CONFIG_CRASH_DUMP
24044 [12] = { "oldmem", 0, &oldmem_fops, NULL },
24045 #endif
24046 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
24047 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
24048 +#endif
24049 };
24050
24051 static int memory_open(struct inode *inode, struct file *filp)
24052 diff -urNp linux-2.6.39.4/drivers/char/nvram.c linux-2.6.39.4/drivers/char/nvram.c
24053 --- linux-2.6.39.4/drivers/char/nvram.c 2011-05-19 00:06:34.000000000 -0400
24054 +++ linux-2.6.39.4/drivers/char/nvram.c 2011-08-05 19:44:36.000000000 -0400
24055 @@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
24056
24057 spin_unlock_irq(&rtc_lock);
24058
24059 - if (copy_to_user(buf, contents, tmp - contents))
24060 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
24061 return -EFAULT;
24062
24063 *ppos = i;
24064 diff -urNp linux-2.6.39.4/drivers/char/random.c linux-2.6.39.4/drivers/char/random.c
24065 --- linux-2.6.39.4/drivers/char/random.c 2011-05-19 00:06:34.000000000 -0400
24066 +++ linux-2.6.39.4/drivers/char/random.c 2011-08-05 19:44:36.000000000 -0400
24067 @@ -261,8 +261,13 @@
24068 /*
24069 * Configuration information
24070 */
24071 +#ifdef CONFIG_GRKERNSEC_RANDNET
24072 +#define INPUT_POOL_WORDS 512
24073 +#define OUTPUT_POOL_WORDS 128
24074 +#else
24075 #define INPUT_POOL_WORDS 128
24076 #define OUTPUT_POOL_WORDS 32
24077 +#endif
24078 #define SEC_XFER_SIZE 512
24079 #define EXTRACT_SIZE 10
24080
24081 @@ -300,10 +305,17 @@ static struct poolinfo {
24082 int poolwords;
24083 int tap1, tap2, tap3, tap4, tap5;
24084 } poolinfo_table[] = {
24085 +#ifdef CONFIG_GRKERNSEC_RANDNET
24086 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
24087 + { 512, 411, 308, 208, 104, 1 },
24088 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
24089 + { 128, 103, 76, 51, 25, 1 },
24090 +#else
24091 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
24092 { 128, 103, 76, 51, 25, 1 },
24093 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
24094 { 32, 26, 20, 14, 7, 1 },
24095 +#endif
24096 #if 0
24097 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
24098 { 2048, 1638, 1231, 819, 411, 1 },
24099 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
24100
24101 extract_buf(r, tmp);
24102 i = min_t(int, nbytes, EXTRACT_SIZE);
24103 - if (copy_to_user(buf, tmp, i)) {
24104 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
24105 ret = -EFAULT;
24106 break;
24107 }
24108 @@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
24109 #include <linux/sysctl.h>
24110
24111 static int min_read_thresh = 8, min_write_thresh;
24112 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
24113 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
24114 static int max_write_thresh = INPUT_POOL_WORDS * 32;
24115 static char sysctl_bootid[16];
24116
24117 diff -urNp linux-2.6.39.4/drivers/char/sonypi.c linux-2.6.39.4/drivers/char/sonypi.c
24118 --- linux-2.6.39.4/drivers/char/sonypi.c 2011-05-19 00:06:34.000000000 -0400
24119 +++ linux-2.6.39.4/drivers/char/sonypi.c 2011-08-05 19:44:36.000000000 -0400
24120 @@ -55,6 +55,7 @@
24121 #include <asm/uaccess.h>
24122 #include <asm/io.h>
24123 #include <asm/system.h>
24124 +#include <asm/local.h>
24125
24126 #include <linux/sonypi.h>
24127
24128 @@ -491,7 +492,7 @@ static struct sonypi_device {
24129 spinlock_t fifo_lock;
24130 wait_queue_head_t fifo_proc_list;
24131 struct fasync_struct *fifo_async;
24132 - int open_count;
24133 + local_t open_count;
24134 int model;
24135 struct input_dev *input_jog_dev;
24136 struct input_dev *input_key_dev;
24137 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
24138 static int sonypi_misc_release(struct inode *inode, struct file *file)
24139 {
24140 mutex_lock(&sonypi_device.lock);
24141 - sonypi_device.open_count--;
24142 + local_dec(&sonypi_device.open_count);
24143 mutex_unlock(&sonypi_device.lock);
24144 return 0;
24145 }
24146 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
24147 {
24148 mutex_lock(&sonypi_device.lock);
24149 /* Flush input queue on first open */
24150 - if (!sonypi_device.open_count)
24151 + if (!local_read(&sonypi_device.open_count))
24152 kfifo_reset(&sonypi_device.fifo);
24153 - sonypi_device.open_count++;
24154 + local_inc(&sonypi_device.open_count);
24155 mutex_unlock(&sonypi_device.lock);
24156
24157 return 0;
24158 diff -urNp linux-2.6.39.4/drivers/char/tpm/tpm_bios.c linux-2.6.39.4/drivers/char/tpm/tpm_bios.c
24159 --- linux-2.6.39.4/drivers/char/tpm/tpm_bios.c 2011-05-19 00:06:34.000000000 -0400
24160 +++ linux-2.6.39.4/drivers/char/tpm/tpm_bios.c 2011-08-05 19:44:36.000000000 -0400
24161 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
24162 event = addr;
24163
24164 if ((event->event_type == 0 && event->event_size == 0) ||
24165 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
24166 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
24167 return NULL;
24168
24169 return addr;
24170 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
24171 return NULL;
24172
24173 if ((event->event_type == 0 && event->event_size == 0) ||
24174 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
24175 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
24176 return NULL;
24177
24178 (*pos)++;
24179 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
24180 int i;
24181
24182 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
24183 - seq_putc(m, data[i]);
24184 + if (!seq_putc(m, data[i]))
24185 + return -EFAULT;
24186
24187 return 0;
24188 }
24189 @@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log
24190 log->bios_event_log_end = log->bios_event_log + len;
24191
24192 virt = acpi_os_map_memory(start, len);
24193 + if (!virt) {
24194 + kfree(log->bios_event_log);
24195 + log->bios_event_log = NULL;
24196 + return -EFAULT;
24197 + }
24198
24199 memcpy(log->bios_event_log, virt, len);
24200
24201 diff -urNp linux-2.6.39.4/drivers/char/tpm/tpm.c linux-2.6.39.4/drivers/char/tpm/tpm.c
24202 --- linux-2.6.39.4/drivers/char/tpm/tpm.c 2011-05-19 00:06:34.000000000 -0400
24203 +++ linux-2.6.39.4/drivers/char/tpm/tpm.c 2011-08-05 19:44:36.000000000 -0400
24204 @@ -411,7 +411,7 @@ static ssize_t tpm_transmit(struct tpm_c
24205 chip->vendor.req_complete_val)
24206 goto out_recv;
24207
24208 - if ((status == chip->vendor.req_canceled)) {
24209 + if (status == chip->vendor.req_canceled) {
24210 dev_err(chip->dev, "Operation Canceled\n");
24211 rc = -ECANCELED;
24212 goto out;
24213 @@ -844,6 +844,8 @@ ssize_t tpm_show_pubek(struct device *de
24214
24215 struct tpm_chip *chip = dev_get_drvdata(dev);
24216
24217 + pax_track_stack();
24218 +
24219 tpm_cmd.header.in = tpm_readpubek_header;
24220 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
24221 "attempting to read the PUBEK");
24222 diff -urNp linux-2.6.39.4/drivers/crypto/hifn_795x.c linux-2.6.39.4/drivers/crypto/hifn_795x.c
24223 --- linux-2.6.39.4/drivers/crypto/hifn_795x.c 2011-05-19 00:06:34.000000000 -0400
24224 +++ linux-2.6.39.4/drivers/crypto/hifn_795x.c 2011-08-05 19:44:36.000000000 -0400
24225 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
24226 0xCA, 0x34, 0x2B, 0x2E};
24227 struct scatterlist sg;
24228
24229 + pax_track_stack();
24230 +
24231 memset(src, 0, sizeof(src));
24232 memset(ctx.key, 0, sizeof(ctx.key));
24233
24234 diff -urNp linux-2.6.39.4/drivers/crypto/padlock-aes.c linux-2.6.39.4/drivers/crypto/padlock-aes.c
24235 --- linux-2.6.39.4/drivers/crypto/padlock-aes.c 2011-05-19 00:06:34.000000000 -0400
24236 +++ linux-2.6.39.4/drivers/crypto/padlock-aes.c 2011-08-05 19:44:36.000000000 -0400
24237 @@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
24238 struct crypto_aes_ctx gen_aes;
24239 int cpu;
24240
24241 + pax_track_stack();
24242 +
24243 if (key_len % 8) {
24244 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
24245 return -EINVAL;
24246 diff -urNp linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c
24247 --- linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c 2011-05-19 00:06:34.000000000 -0400
24248 +++ linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c 2011-08-05 19:44:36.000000000 -0400
24249 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
24250 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
24251 static int edac_pci_poll_msec = 1000; /* one second workq period */
24252
24253 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
24254 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
24255 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
24256 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
24257
24258 static struct kobject *edac_pci_top_main_kobj;
24259 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
24260 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
24261 edac_printk(KERN_CRIT, EDAC_PCI,
24262 "Signaled System Error on %s\n",
24263 pci_name(dev));
24264 - atomic_inc(&pci_nonparity_count);
24265 + atomic_inc_unchecked(&pci_nonparity_count);
24266 }
24267
24268 if (status & (PCI_STATUS_PARITY)) {
24269 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
24270 "Master Data Parity Error on %s\n",
24271 pci_name(dev));
24272
24273 - atomic_inc(&pci_parity_count);
24274 + atomic_inc_unchecked(&pci_parity_count);
24275 }
24276
24277 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24278 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
24279 "Detected Parity Error on %s\n",
24280 pci_name(dev));
24281
24282 - atomic_inc(&pci_parity_count);
24283 + atomic_inc_unchecked(&pci_parity_count);
24284 }
24285 }
24286
24287 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
24288 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
24289 "Signaled System Error on %s\n",
24290 pci_name(dev));
24291 - atomic_inc(&pci_nonparity_count);
24292 + atomic_inc_unchecked(&pci_nonparity_count);
24293 }
24294
24295 if (status & (PCI_STATUS_PARITY)) {
24296 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
24297 "Master Data Parity Error on "
24298 "%s\n", pci_name(dev));
24299
24300 - atomic_inc(&pci_parity_count);
24301 + atomic_inc_unchecked(&pci_parity_count);
24302 }
24303
24304 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24305 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
24306 "Detected Parity Error on %s\n",
24307 pci_name(dev));
24308
24309 - atomic_inc(&pci_parity_count);
24310 + atomic_inc_unchecked(&pci_parity_count);
24311 }
24312 }
24313 }
24314 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
24315 if (!check_pci_errors)
24316 return;
24317
24318 - before_count = atomic_read(&pci_parity_count);
24319 + before_count = atomic_read_unchecked(&pci_parity_count);
24320
24321 /* scan all PCI devices looking for a Parity Error on devices and
24322 * bridges.
24323 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
24324 /* Only if operator has selected panic on PCI Error */
24325 if (edac_pci_get_panic_on_pe()) {
24326 /* If the count is different 'after' from 'before' */
24327 - if (before_count != atomic_read(&pci_parity_count))
24328 + if (before_count != atomic_read_unchecked(&pci_parity_count))
24329 panic("EDAC: PCI Parity Error");
24330 }
24331 }
24332 diff -urNp linux-2.6.39.4/drivers/edac/i7core_edac.c linux-2.6.39.4/drivers/edac/i7core_edac.c
24333 --- linux-2.6.39.4/drivers/edac/i7core_edac.c 2011-05-19 00:06:34.000000000 -0400
24334 +++ linux-2.6.39.4/drivers/edac/i7core_edac.c 2011-08-05 19:44:36.000000000 -0400
24335 @@ -1670,7 +1670,7 @@ static void i7core_mce_output_error(stru
24336 char *type, *optype, *err, *msg;
24337 unsigned long error = m->status & 0x1ff0000l;
24338 u32 optypenum = (m->status >> 4) & 0x07;
24339 - u32 core_err_cnt = (m->status >> 38) && 0x7fff;
24340 + u32 core_err_cnt = (m->status >> 38) & 0x7fff;
24341 u32 dimm = (m->misc >> 16) & 0x3;
24342 u32 channel = (m->misc >> 18) & 0x3;
24343 u32 syndrome = m->misc >> 32;
24344 diff -urNp linux-2.6.39.4/drivers/edac/mce_amd.h linux-2.6.39.4/drivers/edac/mce_amd.h
24345 --- linux-2.6.39.4/drivers/edac/mce_amd.h 2011-05-19 00:06:34.000000000 -0400
24346 +++ linux-2.6.39.4/drivers/edac/mce_amd.h 2011-08-05 20:34:06.000000000 -0400
24347 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
24348 bool (*dc_mce)(u16, u8);
24349 bool (*ic_mce)(u16, u8);
24350 bool (*nb_mce)(u16, u8);
24351 -};
24352 +} __no_const;
24353
24354 void amd_report_gart_errors(bool);
24355 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
24356 diff -urNp linux-2.6.39.4/drivers/firewire/core-card.c linux-2.6.39.4/drivers/firewire/core-card.c
24357 --- linux-2.6.39.4/drivers/firewire/core-card.c 2011-05-19 00:06:34.000000000 -0400
24358 +++ linux-2.6.39.4/drivers/firewire/core-card.c 2011-08-05 20:34:06.000000000 -0400
24359 @@ -652,7 +652,7 @@ void fw_card_release(struct kref *kref)
24360
24361 void fw_core_remove_card(struct fw_card *card)
24362 {
24363 - struct fw_card_driver dummy_driver = dummy_driver_template;
24364 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
24365
24366 card->driver->update_phy_reg(card, 4,
24367 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
24368 diff -urNp linux-2.6.39.4/drivers/firewire/core-cdev.c linux-2.6.39.4/drivers/firewire/core-cdev.c
24369 --- linux-2.6.39.4/drivers/firewire/core-cdev.c 2011-05-19 00:06:34.000000000 -0400
24370 +++ linux-2.6.39.4/drivers/firewire/core-cdev.c 2011-08-05 19:44:36.000000000 -0400
24371 @@ -1312,8 +1312,7 @@ static int init_iso_resource(struct clie
24372 int ret;
24373
24374 if ((request->channels == 0 && request->bandwidth == 0) ||
24375 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
24376 - request->bandwidth < 0)
24377 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
24378 return -EINVAL;
24379
24380 r = kmalloc(sizeof(*r), GFP_KERNEL);
24381 diff -urNp linux-2.6.39.4/drivers/firewire/core.h linux-2.6.39.4/drivers/firewire/core.h
24382 --- linux-2.6.39.4/drivers/firewire/core.h 2011-05-19 00:06:34.000000000 -0400
24383 +++ linux-2.6.39.4/drivers/firewire/core.h 2011-08-05 20:34:06.000000000 -0400
24384 @@ -99,6 +99,7 @@ struct fw_card_driver {
24385
24386 int (*stop_iso)(struct fw_iso_context *ctx);
24387 };
24388 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
24389
24390 void fw_card_initialize(struct fw_card *card,
24391 const struct fw_card_driver *driver, struct device *device);
24392 diff -urNp linux-2.6.39.4/drivers/firewire/core-transaction.c linux-2.6.39.4/drivers/firewire/core-transaction.c
24393 --- linux-2.6.39.4/drivers/firewire/core-transaction.c 2011-05-19 00:06:34.000000000 -0400
24394 +++ linux-2.6.39.4/drivers/firewire/core-transaction.c 2011-08-05 19:44:36.000000000 -0400
24395 @@ -36,6 +36,7 @@
24396 #include <linux/string.h>
24397 #include <linux/timer.h>
24398 #include <linux/types.h>
24399 +#include <linux/sched.h>
24400
24401 #include <asm/byteorder.h>
24402
24403 @@ -420,6 +421,8 @@ int fw_run_transaction(struct fw_card *c
24404 struct transaction_callback_data d;
24405 struct fw_transaction t;
24406
24407 + pax_track_stack();
24408 +
24409 init_timer_on_stack(&t.split_timeout_timer);
24410 init_completion(&d.done);
24411 d.payload = payload;
24412 diff -urNp linux-2.6.39.4/drivers/firmware/dmi_scan.c linux-2.6.39.4/drivers/firmware/dmi_scan.c
24413 --- linux-2.6.39.4/drivers/firmware/dmi_scan.c 2011-05-19 00:06:34.000000000 -0400
24414 +++ linux-2.6.39.4/drivers/firmware/dmi_scan.c 2011-08-05 19:44:36.000000000 -0400
24415 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
24416 }
24417 }
24418 else {
24419 - /*
24420 - * no iounmap() for that ioremap(); it would be a no-op, but
24421 - * it's so early in setup that sucker gets confused into doing
24422 - * what it shouldn't if we actually call it.
24423 - */
24424 p = dmi_ioremap(0xF0000, 0x10000);
24425 if (p == NULL)
24426 goto error;
24427 diff -urNp linux-2.6.39.4/drivers/gpio/vr41xx_giu.c linux-2.6.39.4/drivers/gpio/vr41xx_giu.c
24428 --- linux-2.6.39.4/drivers/gpio/vr41xx_giu.c 2011-05-19 00:06:34.000000000 -0400
24429 +++ linux-2.6.39.4/drivers/gpio/vr41xx_giu.c 2011-08-05 19:44:36.000000000 -0400
24430 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
24431 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
24432 maskl, pendl, maskh, pendh);
24433
24434 - atomic_inc(&irq_err_count);
24435 + atomic_inc_unchecked(&irq_err_count);
24436
24437 return -EINVAL;
24438 }
24439 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c
24440 --- linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c 2011-05-19 00:06:34.000000000 -0400
24441 +++ linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c 2011-08-05 19:44:36.000000000 -0400
24442 @@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
24443 struct drm_crtc *tmp;
24444 int crtc_mask = 1;
24445
24446 - WARN(!crtc, "checking null crtc?\n");
24447 + BUG_ON(!crtc);
24448
24449 dev = crtc->dev;
24450
24451 @@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
24452 struct drm_encoder *encoder;
24453 bool ret = true;
24454
24455 + pax_track_stack();
24456 +
24457 crtc->enabled = drm_helper_crtc_in_use(crtc);
24458 if (!crtc->enabled)
24459 return true;
24460 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_drv.c linux-2.6.39.4/drivers/gpu/drm/drm_drv.c
24461 --- linux-2.6.39.4/drivers/gpu/drm/drm_drv.c 2011-05-19 00:06:34.000000000 -0400
24462 +++ linux-2.6.39.4/drivers/gpu/drm/drm_drv.c 2011-08-05 19:44:36.000000000 -0400
24463 @@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
24464
24465 dev = file_priv->minor->dev;
24466 atomic_inc(&dev->ioctl_count);
24467 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
24468 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
24469 ++file_priv->ioctl_count;
24470
24471 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
24472 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_fops.c linux-2.6.39.4/drivers/gpu/drm/drm_fops.c
24473 --- linux-2.6.39.4/drivers/gpu/drm/drm_fops.c 2011-05-19 00:06:34.000000000 -0400
24474 +++ linux-2.6.39.4/drivers/gpu/drm/drm_fops.c 2011-08-05 19:44:36.000000000 -0400
24475 @@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
24476 }
24477
24478 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
24479 - atomic_set(&dev->counts[i], 0);
24480 + atomic_set_unchecked(&dev->counts[i], 0);
24481
24482 dev->sigdata.lock = NULL;
24483
24484 @@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
24485
24486 retcode = drm_open_helper(inode, filp, dev);
24487 if (!retcode) {
24488 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
24489 - if (!dev->open_count++)
24490 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
24491 + if (local_inc_return(&dev->open_count) == 1)
24492 retcode = drm_setup(dev);
24493 }
24494 if (!retcode) {
24495 @@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
24496
24497 mutex_lock(&drm_global_mutex);
24498
24499 - DRM_DEBUG("open_count = %d\n", dev->open_count);
24500 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
24501
24502 if (dev->driver->preclose)
24503 dev->driver->preclose(dev, file_priv);
24504 @@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
24505 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
24506 task_pid_nr(current),
24507 (long)old_encode_dev(file_priv->minor->device),
24508 - dev->open_count);
24509 + local_read(&dev->open_count));
24510
24511 /* if the master has gone away we can't do anything with the lock */
24512 if (file_priv->minor->master)
24513 @@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
24514 * End inline drm_release
24515 */
24516
24517 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
24518 - if (!--dev->open_count) {
24519 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
24520 + if (local_dec_and_test(&dev->open_count)) {
24521 if (atomic_read(&dev->ioctl_count)) {
24522 DRM_ERROR("Device busy: %d\n",
24523 atomic_read(&dev->ioctl_count));
24524 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_global.c linux-2.6.39.4/drivers/gpu/drm/drm_global.c
24525 --- linux-2.6.39.4/drivers/gpu/drm/drm_global.c 2011-05-19 00:06:34.000000000 -0400
24526 +++ linux-2.6.39.4/drivers/gpu/drm/drm_global.c 2011-08-05 19:44:36.000000000 -0400
24527 @@ -36,7 +36,7 @@
24528 struct drm_global_item {
24529 struct mutex mutex;
24530 void *object;
24531 - int refcount;
24532 + atomic_t refcount;
24533 };
24534
24535 static struct drm_global_item glob[DRM_GLOBAL_NUM];
24536 @@ -49,7 +49,7 @@ void drm_global_init(void)
24537 struct drm_global_item *item = &glob[i];
24538 mutex_init(&item->mutex);
24539 item->object = NULL;
24540 - item->refcount = 0;
24541 + atomic_set(&item->refcount, 0);
24542 }
24543 }
24544
24545 @@ -59,7 +59,7 @@ void drm_global_release(void)
24546 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
24547 struct drm_global_item *item = &glob[i];
24548 BUG_ON(item->object != NULL);
24549 - BUG_ON(item->refcount != 0);
24550 + BUG_ON(atomic_read(&item->refcount) != 0);
24551 }
24552 }
24553
24554 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
24555 void *object;
24556
24557 mutex_lock(&item->mutex);
24558 - if (item->refcount == 0) {
24559 + if (atomic_read(&item->refcount) == 0) {
24560 item->object = kzalloc(ref->size, GFP_KERNEL);
24561 if (unlikely(item->object == NULL)) {
24562 ret = -ENOMEM;
24563 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
24564 goto out_err;
24565
24566 }
24567 - ++item->refcount;
24568 + atomic_inc(&item->refcount);
24569 ref->object = item->object;
24570 object = item->object;
24571 mutex_unlock(&item->mutex);
24572 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
24573 struct drm_global_item *item = &glob[ref->global_type];
24574
24575 mutex_lock(&item->mutex);
24576 - BUG_ON(item->refcount == 0);
24577 + BUG_ON(atomic_read(&item->refcount) == 0);
24578 BUG_ON(ref->object != item->object);
24579 - if (--item->refcount == 0) {
24580 + if (atomic_dec_and_test(&item->refcount)) {
24581 ref->release(ref);
24582 item->object = NULL;
24583 }
24584 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_info.c linux-2.6.39.4/drivers/gpu/drm/drm_info.c
24585 --- linux-2.6.39.4/drivers/gpu/drm/drm_info.c 2011-05-19 00:06:34.000000000 -0400
24586 +++ linux-2.6.39.4/drivers/gpu/drm/drm_info.c 2011-08-05 19:44:36.000000000 -0400
24587 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
24588 struct drm_local_map *map;
24589 struct drm_map_list *r_list;
24590
24591 - /* Hardcoded from _DRM_FRAME_BUFFER,
24592 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
24593 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
24594 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
24595 + static const char * const types[] = {
24596 + [_DRM_FRAME_BUFFER] = "FB",
24597 + [_DRM_REGISTERS] = "REG",
24598 + [_DRM_SHM] = "SHM",
24599 + [_DRM_AGP] = "AGP",
24600 + [_DRM_SCATTER_GATHER] = "SG",
24601 + [_DRM_CONSISTENT] = "PCI",
24602 + [_DRM_GEM] = "GEM" };
24603 const char *type;
24604 int i;
24605
24606 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
24607 map = r_list->map;
24608 if (!map)
24609 continue;
24610 - if (map->type < 0 || map->type > 5)
24611 + if (map->type >= ARRAY_SIZE(types))
24612 type = "??";
24613 else
24614 type = types[map->type];
24615 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
24616 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
24617 vma->vm_flags & VM_LOCKED ? 'l' : '-',
24618 vma->vm_flags & VM_IO ? 'i' : '-',
24619 +#ifdef CONFIG_GRKERNSEC_HIDESYM
24620 + 0);
24621 +#else
24622 vma->vm_pgoff);
24623 +#endif
24624
24625 #if defined(__i386__)
24626 pgprot = pgprot_val(vma->vm_page_prot);
24627 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c
24628 --- linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c 2011-05-19 00:06:34.000000000 -0400
24629 +++ linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c 2011-08-05 19:44:36.000000000 -0400
24630 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
24631 stats->data[i].value =
24632 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
24633 else
24634 - stats->data[i].value = atomic_read(&dev->counts[i]);
24635 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
24636 stats->data[i].type = dev->types[i];
24637 }
24638
24639 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_lock.c linux-2.6.39.4/drivers/gpu/drm/drm_lock.c
24640 --- linux-2.6.39.4/drivers/gpu/drm/drm_lock.c 2011-05-19 00:06:34.000000000 -0400
24641 +++ linux-2.6.39.4/drivers/gpu/drm/drm_lock.c 2011-08-05 19:44:36.000000000 -0400
24642 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
24643 if (drm_lock_take(&master->lock, lock->context)) {
24644 master->lock.file_priv = file_priv;
24645 master->lock.lock_time = jiffies;
24646 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
24647 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
24648 break; /* Got lock */
24649 }
24650
24651 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
24652 return -EINVAL;
24653 }
24654
24655 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
24656 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
24657
24658 if (drm_lock_free(&master->lock, lock->context)) {
24659 /* FIXME: Should really bail out here. */
24660 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c
24661 --- linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c 2011-05-19 00:06:34.000000000 -0400
24662 +++ linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c 2011-08-05 19:44:36.000000000 -0400
24663 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
24664 dma->buflist[vertex->idx],
24665 vertex->discard, vertex->used);
24666
24667 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24668 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24669 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24670 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24671 sarea_priv->last_enqueue = dev_priv->counter - 1;
24672 sarea_priv->last_dispatch = (int)hw_status[5];
24673
24674 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
24675 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
24676 mc->last_render);
24677
24678 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24679 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24680 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24681 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24682 sarea_priv->last_enqueue = dev_priv->counter - 1;
24683 sarea_priv->last_dispatch = (int)hw_status[5];
24684
24685 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h
24686 --- linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h 2011-05-19 00:06:34.000000000 -0400
24687 +++ linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h 2011-08-05 19:44:36.000000000 -0400
24688 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
24689 int page_flipping;
24690
24691 wait_queue_head_t irq_queue;
24692 - atomic_t irq_received;
24693 - atomic_t irq_emitted;
24694 + atomic_unchecked_t irq_received;
24695 + atomic_unchecked_t irq_emitted;
24696
24697 int front_offset;
24698 } drm_i810_private_t;
24699 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c
24700 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-19 00:06:34.000000000 -0400
24701 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-08-05 19:44:36.000000000 -0400
24702 @@ -496,7 +496,7 @@ static int i915_interrupt_info(struct se
24703 I915_READ(GTIMR));
24704 }
24705 seq_printf(m, "Interrupts received: %d\n",
24706 - atomic_read(&dev_priv->irq_received));
24707 + atomic_read_unchecked(&dev_priv->irq_received));
24708 for (i = 0; i < I915_NUM_RINGS; i++) {
24709 if (IS_GEN6(dev)) {
24710 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
24711 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c
24712 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c 2011-05-19 00:06:34.000000000 -0400
24713 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c 2011-08-05 19:44:36.000000000 -0400
24714 @@ -1171,7 +1171,7 @@ static bool i915_switcheroo_can_switch(s
24715 bool can_switch;
24716
24717 spin_lock(&dev->count_lock);
24718 - can_switch = (dev->open_count == 0);
24719 + can_switch = (local_read(&dev->open_count) == 0);
24720 spin_unlock(&dev->count_lock);
24721 return can_switch;
24722 }
24723 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h
24724 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h 2011-05-19 00:06:34.000000000 -0400
24725 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h 2011-08-05 20:34:06.000000000 -0400
24726 @@ -209,7 +209,7 @@ struct drm_i915_display_funcs {
24727 /* display clock increase/decrease */
24728 /* pll clock increase/decrease */
24729 /* clock gating init */
24730 -};
24731 +} __no_const;
24732
24733 struct intel_device_info {
24734 u8 gen;
24735 @@ -287,7 +287,7 @@ typedef struct drm_i915_private {
24736 int current_page;
24737 int page_flipping;
24738
24739 - atomic_t irq_received;
24740 + atomic_unchecked_t irq_received;
24741
24742 /* protects the irq masks */
24743 spinlock_t irq_lock;
24744 @@ -848,7 +848,7 @@ struct drm_i915_gem_object {
24745 * will be page flipped away on the next vblank. When it
24746 * reaches 0, dev_priv->pending_flip_queue will be woken up.
24747 */
24748 - atomic_t pending_flip;
24749 + atomic_unchecked_t pending_flip;
24750 };
24751
24752 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
24753 @@ -1232,7 +1232,7 @@ extern int intel_setup_gmbus(struct drm_
24754 extern void intel_teardown_gmbus(struct drm_device *dev);
24755 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
24756 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
24757 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24758 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24759 {
24760 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
24761 }
24762 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c
24763 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-05-19 00:06:34.000000000 -0400
24764 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-05 19:44:36.000000000 -0400
24765 @@ -192,7 +192,7 @@ i915_gem_object_set_to_gpu_domain(struct
24766 i915_gem_release_mmap(obj);
24767
24768 if (obj->base.pending_write_domain)
24769 - cd->flips |= atomic_read(&obj->pending_flip);
24770 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
24771
24772 /* The actual obj->write_domain will be updated with
24773 * pending_write_domain after we emit the accumulated flush for all
24774 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c
24775 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c 2011-07-09 09:18:51.000000000 -0400
24776 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c 2011-08-05 19:44:36.000000000 -0400
24777 @@ -1101,7 +1101,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
24778 int ret = IRQ_NONE, pipe;
24779 bool blc_event = false;
24780
24781 - atomic_inc(&dev_priv->irq_received);
24782 + atomic_inc_unchecked(&dev_priv->irq_received);
24783
24784 if (HAS_PCH_SPLIT(dev))
24785 return ironlake_irq_handler(dev);
24786 @@ -1666,7 +1666,7 @@ void i915_driver_irq_preinstall(struct d
24787 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24788 int pipe;
24789
24790 - atomic_set(&dev_priv->irq_received, 0);
24791 + atomic_set_unchecked(&dev_priv->irq_received, 0);
24792
24793 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24794 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24795 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c
24796 --- linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c 2011-05-19 00:06:34.000000000 -0400
24797 +++ linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c 2011-08-05 19:44:36.000000000 -0400
24798 @@ -2244,7 +2244,7 @@ intel_pipe_set_base(struct drm_crtc *crt
24799
24800 wait_event(dev_priv->pending_flip_queue,
24801 atomic_read(&dev_priv->mm.wedged) ||
24802 - atomic_read(&obj->pending_flip) == 0);
24803 + atomic_read_unchecked(&obj->pending_flip) == 0);
24804
24805 /* Big Hammer, we also need to ensure that any pending
24806 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
24807 @@ -2712,7 +2712,7 @@ static void intel_crtc_wait_for_pending_
24808 obj = to_intel_framebuffer(crtc->fb)->obj;
24809 dev_priv = crtc->dev->dev_private;
24810 wait_event(dev_priv->pending_flip_queue,
24811 - atomic_read(&obj->pending_flip) == 0);
24812 + atomic_read_unchecked(&obj->pending_flip) == 0);
24813 }
24814
24815 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
24816 @@ -6016,7 +6016,7 @@ static void do_intel_finish_page_flip(st
24817
24818 atomic_clear_mask(1 << intel_crtc->plane,
24819 &obj->pending_flip.counter);
24820 - if (atomic_read(&obj->pending_flip) == 0)
24821 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
24822 wake_up(&dev_priv->pending_flip_queue);
24823
24824 schedule_work(&work->work);
24825 @@ -6145,7 +6145,7 @@ static int intel_crtc_page_flip(struct d
24826 /* Block clients from rendering to the new back buffer until
24827 * the flip occurs and the object is no longer visible.
24828 */
24829 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24830 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24831
24832 switch (INTEL_INFO(dev)->gen) {
24833 case 2:
24834 diff -urNp linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h
24835 --- linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h 2011-05-19 00:06:34.000000000 -0400
24836 +++ linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h 2011-08-05 19:44:36.000000000 -0400
24837 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
24838 u32 clear_cmd;
24839 u32 maccess;
24840
24841 - atomic_t vbl_received; /**< Number of vblanks received. */
24842 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
24843 wait_queue_head_t fence_queue;
24844 - atomic_t last_fence_retired;
24845 + atomic_unchecked_t last_fence_retired;
24846 u32 next_fence_to_post;
24847
24848 unsigned int fb_cpp;
24849 diff -urNp linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c
24850 --- linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c 2011-05-19 00:06:34.000000000 -0400
24851 +++ linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c 2011-08-05 19:44:36.000000000 -0400
24852 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
24853 if (crtc != 0)
24854 return 0;
24855
24856 - return atomic_read(&dev_priv->vbl_received);
24857 + return atomic_read_unchecked(&dev_priv->vbl_received);
24858 }
24859
24860
24861 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24862 /* VBLANK interrupt */
24863 if (status & MGA_VLINEPEN) {
24864 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
24865 - atomic_inc(&dev_priv->vbl_received);
24866 + atomic_inc_unchecked(&dev_priv->vbl_received);
24867 drm_handle_vblank(dev, 0);
24868 handled = 1;
24869 }
24870 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24871 if ((prim_start & ~0x03) != (prim_end & ~0x03))
24872 MGA_WRITE(MGA_PRIMEND, prim_end);
24873
24874 - atomic_inc(&dev_priv->last_fence_retired);
24875 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
24876 DRM_WAKEUP(&dev_priv->fence_queue);
24877 handled = 1;
24878 }
24879 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
24880 * using fences.
24881 */
24882 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
24883 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
24884 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
24885 - *sequence) <= (1 << 23)));
24886
24887 *sequence = cur_fence;
24888 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h
24889 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-05-19 00:06:34.000000000 -0400
24890 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-05 20:34:06.000000000 -0400
24891 @@ -228,7 +228,7 @@ struct nouveau_channel {
24892 struct list_head pending;
24893 uint32_t sequence;
24894 uint32_t sequence_ack;
24895 - atomic_t last_sequence_irq;
24896 + atomic_unchecked_t last_sequence_irq;
24897 } fence;
24898
24899 /* DMA push buffer */
24900 @@ -317,13 +317,13 @@ struct nouveau_instmem_engine {
24901 struct nouveau_mc_engine {
24902 int (*init)(struct drm_device *dev);
24903 void (*takedown)(struct drm_device *dev);
24904 -};
24905 +} __no_const;
24906
24907 struct nouveau_timer_engine {
24908 int (*init)(struct drm_device *dev);
24909 void (*takedown)(struct drm_device *dev);
24910 uint64_t (*read)(struct drm_device *dev);
24911 -};
24912 +} __no_const;
24913
24914 struct nouveau_fb_engine {
24915 int num_tiles;
24916 @@ -516,7 +516,7 @@ struct nouveau_vram_engine {
24917 void (*put)(struct drm_device *, struct nouveau_mem **);
24918
24919 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
24920 -};
24921 +} __no_const;
24922
24923 struct nouveau_engine {
24924 struct nouveau_instmem_engine instmem;
24925 @@ -662,7 +662,7 @@ struct drm_nouveau_private {
24926 struct drm_global_reference mem_global_ref;
24927 struct ttm_bo_global_ref bo_global_ref;
24928 struct ttm_bo_device bdev;
24929 - atomic_t validate_sequence;
24930 + atomic_unchecked_t validate_sequence;
24931 } ttm;
24932
24933 struct {
24934 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c
24935 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-05-19 00:06:34.000000000 -0400
24936 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-05 19:44:36.000000000 -0400
24937 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
24938 if (USE_REFCNT(dev))
24939 sequence = nvchan_rd32(chan, 0x48);
24940 else
24941 - sequence = atomic_read(&chan->fence.last_sequence_irq);
24942 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
24943
24944 if (chan->fence.sequence_ack == sequence)
24945 goto out;
24946 @@ -553,7 +553,7 @@ nouveau_fence_channel_init(struct nouvea
24947 out_initialised:
24948 INIT_LIST_HEAD(&chan->fence.pending);
24949 spin_lock_init(&chan->fence.lock);
24950 - atomic_set(&chan->fence.last_sequence_irq, 0);
24951 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
24952 return 0;
24953 }
24954
24955 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c
24956 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-05-19 00:06:34.000000000 -0400
24957 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-05 19:44:36.000000000 -0400
24958 @@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
24959 int trycnt = 0;
24960 int ret, i;
24961
24962 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
24963 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
24964 retry:
24965 if (++trycnt > 100000) {
24966 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
24967 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c
24968 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-05-19 00:06:34.000000000 -0400
24969 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-05 19:44:36.000000000 -0400
24970 @@ -583,7 +583,7 @@ static bool nouveau_switcheroo_can_switc
24971 bool can_switch;
24972
24973 spin_lock(&dev->count_lock);
24974 - can_switch = (dev->open_count == 0);
24975 + can_switch = (local_read(&dev->open_count) == 0);
24976 spin_unlock(&dev->count_lock);
24977 return can_switch;
24978 }
24979 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c
24980 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-05-19 00:06:34.000000000 -0400
24981 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-05 19:44:36.000000000 -0400
24982 @@ -552,7 +552,7 @@ static int
24983 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
24984 u32 class, u32 mthd, u32 data)
24985 {
24986 - atomic_set(&chan->fence.last_sequence_irq, data);
24987 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
24988 return 0;
24989 }
24990
24991 diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c
24992 --- linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c 2011-05-19 00:06:34.000000000 -0400
24993 +++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c 2011-08-05 19:44:36.000000000 -0400
24994 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
24995
24996 /* GH: Simple idle check.
24997 */
24998 - atomic_set(&dev_priv->idle_count, 0);
24999 + atomic_set_unchecked(&dev_priv->idle_count, 0);
25000
25001 /* We don't support anything other than bus-mastering ring mode,
25002 * but the ring can be in either AGP or PCI space for the ring
25003 diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h
25004 --- linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h 2011-05-19 00:06:34.000000000 -0400
25005 +++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h 2011-08-05 19:44:36.000000000 -0400
25006 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
25007 int is_pci;
25008 unsigned long cce_buffers_offset;
25009
25010 - atomic_t idle_count;
25011 + atomic_unchecked_t idle_count;
25012
25013 int page_flipping;
25014 int current_page;
25015 u32 crtc_offset;
25016 u32 crtc_offset_cntl;
25017
25018 - atomic_t vbl_received;
25019 + atomic_unchecked_t vbl_received;
25020
25021 u32 color_fmt;
25022 unsigned int front_offset;
25023 diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c
25024 --- linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c 2011-05-19 00:06:34.000000000 -0400
25025 +++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c 2011-08-05 19:44:36.000000000 -0400
25026 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
25027 if (crtc != 0)
25028 return 0;
25029
25030 - return atomic_read(&dev_priv->vbl_received);
25031 + return atomic_read_unchecked(&dev_priv->vbl_received);
25032 }
25033
25034 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
25035 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
25036 /* VBLANK interrupt */
25037 if (status & R128_CRTC_VBLANK_INT) {
25038 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
25039 - atomic_inc(&dev_priv->vbl_received);
25040 + atomic_inc_unchecked(&dev_priv->vbl_received);
25041 drm_handle_vblank(dev, 0);
25042 return IRQ_HANDLED;
25043 }
25044 diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c
25045 --- linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c 2011-05-19 00:06:34.000000000 -0400
25046 +++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c 2011-08-05 19:44:36.000000000 -0400
25047 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
25048
25049 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
25050 {
25051 - if (atomic_read(&dev_priv->idle_count) == 0)
25052 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
25053 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
25054 else
25055 - atomic_set(&dev_priv->idle_count, 0);
25056 + atomic_set_unchecked(&dev_priv->idle_count, 0);
25057 }
25058
25059 #endif
25060 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c
25061 --- linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c 2011-05-19 00:06:34.000000000 -0400
25062 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c 2011-08-05 19:44:36.000000000 -0400
25063 @@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
25064 char name[512];
25065 int i;
25066
25067 + pax_track_stack();
25068 +
25069 ctx->card = card;
25070 ctx->bios = bios;
25071
25072 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c
25073 --- linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c 2011-05-19 00:06:34.000000000 -0400
25074 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c 2011-08-05 19:44:36.000000000 -0400
25075 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
25076 regex_t mask_rex;
25077 regmatch_t match[4];
25078 char buf[1024];
25079 - size_t end;
25080 + long end;
25081 int len;
25082 int done = 0;
25083 int r;
25084 unsigned o;
25085 struct offset *offset;
25086 char last_reg_s[10];
25087 - int last_reg;
25088 + unsigned long last_reg;
25089
25090 if (regcomp
25091 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
25092 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c
25093 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-19 00:06:34.000000000 -0400
25094 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-05 19:44:36.000000000 -0400
25095 @@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
25096 struct radeon_gpio_rec gpio;
25097 struct radeon_hpd hpd;
25098
25099 + pax_track_stack();
25100 +
25101 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
25102 return false;
25103
25104 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c
25105 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c 2011-06-25 12:55:22.000000000 -0400
25106 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c 2011-08-05 19:44:36.000000000 -0400
25107 @@ -674,7 +674,7 @@ static bool radeon_switcheroo_can_switch
25108 bool can_switch;
25109
25110 spin_lock(&dev->count_lock);
25111 - can_switch = (dev->open_count == 0);
25112 + can_switch = (local_read(&dev->open_count) == 0);
25113 spin_unlock(&dev->count_lock);
25114 return can_switch;
25115 }
25116 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c
25117 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-05 21:11:51.000000000 -0400
25118 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-05 21:12:20.000000000 -0400
25119 @@ -937,6 +937,8 @@ void radeon_compute_pll_legacy(struct ra
25120 uint32_t post_div;
25121 u32 pll_out_min, pll_out_max;
25122
25123 + pax_track_stack();
25124 +
25125 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
25126 freq = freq * 1000;
25127
25128 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h
25129 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-19 00:06:34.000000000 -0400
25130 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-05 19:44:36.000000000 -0400
25131 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
25132
25133 /* SW interrupt */
25134 wait_queue_head_t swi_queue;
25135 - atomic_t swi_emitted;
25136 + atomic_unchecked_t swi_emitted;
25137 int vblank_crtc;
25138 uint32_t irq_enable_reg;
25139 uint32_t r500_disp_irq_reg;
25140 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c
25141 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-19 00:06:34.000000000 -0400
25142 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-05 19:44:36.000000000 -0400
25143 @@ -49,7 +49,7 @@ int radeon_fence_emit(struct radeon_devi
25144 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
25145 return 0;
25146 }
25147 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
25148 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
25149 if (!rdev->cp.ready) {
25150 /* FIXME: cp is not running assume everythings is done right
25151 * away
25152 @@ -352,7 +352,7 @@ int radeon_fence_driver_init(struct rade
25153 return r;
25154 }
25155 WREG32(rdev->fence_drv.scratch_reg, 0);
25156 - atomic_set(&rdev->fence_drv.seq, 0);
25157 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
25158 INIT_LIST_HEAD(&rdev->fence_drv.created);
25159 INIT_LIST_HEAD(&rdev->fence_drv.emited);
25160 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
25161 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h
25162 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h 2011-05-19 00:06:34.000000000 -0400
25163 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h 2011-08-05 20:34:06.000000000 -0400
25164 @@ -189,7 +189,7 @@ extern int sumo_get_temp(struct radeon_d
25165 */
25166 struct radeon_fence_driver {
25167 uint32_t scratch_reg;
25168 - atomic_t seq;
25169 + atomic_unchecked_t seq;
25170 uint32_t last_seq;
25171 unsigned long last_jiffies;
25172 unsigned long last_timeout;
25173 @@ -958,7 +958,7 @@ struct radeon_asic {
25174 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
25175 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
25176 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
25177 -};
25178 +} __no_const;
25179
25180 /*
25181 * Asic structures
25182 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c
25183 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-05-19 00:06:34.000000000 -0400
25184 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-05 19:44:36.000000000 -0400
25185 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
25186 request = compat_alloc_user_space(sizeof(*request));
25187 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
25188 || __put_user(req32.param, &request->param)
25189 - || __put_user((void __user *)(unsigned long)req32.value,
25190 + || __put_user((unsigned long)req32.value,
25191 &request->value))
25192 return -EFAULT;
25193
25194 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c
25195 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-19 00:06:34.000000000 -0400
25196 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-05 19:44:36.000000000 -0400
25197 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
25198 unsigned int ret;
25199 RING_LOCALS;
25200
25201 - atomic_inc(&dev_priv->swi_emitted);
25202 - ret = atomic_read(&dev_priv->swi_emitted);
25203 + atomic_inc_unchecked(&dev_priv->swi_emitted);
25204 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
25205
25206 BEGIN_RING(4);
25207 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
25208 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
25209 drm_radeon_private_t *dev_priv =
25210 (drm_radeon_private_t *) dev->dev_private;
25211
25212 - atomic_set(&dev_priv->swi_emitted, 0);
25213 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
25214 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
25215
25216 dev->max_vblank_count = 0x001fffff;
25217 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c
25218 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c 2011-05-19 00:06:34.000000000 -0400
25219 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c 2011-08-05 19:44:36.000000000 -0400
25220 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
25221 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
25222 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
25223
25224 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25225 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25226 sarea_priv->nbox * sizeof(depth_boxes[0])))
25227 return -EFAULT;
25228
25229 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
25230 {
25231 drm_radeon_private_t *dev_priv = dev->dev_private;
25232 drm_radeon_getparam_t *param = data;
25233 - int value;
25234 + int value = 0;
25235
25236 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
25237
25238 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c
25239 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-05-19 00:06:34.000000000 -0400
25240 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-08-05 20:34:06.000000000 -0400
25241 @@ -644,8 +644,10 @@ int radeon_mmap(struct file *filp, struc
25242 }
25243 if (unlikely(ttm_vm_ops == NULL)) {
25244 ttm_vm_ops = vma->vm_ops;
25245 - radeon_ttm_vm_ops = *ttm_vm_ops;
25246 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25247 + pax_open_kernel();
25248 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
25249 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25250 + pax_close_kernel();
25251 }
25252 vma->vm_ops = &radeon_ttm_vm_ops;
25253 return 0;
25254 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c
25255 --- linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c 2011-05-19 00:06:34.000000000 -0400
25256 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c 2011-08-05 19:44:36.000000000 -0400
25257 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
25258 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
25259 rdev->pm.sideport_bandwidth.full)
25260 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
25261 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
25262 + read_delay_latency.full = dfixed_const(800 * 1000);
25263 read_delay_latency.full = dfixed_div(read_delay_latency,
25264 rdev->pm.igp_sideport_mclk);
25265 + a.full = dfixed_const(370);
25266 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
25267 } else {
25268 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
25269 rdev->pm.k8_bandwidth.full)
25270 diff -urNp linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c
25271 --- linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-05-19 00:06:34.000000000 -0400
25272 +++ linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-05 19:44:36.000000000 -0400
25273 @@ -397,9 +397,9 @@ static int ttm_pool_get_num_unused_pages
25274 */
25275 static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask)
25276 {
25277 - static atomic_t start_pool = ATOMIC_INIT(0);
25278 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
25279 unsigned i;
25280 - unsigned pool_offset = atomic_add_return(1, &start_pool);
25281 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
25282 struct ttm_page_pool *pool;
25283
25284 pool_offset = pool_offset % NUM_POOLS;
25285 diff -urNp linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h
25286 --- linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h 2011-05-19 00:06:34.000000000 -0400
25287 +++ linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h 2011-08-05 19:44:36.000000000 -0400
25288 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
25289 typedef uint32_t maskarray_t[5];
25290
25291 typedef struct drm_via_irq {
25292 - atomic_t irq_received;
25293 + atomic_unchecked_t irq_received;
25294 uint32_t pending_mask;
25295 uint32_t enable_mask;
25296 wait_queue_head_t irq_queue;
25297 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
25298 struct timeval last_vblank;
25299 int last_vblank_valid;
25300 unsigned usec_per_vblank;
25301 - atomic_t vbl_received;
25302 + atomic_unchecked_t vbl_received;
25303 drm_via_state_t hc_state;
25304 char pci_buf[VIA_PCI_BUF_SIZE];
25305 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
25306 diff -urNp linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c
25307 --- linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c 2011-05-19 00:06:34.000000000 -0400
25308 +++ linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c 2011-08-05 19:44:36.000000000 -0400
25309 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
25310 if (crtc != 0)
25311 return 0;
25312
25313 - return atomic_read(&dev_priv->vbl_received);
25314 + return atomic_read_unchecked(&dev_priv->vbl_received);
25315 }
25316
25317 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
25318 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
25319
25320 status = VIA_READ(VIA_REG_INTERRUPT);
25321 if (status & VIA_IRQ_VBLANK_PENDING) {
25322 - atomic_inc(&dev_priv->vbl_received);
25323 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
25324 + atomic_inc_unchecked(&dev_priv->vbl_received);
25325 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
25326 do_gettimeofday(&cur_vblank);
25327 if (dev_priv->last_vblank_valid) {
25328 dev_priv->usec_per_vblank =
25329 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25330 dev_priv->last_vblank = cur_vblank;
25331 dev_priv->last_vblank_valid = 1;
25332 }
25333 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
25334 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
25335 DRM_DEBUG("US per vblank is: %u\n",
25336 dev_priv->usec_per_vblank);
25337 }
25338 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25339
25340 for (i = 0; i < dev_priv->num_irqs; ++i) {
25341 if (status & cur_irq->pending_mask) {
25342 - atomic_inc(&cur_irq->irq_received);
25343 + atomic_inc_unchecked(&cur_irq->irq_received);
25344 DRM_WAKEUP(&cur_irq->irq_queue);
25345 handled = 1;
25346 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
25347 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
25348 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25349 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
25350 masks[irq][4]));
25351 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
25352 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
25353 } else {
25354 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25355 (((cur_irq_sequence =
25356 - atomic_read(&cur_irq->irq_received)) -
25357 + atomic_read_unchecked(&cur_irq->irq_received)) -
25358 *sequence) <= (1 << 23)));
25359 }
25360 *sequence = cur_irq_sequence;
25361 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
25362 }
25363
25364 for (i = 0; i < dev_priv->num_irqs; ++i) {
25365 - atomic_set(&cur_irq->irq_received, 0);
25366 + atomic_set_unchecked(&cur_irq->irq_received, 0);
25367 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
25368 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
25369 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
25370 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
25371 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
25372 case VIA_IRQ_RELATIVE:
25373 irqwait->request.sequence +=
25374 - atomic_read(&cur_irq->irq_received);
25375 + atomic_read_unchecked(&cur_irq->irq_received);
25376 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
25377 case VIA_IRQ_ABSOLUTE:
25378 break;
25379 diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
25380 --- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-05-19 00:06:34.000000000 -0400
25381 +++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-05 19:44:36.000000000 -0400
25382 @@ -240,7 +240,7 @@ struct vmw_private {
25383 * Fencing and IRQs.
25384 */
25385
25386 - atomic_t fence_seq;
25387 + atomic_unchecked_t fence_seq;
25388 wait_queue_head_t fence_queue;
25389 wait_queue_head_t fifo_queue;
25390 atomic_t fence_queue_waiters;
25391 diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
25392 --- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-05-19 00:06:34.000000000 -0400
25393 +++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-05 19:44:36.000000000 -0400
25394 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
25395 while (!vmw_lag_lt(queue, us)) {
25396 spin_lock(&queue->lock);
25397 if (list_empty(&queue->head))
25398 - sequence = atomic_read(&dev_priv->fence_seq);
25399 + sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25400 else {
25401 fence = list_first_entry(&queue->head,
25402 struct vmw_fence, head);
25403 diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
25404 --- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-05-19 00:06:34.000000000 -0400
25405 +++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-08-05 20:34:06.000000000 -0400
25406 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
25407 (unsigned int) min,
25408 (unsigned int) fifo->capabilities);
25409
25410 - atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25411 + atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25412 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
25413 vmw_fence_queue_init(&fifo->fence_queue);
25414 return vmw_fifo_send_fence(dev_priv, &dummy);
25415 @@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25416
25417 fm = vmw_fifo_reserve(dev_priv, bytes);
25418 if (unlikely(fm == NULL)) {
25419 - *sequence = atomic_read(&dev_priv->fence_seq);
25420 + *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25421 ret = -ENOMEM;
25422 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
25423 false, 3*HZ);
25424 @@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25425 }
25426
25427 do {
25428 - *sequence = atomic_add_return(1, &dev_priv->fence_seq);
25429 + *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
25430 } while (*sequence == 0);
25431
25432 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
25433 diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
25434 --- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-05-19 00:06:34.000000000 -0400
25435 +++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-05 19:44:36.000000000 -0400
25436 @@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
25437 * emitted. Then the fence is stale and signaled.
25438 */
25439
25440 - ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
25441 + ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
25442 > VMW_FENCE_WRAP);
25443
25444 return ret;
25445 @@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
25446
25447 if (fifo_idle)
25448 down_read(&fifo_state->rwsem);
25449 - signal_seq = atomic_read(&dev_priv->fence_seq);
25450 + signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
25451 ret = 0;
25452
25453 for (;;) {
25454 diff -urNp linux-2.6.39.4/drivers/hid/hid-core.c linux-2.6.39.4/drivers/hid/hid-core.c
25455 --- linux-2.6.39.4/drivers/hid/hid-core.c 2011-05-19 00:06:34.000000000 -0400
25456 +++ linux-2.6.39.4/drivers/hid/hid-core.c 2011-08-05 19:44:36.000000000 -0400
25457 @@ -1888,7 +1888,7 @@ static bool hid_ignore(struct hid_device
25458
25459 int hid_add_device(struct hid_device *hdev)
25460 {
25461 - static atomic_t id = ATOMIC_INIT(0);
25462 + static atomic_unchecked_t id = ATOMIC_INIT(0);
25463 int ret;
25464
25465 if (WARN_ON(hdev->status & HID_STAT_ADDED))
25466 @@ -1903,7 +1903,7 @@ int hid_add_device(struct hid_device *hd
25467 /* XXX hack, any other cleaner solution after the driver core
25468 * is converted to allow more than 20 bytes as the device name? */
25469 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
25470 - hdev->vendor, hdev->product, atomic_inc_return(&id));
25471 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
25472
25473 hid_debug_register(hdev, dev_name(&hdev->dev));
25474 ret = device_add(&hdev->dev);
25475 diff -urNp linux-2.6.39.4/drivers/hid/usbhid/hiddev.c linux-2.6.39.4/drivers/hid/usbhid/hiddev.c
25476 --- linux-2.6.39.4/drivers/hid/usbhid/hiddev.c 2011-05-19 00:06:34.000000000 -0400
25477 +++ linux-2.6.39.4/drivers/hid/usbhid/hiddev.c 2011-08-05 19:44:36.000000000 -0400
25478 @@ -613,7 +613,7 @@ static long hiddev_ioctl(struct file *fi
25479 break;
25480
25481 case HIDIOCAPPLICATION:
25482 - if (arg < 0 || arg >= hid->maxapplication)
25483 + if (arg >= hid->maxapplication)
25484 break;
25485
25486 for (i = 0; i < hid->maxcollection; i++)
25487 diff -urNp linux-2.6.39.4/drivers/hwmon/sht15.c linux-2.6.39.4/drivers/hwmon/sht15.c
25488 --- linux-2.6.39.4/drivers/hwmon/sht15.c 2011-05-19 00:06:34.000000000 -0400
25489 +++ linux-2.6.39.4/drivers/hwmon/sht15.c 2011-08-05 19:44:36.000000000 -0400
25490 @@ -113,7 +113,7 @@ struct sht15_data {
25491 int supply_uV;
25492 int supply_uV_valid;
25493 struct work_struct update_supply_work;
25494 - atomic_t interrupt_handled;
25495 + atomic_unchecked_t interrupt_handled;
25496 };
25497
25498 /**
25499 @@ -246,13 +246,13 @@ static inline int sht15_update_single_va
25500 return ret;
25501
25502 gpio_direction_input(data->pdata->gpio_data);
25503 - atomic_set(&data->interrupt_handled, 0);
25504 + atomic_set_unchecked(&data->interrupt_handled, 0);
25505
25506 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25507 if (gpio_get_value(data->pdata->gpio_data) == 0) {
25508 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
25509 /* Only relevant if the interrupt hasn't occurred. */
25510 - if (!atomic_read(&data->interrupt_handled))
25511 + if (!atomic_read_unchecked(&data->interrupt_handled))
25512 schedule_work(&data->read_work);
25513 }
25514 ret = wait_event_timeout(data->wait_queue,
25515 @@ -399,7 +399,7 @@ static irqreturn_t sht15_interrupt_fired
25516 struct sht15_data *data = d;
25517 /* First disable the interrupt */
25518 disable_irq_nosync(irq);
25519 - atomic_inc(&data->interrupt_handled);
25520 + atomic_inc_unchecked(&data->interrupt_handled);
25521 /* Then schedule a reading work struct */
25522 if (data->flag != SHT15_READING_NOTHING)
25523 schedule_work(&data->read_work);
25524 @@ -450,11 +450,11 @@ static void sht15_bh_read_data(struct wo
25525 here as could have gone low in meantime so verify
25526 it hasn't!
25527 */
25528 - atomic_set(&data->interrupt_handled, 0);
25529 + atomic_set_unchecked(&data->interrupt_handled, 0);
25530 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25531 /* If still not occurred or another handler has been scheduled */
25532 if (gpio_get_value(data->pdata->gpio_data)
25533 - || atomic_read(&data->interrupt_handled))
25534 + || atomic_read_unchecked(&data->interrupt_handled))
25535 return;
25536 }
25537 /* Read the data back from the device */
25538 diff -urNp linux-2.6.39.4/drivers/hwmon/w83791d.c linux-2.6.39.4/drivers/hwmon/w83791d.c
25539 --- linux-2.6.39.4/drivers/hwmon/w83791d.c 2011-05-19 00:06:34.000000000 -0400
25540 +++ linux-2.6.39.4/drivers/hwmon/w83791d.c 2011-08-05 19:44:36.000000000 -0400
25541 @@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
25542 struct i2c_board_info *info);
25543 static int w83791d_remove(struct i2c_client *client);
25544
25545 -static int w83791d_read(struct i2c_client *client, u8 register);
25546 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
25547 +static int w83791d_read(struct i2c_client *client, u8 reg);
25548 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
25549 static struct w83791d_data *w83791d_update_device(struct device *dev);
25550
25551 #ifdef DEBUG
25552 diff -urNp linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c
25553 --- linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-05-19 00:06:34.000000000 -0400
25554 +++ linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-05 20:34:06.000000000 -0400
25555 @@ -43,7 +43,7 @@
25556 extern struct i2c_adapter amd756_smbus;
25557
25558 static struct i2c_adapter *s4882_adapter;
25559 -static struct i2c_algorithm *s4882_algo;
25560 +static i2c_algorithm_no_const *s4882_algo;
25561
25562 /* Wrapper access functions for multiplexed SMBus */
25563 static DEFINE_MUTEX(amd756_lock);
25564 diff -urNp linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c
25565 --- linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-05-19 00:06:34.000000000 -0400
25566 +++ linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-05 20:34:06.000000000 -0400
25567 @@ -41,7 +41,7 @@
25568 extern struct i2c_adapter *nforce2_smbus;
25569
25570 static struct i2c_adapter *s4985_adapter;
25571 -static struct i2c_algorithm *s4985_algo;
25572 +static i2c_algorithm_no_const *s4985_algo;
25573
25574 /* Wrapper access functions for multiplexed SMBus */
25575 static DEFINE_MUTEX(nforce2_lock);
25576 diff -urNp linux-2.6.39.4/drivers/i2c/i2c-mux.c linux-2.6.39.4/drivers/i2c/i2c-mux.c
25577 --- linux-2.6.39.4/drivers/i2c/i2c-mux.c 2011-05-19 00:06:34.000000000 -0400
25578 +++ linux-2.6.39.4/drivers/i2c/i2c-mux.c 2011-08-05 20:34:06.000000000 -0400
25579 @@ -28,7 +28,7 @@
25580 /* multiplexer per channel data */
25581 struct i2c_mux_priv {
25582 struct i2c_adapter adap;
25583 - struct i2c_algorithm algo;
25584 + i2c_algorithm_no_const algo;
25585
25586 struct i2c_adapter *parent;
25587 void *mux_dev; /* the mux chip/device */
25588 diff -urNp linux-2.6.39.4/drivers/ide/ide-cd.c linux-2.6.39.4/drivers/ide/ide-cd.c
25589 --- linux-2.6.39.4/drivers/ide/ide-cd.c 2011-06-03 00:04:14.000000000 -0400
25590 +++ linux-2.6.39.4/drivers/ide/ide-cd.c 2011-08-05 19:44:36.000000000 -0400
25591 @@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
25592 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
25593 if ((unsigned long)buf & alignment
25594 || blk_rq_bytes(rq) & q->dma_pad_mask
25595 - || object_is_on_stack(buf))
25596 + || object_starts_on_stack(buf))
25597 drive->dma = 0;
25598 }
25599 }
25600 diff -urNp linux-2.6.39.4/drivers/ide/ide-floppy.c linux-2.6.39.4/drivers/ide/ide-floppy.c
25601 --- linux-2.6.39.4/drivers/ide/ide-floppy.c 2011-05-19 00:06:34.000000000 -0400
25602 +++ linux-2.6.39.4/drivers/ide/ide-floppy.c 2011-08-05 19:44:36.000000000 -0400
25603 @@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
25604 u8 pc_buf[256], header_len, desc_cnt;
25605 int i, rc = 1, blocks, length;
25606
25607 + pax_track_stack();
25608 +
25609 ide_debug_log(IDE_DBG_FUNC, "enter");
25610
25611 drive->bios_cyl = 0;
25612 diff -urNp linux-2.6.39.4/drivers/ide/setup-pci.c linux-2.6.39.4/drivers/ide/setup-pci.c
25613 --- linux-2.6.39.4/drivers/ide/setup-pci.c 2011-05-19 00:06:34.000000000 -0400
25614 +++ linux-2.6.39.4/drivers/ide/setup-pci.c 2011-08-05 19:44:36.000000000 -0400
25615 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
25616 int ret, i, n_ports = dev2 ? 4 : 2;
25617 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
25618
25619 + pax_track_stack();
25620 +
25621 for (i = 0; i < n_ports / 2; i++) {
25622 ret = ide_setup_pci_controller(pdev[i], d, !i);
25623 if (ret < 0)
25624 diff -urNp linux-2.6.39.4/drivers/infiniband/core/cm.c linux-2.6.39.4/drivers/infiniband/core/cm.c
25625 --- linux-2.6.39.4/drivers/infiniband/core/cm.c 2011-05-19 00:06:34.000000000 -0400
25626 +++ linux-2.6.39.4/drivers/infiniband/core/cm.c 2011-08-05 19:44:36.000000000 -0400
25627 @@ -113,7 +113,7 @@ static char const counter_group_names[CM
25628
25629 struct cm_counter_group {
25630 struct kobject obj;
25631 - atomic_long_t counter[CM_ATTR_COUNT];
25632 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
25633 };
25634
25635 struct cm_counter_attribute {
25636 @@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
25637 struct ib_mad_send_buf *msg = NULL;
25638 int ret;
25639
25640 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25641 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25642 counter[CM_REQ_COUNTER]);
25643
25644 /* Quick state check to discard duplicate REQs. */
25645 @@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
25646 if (!cm_id_priv)
25647 return;
25648
25649 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25650 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25651 counter[CM_REP_COUNTER]);
25652 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
25653 if (ret)
25654 @@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
25655 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
25656 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
25657 spin_unlock_irq(&cm_id_priv->lock);
25658 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25659 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25660 counter[CM_RTU_COUNTER]);
25661 goto out;
25662 }
25663 @@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
25664 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
25665 dreq_msg->local_comm_id);
25666 if (!cm_id_priv) {
25667 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25668 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25669 counter[CM_DREQ_COUNTER]);
25670 cm_issue_drep(work->port, work->mad_recv_wc);
25671 return -EINVAL;
25672 @@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
25673 case IB_CM_MRA_REP_RCVD:
25674 break;
25675 case IB_CM_TIMEWAIT:
25676 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25677 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25678 counter[CM_DREQ_COUNTER]);
25679 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25680 goto unlock;
25681 @@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
25682 cm_free_msg(msg);
25683 goto deref;
25684 case IB_CM_DREQ_RCVD:
25685 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25686 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25687 counter[CM_DREQ_COUNTER]);
25688 goto unlock;
25689 default:
25690 @@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
25691 ib_modify_mad(cm_id_priv->av.port->mad_agent,
25692 cm_id_priv->msg, timeout)) {
25693 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
25694 - atomic_long_inc(&work->port->
25695 + atomic_long_inc_unchecked(&work->port->
25696 counter_group[CM_RECV_DUPLICATES].
25697 counter[CM_MRA_COUNTER]);
25698 goto out;
25699 @@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
25700 break;
25701 case IB_CM_MRA_REQ_RCVD:
25702 case IB_CM_MRA_REP_RCVD:
25703 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25704 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25705 counter[CM_MRA_COUNTER]);
25706 /* fall through */
25707 default:
25708 @@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
25709 case IB_CM_LAP_IDLE:
25710 break;
25711 case IB_CM_MRA_LAP_SENT:
25712 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25713 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25714 counter[CM_LAP_COUNTER]);
25715 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25716 goto unlock;
25717 @@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
25718 cm_free_msg(msg);
25719 goto deref;
25720 case IB_CM_LAP_RCVD:
25721 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25722 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25723 counter[CM_LAP_COUNTER]);
25724 goto unlock;
25725 default:
25726 @@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
25727 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
25728 if (cur_cm_id_priv) {
25729 spin_unlock_irq(&cm.lock);
25730 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25731 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25732 counter[CM_SIDR_REQ_COUNTER]);
25733 goto out; /* Duplicate message. */
25734 }
25735 @@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
25736 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
25737 msg->retries = 1;
25738
25739 - atomic_long_add(1 + msg->retries,
25740 + atomic_long_add_unchecked(1 + msg->retries,
25741 &port->counter_group[CM_XMIT].counter[attr_index]);
25742 if (msg->retries)
25743 - atomic_long_add(msg->retries,
25744 + atomic_long_add_unchecked(msg->retries,
25745 &port->counter_group[CM_XMIT_RETRIES].
25746 counter[attr_index]);
25747
25748 @@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
25749 }
25750
25751 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
25752 - atomic_long_inc(&port->counter_group[CM_RECV].
25753 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
25754 counter[attr_id - CM_ATTR_ID_OFFSET]);
25755
25756 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
25757 @@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
25758 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
25759
25760 return sprintf(buf, "%ld\n",
25761 - atomic_long_read(&group->counter[cm_attr->index]));
25762 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
25763 }
25764
25765 static const struct sysfs_ops cm_counter_ops = {
25766 diff -urNp linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c
25767 --- linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c 2011-05-19 00:06:34.000000000 -0400
25768 +++ linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c 2011-08-05 19:44:36.000000000 -0400
25769 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
25770
25771 struct task_struct *thread;
25772
25773 - atomic_t req_ser;
25774 - atomic_t flush_ser;
25775 + atomic_unchecked_t req_ser;
25776 + atomic_unchecked_t flush_ser;
25777
25778 wait_queue_head_t force_wait;
25779 };
25780 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
25781 struct ib_fmr_pool *pool = pool_ptr;
25782
25783 do {
25784 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
25785 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
25786 ib_fmr_batch_release(pool);
25787
25788 - atomic_inc(&pool->flush_ser);
25789 + atomic_inc_unchecked(&pool->flush_ser);
25790 wake_up_interruptible(&pool->force_wait);
25791
25792 if (pool->flush_function)
25793 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
25794 }
25795
25796 set_current_state(TASK_INTERRUPTIBLE);
25797 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
25798 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
25799 !kthread_should_stop())
25800 schedule();
25801 __set_current_state(TASK_RUNNING);
25802 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
25803 pool->dirty_watermark = params->dirty_watermark;
25804 pool->dirty_len = 0;
25805 spin_lock_init(&pool->pool_lock);
25806 - atomic_set(&pool->req_ser, 0);
25807 - atomic_set(&pool->flush_ser, 0);
25808 + atomic_set_unchecked(&pool->req_ser, 0);
25809 + atomic_set_unchecked(&pool->flush_ser, 0);
25810 init_waitqueue_head(&pool->force_wait);
25811
25812 pool->thread = kthread_run(ib_fmr_cleanup_thread,
25813 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
25814 }
25815 spin_unlock_irq(&pool->pool_lock);
25816
25817 - serial = atomic_inc_return(&pool->req_ser);
25818 + serial = atomic_inc_return_unchecked(&pool->req_ser);
25819 wake_up_process(pool->thread);
25820
25821 if (wait_event_interruptible(pool->force_wait,
25822 - atomic_read(&pool->flush_ser) - serial >= 0))
25823 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
25824 return -EINTR;
25825
25826 return 0;
25827 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
25828 } else {
25829 list_add_tail(&fmr->list, &pool->dirty_list);
25830 if (++pool->dirty_len >= pool->dirty_watermark) {
25831 - atomic_inc(&pool->req_ser);
25832 + atomic_inc_unchecked(&pool->req_ser);
25833 wake_up_process(pool->thread);
25834 }
25835 }
25836 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c
25837 --- linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c 2011-05-19 00:06:34.000000000 -0400
25838 +++ linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c 2011-08-05 19:44:36.000000000 -0400
25839 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
25840 int err;
25841 struct fw_ri_tpte tpt;
25842 u32 stag_idx;
25843 - static atomic_t key;
25844 + static atomic_unchecked_t key;
25845
25846 if (c4iw_fatal_error(rdev))
25847 return -EIO;
25848 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
25849 &rdev->resource.tpt_fifo_lock);
25850 if (!stag_idx)
25851 return -ENOMEM;
25852 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
25853 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
25854 }
25855 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
25856 __func__, stag_state, type, pdid, stag_idx);
25857 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c
25858 --- linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-19 00:06:34.000000000 -0400
25859 +++ linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-05 19:44:36.000000000 -0400
25860 @@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
25861 struct infinipath_counters counters;
25862 struct ipath_devdata *dd;
25863
25864 + pax_track_stack();
25865 +
25866 dd = file->f_path.dentry->d_inode->i_private;
25867 dd->ipath_f_read_counters(dd, &counters);
25868
25869 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c
25870 --- linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-05-19 00:06:34.000000000 -0400
25871 +++ linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-05 19:44:36.000000000 -0400
25872 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25873 struct ib_atomic_eth *ateth;
25874 struct ipath_ack_entry *e;
25875 u64 vaddr;
25876 - atomic64_t *maddr;
25877 + atomic64_unchecked_t *maddr;
25878 u64 sdata;
25879 u32 rkey;
25880 u8 next;
25881 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25882 IB_ACCESS_REMOTE_ATOMIC)))
25883 goto nack_acc_unlck;
25884 /* Perform atomic OP and save result. */
25885 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25886 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25887 sdata = be64_to_cpu(ateth->swap_data);
25888 e = &qp->s_ack_queue[qp->r_head_ack_queue];
25889 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
25890 - (u64) atomic64_add_return(sdata, maddr) - sdata :
25891 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25892 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25893 be64_to_cpu(ateth->compare_data),
25894 sdata);
25895 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c
25896 --- linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-05-19 00:06:34.000000000 -0400
25897 +++ linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-05 19:44:36.000000000 -0400
25898 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
25899 unsigned long flags;
25900 struct ib_wc wc;
25901 u64 sdata;
25902 - atomic64_t *maddr;
25903 + atomic64_unchecked_t *maddr;
25904 enum ib_wc_status send_status;
25905
25906 /*
25907 @@ -382,11 +382,11 @@ again:
25908 IB_ACCESS_REMOTE_ATOMIC)))
25909 goto acc_err;
25910 /* Perform atomic OP and save result. */
25911 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25912 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25913 sdata = wqe->wr.wr.atomic.compare_add;
25914 *(u64 *) sqp->s_sge.sge.vaddr =
25915 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
25916 - (u64) atomic64_add_return(sdata, maddr) - sdata :
25917 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25918 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25919 sdata, wqe->wr.wr.atomic.swap);
25920 goto send_comp;
25921 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c
25922 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c 2011-05-19 00:06:34.000000000 -0400
25923 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c 2011-08-05 19:44:36.000000000 -0400
25924 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
25925 LIST_HEAD(nes_adapter_list);
25926 static LIST_HEAD(nes_dev_list);
25927
25928 -atomic_t qps_destroyed;
25929 +atomic_unchecked_t qps_destroyed;
25930
25931 static unsigned int ee_flsh_adapter;
25932 static unsigned int sysfs_nonidx_addr;
25933 @@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
25934 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
25935 struct nes_adapter *nesadapter = nesdev->nesadapter;
25936
25937 - atomic_inc(&qps_destroyed);
25938 + atomic_inc_unchecked(&qps_destroyed);
25939
25940 /* Free the control structures */
25941
25942 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c
25943 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c 2011-05-19 00:06:34.000000000 -0400
25944 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c 2011-08-05 19:44:36.000000000 -0400
25945 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
25946 u32 cm_packets_retrans;
25947 u32 cm_packets_created;
25948 u32 cm_packets_received;
25949 -atomic_t cm_listens_created;
25950 -atomic_t cm_listens_destroyed;
25951 +atomic_unchecked_t cm_listens_created;
25952 +atomic_unchecked_t cm_listens_destroyed;
25953 u32 cm_backlog_drops;
25954 -atomic_t cm_loopbacks;
25955 -atomic_t cm_nodes_created;
25956 -atomic_t cm_nodes_destroyed;
25957 -atomic_t cm_accel_dropped_pkts;
25958 -atomic_t cm_resets_recvd;
25959 +atomic_unchecked_t cm_loopbacks;
25960 +atomic_unchecked_t cm_nodes_created;
25961 +atomic_unchecked_t cm_nodes_destroyed;
25962 +atomic_unchecked_t cm_accel_dropped_pkts;
25963 +atomic_unchecked_t cm_resets_recvd;
25964
25965 static inline int mini_cm_accelerated(struct nes_cm_core *,
25966 struct nes_cm_node *);
25967 @@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
25968
25969 static struct nes_cm_core *g_cm_core;
25970
25971 -atomic_t cm_connects;
25972 -atomic_t cm_accepts;
25973 -atomic_t cm_disconnects;
25974 -atomic_t cm_closes;
25975 -atomic_t cm_connecteds;
25976 -atomic_t cm_connect_reqs;
25977 -atomic_t cm_rejects;
25978 +atomic_unchecked_t cm_connects;
25979 +atomic_unchecked_t cm_accepts;
25980 +atomic_unchecked_t cm_disconnects;
25981 +atomic_unchecked_t cm_closes;
25982 +atomic_unchecked_t cm_connecteds;
25983 +atomic_unchecked_t cm_connect_reqs;
25984 +atomic_unchecked_t cm_rejects;
25985
25986
25987 /**
25988 @@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
25989 kfree(listener);
25990 listener = NULL;
25991 ret = 0;
25992 - atomic_inc(&cm_listens_destroyed);
25993 + atomic_inc_unchecked(&cm_listens_destroyed);
25994 } else {
25995 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
25996 }
25997 @@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
25998 cm_node->rem_mac);
25999
26000 add_hte_node(cm_core, cm_node);
26001 - atomic_inc(&cm_nodes_created);
26002 + atomic_inc_unchecked(&cm_nodes_created);
26003
26004 return cm_node;
26005 }
26006 @@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
26007 }
26008
26009 atomic_dec(&cm_core->node_cnt);
26010 - atomic_inc(&cm_nodes_destroyed);
26011 + atomic_inc_unchecked(&cm_nodes_destroyed);
26012 nesqp = cm_node->nesqp;
26013 if (nesqp) {
26014 nesqp->cm_node = NULL;
26015 @@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
26016
26017 static void drop_packet(struct sk_buff *skb)
26018 {
26019 - atomic_inc(&cm_accel_dropped_pkts);
26020 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
26021 dev_kfree_skb_any(skb);
26022 }
26023
26024 @@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
26025 {
26026
26027 int reset = 0; /* whether to send reset in case of err.. */
26028 - atomic_inc(&cm_resets_recvd);
26029 + atomic_inc_unchecked(&cm_resets_recvd);
26030 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
26031 " refcnt=%d\n", cm_node, cm_node->state,
26032 atomic_read(&cm_node->ref_count));
26033 @@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
26034 rem_ref_cm_node(cm_node->cm_core, cm_node);
26035 return NULL;
26036 }
26037 - atomic_inc(&cm_loopbacks);
26038 + atomic_inc_unchecked(&cm_loopbacks);
26039 loopbackremotenode->loopbackpartner = cm_node;
26040 loopbackremotenode->tcp_cntxt.rcv_wscale =
26041 NES_CM_DEFAULT_RCV_WND_SCALE;
26042 @@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
26043 add_ref_cm_node(cm_node);
26044 } else if (cm_node->state == NES_CM_STATE_TSA) {
26045 rem_ref_cm_node(cm_core, cm_node);
26046 - atomic_inc(&cm_accel_dropped_pkts);
26047 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
26048 dev_kfree_skb_any(skb);
26049 break;
26050 }
26051 @@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
26052
26053 if ((cm_id) && (cm_id->event_handler)) {
26054 if (issue_disconn) {
26055 - atomic_inc(&cm_disconnects);
26056 + atomic_inc_unchecked(&cm_disconnects);
26057 cm_event.event = IW_CM_EVENT_DISCONNECT;
26058 cm_event.status = disconn_status;
26059 cm_event.local_addr = cm_id->local_addr;
26060 @@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
26061 }
26062
26063 if (issue_close) {
26064 - atomic_inc(&cm_closes);
26065 + atomic_inc_unchecked(&cm_closes);
26066 nes_disconnect(nesqp, 1);
26067
26068 cm_id->provider_data = nesqp;
26069 @@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
26070
26071 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
26072 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
26073 - atomic_inc(&cm_accepts);
26074 + atomic_inc_unchecked(&cm_accepts);
26075
26076 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
26077 netdev_refcnt_read(nesvnic->netdev));
26078 @@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
26079
26080 struct nes_cm_core *cm_core;
26081
26082 - atomic_inc(&cm_rejects);
26083 + atomic_inc_unchecked(&cm_rejects);
26084 cm_node = (struct nes_cm_node *) cm_id->provider_data;
26085 loopback = cm_node->loopbackpartner;
26086 cm_core = cm_node->cm_core;
26087 @@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
26088 ntohl(cm_id->local_addr.sin_addr.s_addr),
26089 ntohs(cm_id->local_addr.sin_port));
26090
26091 - atomic_inc(&cm_connects);
26092 + atomic_inc_unchecked(&cm_connects);
26093 nesqp->active_conn = 1;
26094
26095 /* cache the cm_id in the qp */
26096 @@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
26097 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
26098 return err;
26099 }
26100 - atomic_inc(&cm_listens_created);
26101 + atomic_inc_unchecked(&cm_listens_created);
26102 }
26103
26104 cm_id->add_ref(cm_id);
26105 @@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
26106 if (nesqp->destroyed) {
26107 return;
26108 }
26109 - atomic_inc(&cm_connecteds);
26110 + atomic_inc_unchecked(&cm_connecteds);
26111 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
26112 " local port 0x%04X. jiffies = %lu.\n",
26113 nesqp->hwqp.qp_id,
26114 @@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
26115
26116 cm_id->add_ref(cm_id);
26117 ret = cm_id->event_handler(cm_id, &cm_event);
26118 - atomic_inc(&cm_closes);
26119 + atomic_inc_unchecked(&cm_closes);
26120 cm_event.event = IW_CM_EVENT_CLOSE;
26121 cm_event.status = IW_CM_EVENT_STATUS_OK;
26122 cm_event.provider_data = cm_id->provider_data;
26123 @@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
26124 return;
26125 cm_id = cm_node->cm_id;
26126
26127 - atomic_inc(&cm_connect_reqs);
26128 + atomic_inc_unchecked(&cm_connect_reqs);
26129 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26130 cm_node, cm_id, jiffies);
26131
26132 @@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
26133 return;
26134 cm_id = cm_node->cm_id;
26135
26136 - atomic_inc(&cm_connect_reqs);
26137 + atomic_inc_unchecked(&cm_connect_reqs);
26138 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26139 cm_node, cm_id, jiffies);
26140
26141 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h
26142 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h 2011-05-19 00:06:34.000000000 -0400
26143 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h 2011-08-05 19:44:36.000000000 -0400
26144 @@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
26145 extern unsigned int wqm_quanta;
26146 extern struct list_head nes_adapter_list;
26147
26148 -extern atomic_t cm_connects;
26149 -extern atomic_t cm_accepts;
26150 -extern atomic_t cm_disconnects;
26151 -extern atomic_t cm_closes;
26152 -extern atomic_t cm_connecteds;
26153 -extern atomic_t cm_connect_reqs;
26154 -extern atomic_t cm_rejects;
26155 -extern atomic_t mod_qp_timouts;
26156 -extern atomic_t qps_created;
26157 -extern atomic_t qps_destroyed;
26158 -extern atomic_t sw_qps_destroyed;
26159 +extern atomic_unchecked_t cm_connects;
26160 +extern atomic_unchecked_t cm_accepts;
26161 +extern atomic_unchecked_t cm_disconnects;
26162 +extern atomic_unchecked_t cm_closes;
26163 +extern atomic_unchecked_t cm_connecteds;
26164 +extern atomic_unchecked_t cm_connect_reqs;
26165 +extern atomic_unchecked_t cm_rejects;
26166 +extern atomic_unchecked_t mod_qp_timouts;
26167 +extern atomic_unchecked_t qps_created;
26168 +extern atomic_unchecked_t qps_destroyed;
26169 +extern atomic_unchecked_t sw_qps_destroyed;
26170 extern u32 mh_detected;
26171 extern u32 mh_pauses_sent;
26172 extern u32 cm_packets_sent;
26173 @@ -194,14 +194,14 @@ extern u32 cm_packets_created;
26174 extern u32 cm_packets_received;
26175 extern u32 cm_packets_dropped;
26176 extern u32 cm_packets_retrans;
26177 -extern atomic_t cm_listens_created;
26178 -extern atomic_t cm_listens_destroyed;
26179 +extern atomic_unchecked_t cm_listens_created;
26180 +extern atomic_unchecked_t cm_listens_destroyed;
26181 extern u32 cm_backlog_drops;
26182 -extern atomic_t cm_loopbacks;
26183 -extern atomic_t cm_nodes_created;
26184 -extern atomic_t cm_nodes_destroyed;
26185 -extern atomic_t cm_accel_dropped_pkts;
26186 -extern atomic_t cm_resets_recvd;
26187 +extern atomic_unchecked_t cm_loopbacks;
26188 +extern atomic_unchecked_t cm_nodes_created;
26189 +extern atomic_unchecked_t cm_nodes_destroyed;
26190 +extern atomic_unchecked_t cm_accel_dropped_pkts;
26191 +extern atomic_unchecked_t cm_resets_recvd;
26192
26193 extern u32 int_mod_timer_init;
26194 extern u32 int_mod_cq_depth_256;
26195 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c
26196 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c 2011-05-19 00:06:34.000000000 -0400
26197 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c 2011-08-05 19:44:36.000000000 -0400
26198 @@ -1302,31 +1302,31 @@ static void nes_netdev_get_ethtool_stats
26199 target_stat_values[++index] = mh_detected;
26200 target_stat_values[++index] = mh_pauses_sent;
26201 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
26202 - target_stat_values[++index] = atomic_read(&cm_connects);
26203 - target_stat_values[++index] = atomic_read(&cm_accepts);
26204 - target_stat_values[++index] = atomic_read(&cm_disconnects);
26205 - target_stat_values[++index] = atomic_read(&cm_connecteds);
26206 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
26207 - target_stat_values[++index] = atomic_read(&cm_rejects);
26208 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
26209 - target_stat_values[++index] = atomic_read(&qps_created);
26210 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
26211 - target_stat_values[++index] = atomic_read(&qps_destroyed);
26212 - target_stat_values[++index] = atomic_read(&cm_closes);
26213 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
26214 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
26215 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
26216 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
26217 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
26218 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
26219 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
26220 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
26221 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
26222 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
26223 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
26224 target_stat_values[++index] = cm_packets_sent;
26225 target_stat_values[++index] = cm_packets_bounced;
26226 target_stat_values[++index] = cm_packets_created;
26227 target_stat_values[++index] = cm_packets_received;
26228 target_stat_values[++index] = cm_packets_dropped;
26229 target_stat_values[++index] = cm_packets_retrans;
26230 - target_stat_values[++index] = atomic_read(&cm_listens_created);
26231 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
26232 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
26233 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
26234 target_stat_values[++index] = cm_backlog_drops;
26235 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
26236 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
26237 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
26238 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
26239 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
26240 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
26241 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
26242 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
26243 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
26244 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
26245 target_stat_values[++index] = nesadapter->free_4kpbl;
26246 target_stat_values[++index] = nesadapter->free_256pbl;
26247 target_stat_values[++index] = int_mod_timer_init;
26248 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c
26249 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-19 00:06:34.000000000 -0400
26250 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-05 19:44:36.000000000 -0400
26251 @@ -46,9 +46,9 @@
26252
26253 #include <rdma/ib_umem.h>
26254
26255 -atomic_t mod_qp_timouts;
26256 -atomic_t qps_created;
26257 -atomic_t sw_qps_destroyed;
26258 +atomic_unchecked_t mod_qp_timouts;
26259 +atomic_unchecked_t qps_created;
26260 +atomic_unchecked_t sw_qps_destroyed;
26261
26262 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
26263
26264 @@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
26265 if (init_attr->create_flags)
26266 return ERR_PTR(-EINVAL);
26267
26268 - atomic_inc(&qps_created);
26269 + atomic_inc_unchecked(&qps_created);
26270 switch (init_attr->qp_type) {
26271 case IB_QPT_RC:
26272 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
26273 @@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
26274 struct iw_cm_event cm_event;
26275 int ret;
26276
26277 - atomic_inc(&sw_qps_destroyed);
26278 + atomic_inc_unchecked(&sw_qps_destroyed);
26279 nesqp->destroyed = 1;
26280
26281 /* Blow away the connection if it exists. */
26282 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h
26283 --- linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h 2011-05-19 00:06:34.000000000 -0400
26284 +++ linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h 2011-08-05 20:34:06.000000000 -0400
26285 @@ -51,6 +51,7 @@
26286 #include <linux/completion.h>
26287 #include <linux/kref.h>
26288 #include <linux/sched.h>
26289 +#include <linux/slab.h>
26290
26291 #include "qib_common.h"
26292 #include "qib_verbs.h"
26293 diff -urNp linux-2.6.39.4/drivers/input/gameport/gameport.c linux-2.6.39.4/drivers/input/gameport/gameport.c
26294 --- linux-2.6.39.4/drivers/input/gameport/gameport.c 2011-05-19 00:06:34.000000000 -0400
26295 +++ linux-2.6.39.4/drivers/input/gameport/gameport.c 2011-08-05 19:44:37.000000000 -0400
26296 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
26297 */
26298 static void gameport_init_port(struct gameport *gameport)
26299 {
26300 - static atomic_t gameport_no = ATOMIC_INIT(0);
26301 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
26302
26303 __module_get(THIS_MODULE);
26304
26305 mutex_init(&gameport->drv_mutex);
26306 device_initialize(&gameport->dev);
26307 dev_set_name(&gameport->dev, "gameport%lu",
26308 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
26309 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
26310 gameport->dev.bus = &gameport_bus;
26311 gameport->dev.release = gameport_release_port;
26312 if (gameport->parent)
26313 diff -urNp linux-2.6.39.4/drivers/input/input.c linux-2.6.39.4/drivers/input/input.c
26314 --- linux-2.6.39.4/drivers/input/input.c 2011-07-09 09:18:51.000000000 -0400
26315 +++ linux-2.6.39.4/drivers/input/input.c 2011-08-05 19:44:37.000000000 -0400
26316 @@ -1815,7 +1815,7 @@ static void input_cleanse_bitmasks(struc
26317 */
26318 int input_register_device(struct input_dev *dev)
26319 {
26320 - static atomic_t input_no = ATOMIC_INIT(0);
26321 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
26322 struct input_handler *handler;
26323 const char *path;
26324 int error;
26325 @@ -1852,7 +1852,7 @@ int input_register_device(struct input_d
26326 dev->setkeycode = input_default_setkeycode;
26327
26328 dev_set_name(&dev->dev, "input%ld",
26329 - (unsigned long) atomic_inc_return(&input_no) - 1);
26330 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
26331
26332 error = device_add(&dev->dev);
26333 if (error)
26334 diff -urNp linux-2.6.39.4/drivers/input/joystick/sidewinder.c linux-2.6.39.4/drivers/input/joystick/sidewinder.c
26335 --- linux-2.6.39.4/drivers/input/joystick/sidewinder.c 2011-05-19 00:06:34.000000000 -0400
26336 +++ linux-2.6.39.4/drivers/input/joystick/sidewinder.c 2011-08-05 19:44:37.000000000 -0400
26337 @@ -30,6 +30,7 @@
26338 #include <linux/kernel.h>
26339 #include <linux/module.h>
26340 #include <linux/slab.h>
26341 +#include <linux/sched.h>
26342 #include <linux/init.h>
26343 #include <linux/input.h>
26344 #include <linux/gameport.h>
26345 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
26346 unsigned char buf[SW_LENGTH];
26347 int i;
26348
26349 + pax_track_stack();
26350 +
26351 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
26352
26353 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
26354 diff -urNp linux-2.6.39.4/drivers/input/joystick/xpad.c linux-2.6.39.4/drivers/input/joystick/xpad.c
26355 --- linux-2.6.39.4/drivers/input/joystick/xpad.c 2011-05-19 00:06:34.000000000 -0400
26356 +++ linux-2.6.39.4/drivers/input/joystick/xpad.c 2011-08-05 19:44:37.000000000 -0400
26357 @@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
26358
26359 static int xpad_led_probe(struct usb_xpad *xpad)
26360 {
26361 - static atomic_t led_seq = ATOMIC_INIT(0);
26362 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
26363 long led_no;
26364 struct xpad_led *led;
26365 struct led_classdev *led_cdev;
26366 @@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
26367 if (!led)
26368 return -ENOMEM;
26369
26370 - led_no = (long)atomic_inc_return(&led_seq) - 1;
26371 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
26372
26373 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
26374 led->xpad = xpad;
26375 diff -urNp linux-2.6.39.4/drivers/input/mousedev.c linux-2.6.39.4/drivers/input/mousedev.c
26376 --- linux-2.6.39.4/drivers/input/mousedev.c 2011-07-09 09:18:51.000000000 -0400
26377 +++ linux-2.6.39.4/drivers/input/mousedev.c 2011-08-05 19:44:37.000000000 -0400
26378 @@ -764,7 +764,7 @@ static ssize_t mousedev_read(struct file
26379
26380 spin_unlock_irq(&client->packet_lock);
26381
26382 - if (copy_to_user(buffer, data, count))
26383 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
26384 return -EFAULT;
26385
26386 return count;
26387 diff -urNp linux-2.6.39.4/drivers/input/serio/serio.c linux-2.6.39.4/drivers/input/serio/serio.c
26388 --- linux-2.6.39.4/drivers/input/serio/serio.c 2011-05-19 00:06:34.000000000 -0400
26389 +++ linux-2.6.39.4/drivers/input/serio/serio.c 2011-08-05 19:44:37.000000000 -0400
26390 @@ -497,7 +497,7 @@ static void serio_release_port(struct de
26391 */
26392 static void serio_init_port(struct serio *serio)
26393 {
26394 - static atomic_t serio_no = ATOMIC_INIT(0);
26395 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
26396
26397 __module_get(THIS_MODULE);
26398
26399 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio
26400 mutex_init(&serio->drv_mutex);
26401 device_initialize(&serio->dev);
26402 dev_set_name(&serio->dev, "serio%ld",
26403 - (long)atomic_inc_return(&serio_no) - 1);
26404 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
26405 serio->dev.bus = &serio_bus;
26406 serio->dev.release = serio_release_port;
26407 serio->dev.groups = serio_device_attr_groups;
26408 diff -urNp linux-2.6.39.4/drivers/isdn/capi/capi.c linux-2.6.39.4/drivers/isdn/capi/capi.c
26409 --- linux-2.6.39.4/drivers/isdn/capi/capi.c 2011-05-19 00:06:34.000000000 -0400
26410 +++ linux-2.6.39.4/drivers/isdn/capi/capi.c 2011-08-05 19:44:37.000000000 -0400
26411 @@ -89,8 +89,8 @@ struct capiminor {
26412
26413 struct capi20_appl *ap;
26414 u32 ncci;
26415 - atomic_t datahandle;
26416 - atomic_t msgid;
26417 + atomic_unchecked_t datahandle;
26418 + atomic_unchecked_t msgid;
26419
26420 struct tty_port port;
26421 int ttyinstop;
26422 @@ -414,7 +414,7 @@ gen_data_b3_resp_for(struct capiminor *m
26423 capimsg_setu16(s, 2, mp->ap->applid);
26424 capimsg_setu8 (s, 4, CAPI_DATA_B3);
26425 capimsg_setu8 (s, 5, CAPI_RESP);
26426 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
26427 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
26428 capimsg_setu32(s, 8, mp->ncci);
26429 capimsg_setu16(s, 12, datahandle);
26430 }
26431 @@ -547,14 +547,14 @@ static void handle_minor_send(struct cap
26432 mp->outbytes -= len;
26433 spin_unlock_bh(&mp->outlock);
26434
26435 - datahandle = atomic_inc_return(&mp->datahandle);
26436 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
26437 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
26438 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26439 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26440 capimsg_setu16(skb->data, 2, mp->ap->applid);
26441 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
26442 capimsg_setu8 (skb->data, 5, CAPI_REQ);
26443 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
26444 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
26445 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
26446 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
26447 capimsg_setu16(skb->data, 16, len); /* Data length */
26448 diff -urNp linux-2.6.39.4/drivers/isdn/gigaset/common.c linux-2.6.39.4/drivers/isdn/gigaset/common.c
26449 --- linux-2.6.39.4/drivers/isdn/gigaset/common.c 2011-05-19 00:06:34.000000000 -0400
26450 +++ linux-2.6.39.4/drivers/isdn/gigaset/common.c 2011-08-05 19:44:37.000000000 -0400
26451 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
26452 cs->commands_pending = 0;
26453 cs->cur_at_seq = 0;
26454 cs->gotfwver = -1;
26455 - cs->open_count = 0;
26456 + local_set(&cs->open_count, 0);
26457 cs->dev = NULL;
26458 cs->tty = NULL;
26459 cs->tty_dev = NULL;
26460 diff -urNp linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h
26461 --- linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h 2011-05-19 00:06:34.000000000 -0400
26462 +++ linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h 2011-08-05 19:44:37.000000000 -0400
26463 @@ -35,6 +35,7 @@
26464 #include <linux/tty_driver.h>
26465 #include <linux/list.h>
26466 #include <asm/atomic.h>
26467 +#include <asm/local.h>
26468
26469 #define GIG_VERSION {0, 5, 0, 0}
26470 #define GIG_COMPAT {0, 4, 0, 0}
26471 @@ -433,7 +434,7 @@ struct cardstate {
26472 spinlock_t cmdlock;
26473 unsigned curlen, cmdbytes;
26474
26475 - unsigned open_count;
26476 + local_t open_count;
26477 struct tty_struct *tty;
26478 struct tasklet_struct if_wake_tasklet;
26479 unsigned control_state;
26480 diff -urNp linux-2.6.39.4/drivers/isdn/gigaset/interface.c linux-2.6.39.4/drivers/isdn/gigaset/interface.c
26481 --- linux-2.6.39.4/drivers/isdn/gigaset/interface.c 2011-05-19 00:06:34.000000000 -0400
26482 +++ linux-2.6.39.4/drivers/isdn/gigaset/interface.c 2011-08-05 19:44:37.000000000 -0400
26483 @@ -160,9 +160,7 @@ static int if_open(struct tty_struct *tt
26484 return -ERESTARTSYS;
26485 tty->driver_data = cs;
26486
26487 - ++cs->open_count;
26488 -
26489 - if (cs->open_count == 1) {
26490 + if (local_inc_return(&cs->open_count) == 1) {
26491 spin_lock_irqsave(&cs->lock, flags);
26492 cs->tty = tty;
26493 spin_unlock_irqrestore(&cs->lock, flags);
26494 @@ -190,10 +188,10 @@ static void if_close(struct tty_struct *
26495
26496 if (!cs->connected)
26497 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26498 - else if (!cs->open_count)
26499 + else if (!local_read(&cs->open_count))
26500 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26501 else {
26502 - if (!--cs->open_count) {
26503 + if (!local_dec_return(&cs->open_count)) {
26504 spin_lock_irqsave(&cs->lock, flags);
26505 cs->tty = NULL;
26506 spin_unlock_irqrestore(&cs->lock, flags);
26507 @@ -228,7 +226,7 @@ static int if_ioctl(struct tty_struct *t
26508 if (!cs->connected) {
26509 gig_dbg(DEBUG_IF, "not connected");
26510 retval = -ENODEV;
26511 - } else if (!cs->open_count)
26512 + } else if (!local_read(&cs->open_count))
26513 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26514 else {
26515 retval = 0;
26516 @@ -358,7 +356,7 @@ static int if_write(struct tty_struct *t
26517 retval = -ENODEV;
26518 goto done;
26519 }
26520 - if (!cs->open_count) {
26521 + if (!local_read(&cs->open_count)) {
26522 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26523 retval = -ENODEV;
26524 goto done;
26525 @@ -411,7 +409,7 @@ static int if_write_room(struct tty_stru
26526 if (!cs->connected) {
26527 gig_dbg(DEBUG_IF, "not connected");
26528 retval = -ENODEV;
26529 - } else if (!cs->open_count)
26530 + } else if (!local_read(&cs->open_count))
26531 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26532 else if (cs->mstate != MS_LOCKED) {
26533 dev_warn(cs->dev, "can't write to unlocked device\n");
26534 @@ -441,7 +439,7 @@ static int if_chars_in_buffer(struct tty
26535
26536 if (!cs->connected)
26537 gig_dbg(DEBUG_IF, "not connected");
26538 - else if (!cs->open_count)
26539 + else if (!local_read(&cs->open_count))
26540 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26541 else if (cs->mstate != MS_LOCKED)
26542 dev_warn(cs->dev, "can't write to unlocked device\n");
26543 @@ -469,7 +467,7 @@ static void if_throttle(struct tty_struc
26544
26545 if (!cs->connected)
26546 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26547 - else if (!cs->open_count)
26548 + else if (!local_read(&cs->open_count))
26549 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26550 else
26551 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26552 @@ -493,7 +491,7 @@ static void if_unthrottle(struct tty_str
26553
26554 if (!cs->connected)
26555 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26556 - else if (!cs->open_count)
26557 + else if (!local_read(&cs->open_count))
26558 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26559 else
26560 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26561 @@ -524,7 +522,7 @@ static void if_set_termios(struct tty_st
26562 goto out;
26563 }
26564
26565 - if (!cs->open_count) {
26566 + if (!local_read(&cs->open_count)) {
26567 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26568 goto out;
26569 }
26570 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c
26571 --- linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c 2011-05-19 00:06:34.000000000 -0400
26572 +++ linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c 2011-08-05 19:44:37.000000000 -0400
26573 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
26574 }
26575 if (left) {
26576 if (t4file->user) {
26577 - if (copy_from_user(buf, dp, left))
26578 + if (left > sizeof buf || copy_from_user(buf, dp, left))
26579 return -EFAULT;
26580 } else {
26581 memcpy(buf, dp, left);
26582 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
26583 }
26584 if (left) {
26585 if (config->user) {
26586 - if (copy_from_user(buf, dp, left))
26587 + if (left > sizeof buf || copy_from_user(buf, dp, left))
26588 return -EFAULT;
26589 } else {
26590 memcpy(buf, dp, left);
26591 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c
26592 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-19 00:06:34.000000000 -0400
26593 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-05 19:44:37.000000000 -0400
26594 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
26595 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
26596 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
26597
26598 + pax_track_stack();
26599
26600 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
26601 {
26602 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c
26603 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c 2011-05-19 00:06:34.000000000 -0400
26604 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c 2011-08-05 19:44:37.000000000 -0400
26605 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
26606 IDI_SYNC_REQ req;
26607 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26608
26609 + pax_track_stack();
26610 +
26611 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26612
26613 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26614 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c
26615 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-19 00:06:34.000000000 -0400
26616 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-05 19:44:37.000000000 -0400
26617 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
26618 IDI_SYNC_REQ req;
26619 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26620
26621 + pax_track_stack();
26622 +
26623 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26624
26625 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26626 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c
26627 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-19 00:06:34.000000000 -0400
26628 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-05 19:44:37.000000000 -0400
26629 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
26630 IDI_SYNC_REQ req;
26631 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26632
26633 + pax_track_stack();
26634 +
26635 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26636
26637 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26638 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h
26639 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h 2011-05-19 00:06:34.000000000 -0400
26640 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h 2011-08-05 20:34:06.000000000 -0400
26641 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
26642 } diva_didd_add_adapter_t;
26643 typedef struct _diva_didd_remove_adapter {
26644 IDI_CALL p_request;
26645 -} diva_didd_remove_adapter_t;
26646 +} __no_const diva_didd_remove_adapter_t;
26647 typedef struct _diva_didd_read_adapter_array {
26648 void * buffer;
26649 dword length;
26650 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c
26651 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c 2011-05-19 00:06:34.000000000 -0400
26652 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c 2011-08-05 19:44:37.000000000 -0400
26653 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
26654 IDI_SYNC_REQ req;
26655 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26656
26657 + pax_track_stack();
26658 +
26659 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26660
26661 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26662 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c
26663 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c 2011-05-19 00:06:34.000000000 -0400
26664 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c 2011-08-05 19:44:37.000000000 -0400
26665 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
26666 dword d;
26667 word w;
26668
26669 + pax_track_stack();
26670 +
26671 a = plci->adapter;
26672 Id = ((word)plci->Id<<8)|a->Id;
26673 PUT_WORD(&SS_Ind[4],0x0000);
26674 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
26675 word j, n, w;
26676 dword d;
26677
26678 + pax_track_stack();
26679 +
26680
26681 for(i=0;i<8;i++) bp_parms[i].length = 0;
26682 for(i=0;i<2;i++) global_config[i].length = 0;
26683 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
26684 const byte llc3[] = {4,3,2,2,6,6,0};
26685 const byte header[] = {0,2,3,3,0,0,0};
26686
26687 + pax_track_stack();
26688 +
26689 for(i=0;i<8;i++) bp_parms[i].length = 0;
26690 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
26691 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
26692 @@ -14760,6 +14766,8 @@ static void group_optimization(DIVA_CAPI
26693 word appl_number_group_type[MAX_APPL];
26694 PLCI *auxplci;
26695
26696 + pax_track_stack();
26697 +
26698 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
26699
26700 if(!a->group_optimization_enabled)
26701 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c
26702 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-19 00:06:34.000000000 -0400
26703 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-05 19:44:37.000000000 -0400
26704 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
26705 IDI_SYNC_REQ req;
26706 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26707
26708 + pax_track_stack();
26709 +
26710 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26711
26712 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26713 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h
26714 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-05-19 00:06:34.000000000 -0400
26715 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-05 20:34:06.000000000 -0400
26716 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
26717 typedef struct _diva_os_idi_adapter_interface {
26718 diva_init_card_proc_t cleanup_adapter_proc;
26719 diva_cmd_card_proc_t cmd_proc;
26720 -} diva_os_idi_adapter_interface_t;
26721 +} __no_const diva_os_idi_adapter_interface_t;
26722
26723 typedef struct _diva_os_xdi_adapter {
26724 struct list_head link;
26725 diff -urNp linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c
26726 --- linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c 2011-05-19 00:06:34.000000000 -0400
26727 +++ linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c 2011-08-05 19:44:37.000000000 -0400
26728 @@ -1292,6 +1292,8 @@ isdn_ioctl(struct file *file, uint cmd,
26729 } iocpar;
26730 void __user *argp = (void __user *)arg;
26731
26732 + pax_track_stack();
26733 +
26734 #define name iocpar.name
26735 #define bname iocpar.bname
26736 #define iocts iocpar.iocts
26737 diff -urNp linux-2.6.39.4/drivers/isdn/icn/icn.c linux-2.6.39.4/drivers/isdn/icn/icn.c
26738 --- linux-2.6.39.4/drivers/isdn/icn/icn.c 2011-05-19 00:06:34.000000000 -0400
26739 +++ linux-2.6.39.4/drivers/isdn/icn/icn.c 2011-08-05 19:44:37.000000000 -0400
26740 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
26741 if (count > len)
26742 count = len;
26743 if (user) {
26744 - if (copy_from_user(msg, buf, count))
26745 + if (count > sizeof msg || copy_from_user(msg, buf, count))
26746 return -EFAULT;
26747 } else
26748 memcpy(msg, buf, count);
26749 diff -urNp linux-2.6.39.4/drivers/lguest/core.c linux-2.6.39.4/drivers/lguest/core.c
26750 --- linux-2.6.39.4/drivers/lguest/core.c 2011-05-19 00:06:34.000000000 -0400
26751 +++ linux-2.6.39.4/drivers/lguest/core.c 2011-08-05 19:44:37.000000000 -0400
26752 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
26753 * it's worked so far. The end address needs +1 because __get_vm_area
26754 * allocates an extra guard page, so we need space for that.
26755 */
26756 +
26757 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26758 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26759 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
26760 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26761 +#else
26762 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26763 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
26764 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26765 +#endif
26766 +
26767 if (!switcher_vma) {
26768 err = -ENOMEM;
26769 printk("lguest: could not map switcher pages high\n");
26770 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
26771 * Now the Switcher is mapped at the right address, we can't fail!
26772 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
26773 */
26774 - memcpy(switcher_vma->addr, start_switcher_text,
26775 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
26776 end_switcher_text - start_switcher_text);
26777
26778 printk(KERN_INFO "lguest: mapped switcher at %p\n",
26779 diff -urNp linux-2.6.39.4/drivers/lguest/x86/core.c linux-2.6.39.4/drivers/lguest/x86/core.c
26780 --- linux-2.6.39.4/drivers/lguest/x86/core.c 2011-05-19 00:06:34.000000000 -0400
26781 +++ linux-2.6.39.4/drivers/lguest/x86/core.c 2011-08-05 19:44:37.000000000 -0400
26782 @@ -59,7 +59,7 @@ static struct {
26783 /* Offset from where switcher.S was compiled to where we've copied it */
26784 static unsigned long switcher_offset(void)
26785 {
26786 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
26787 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
26788 }
26789
26790 /* This cpu's struct lguest_pages. */
26791 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
26792 * These copies are pretty cheap, so we do them unconditionally: */
26793 /* Save the current Host top-level page directory.
26794 */
26795 +
26796 +#ifdef CONFIG_PAX_PER_CPU_PGD
26797 + pages->state.host_cr3 = read_cr3();
26798 +#else
26799 pages->state.host_cr3 = __pa(current->mm->pgd);
26800 +#endif
26801 +
26802 /*
26803 * Set up the Guest's page tables to see this CPU's pages (and no
26804 * other CPU's pages).
26805 @@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
26806 * compiled-in switcher code and the high-mapped copy we just made.
26807 */
26808 for (i = 0; i < IDT_ENTRIES; i++)
26809 - default_idt_entries[i] += switcher_offset();
26810 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
26811
26812 /*
26813 * Set up the Switcher's per-cpu areas.
26814 @@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
26815 * it will be undisturbed when we switch. To change %cs and jump we
26816 * need this structure to feed to Intel's "lcall" instruction.
26817 */
26818 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
26819 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
26820 lguest_entry.segment = LGUEST_CS;
26821
26822 /*
26823 diff -urNp linux-2.6.39.4/drivers/lguest/x86/switcher_32.S linux-2.6.39.4/drivers/lguest/x86/switcher_32.S
26824 --- linux-2.6.39.4/drivers/lguest/x86/switcher_32.S 2011-05-19 00:06:34.000000000 -0400
26825 +++ linux-2.6.39.4/drivers/lguest/x86/switcher_32.S 2011-08-05 19:44:37.000000000 -0400
26826 @@ -87,6 +87,7 @@
26827 #include <asm/page.h>
26828 #include <asm/segment.h>
26829 #include <asm/lguest.h>
26830 +#include <asm/processor-flags.h>
26831
26832 // We mark the start of the code to copy
26833 // It's placed in .text tho it's never run here
26834 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
26835 // Changes type when we load it: damn Intel!
26836 // For after we switch over our page tables
26837 // That entry will be read-only: we'd crash.
26838 +
26839 +#ifdef CONFIG_PAX_KERNEXEC
26840 + mov %cr0, %edx
26841 + xor $X86_CR0_WP, %edx
26842 + mov %edx, %cr0
26843 +#endif
26844 +
26845 movl $(GDT_ENTRY_TSS*8), %edx
26846 ltr %dx
26847
26848 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
26849 // Let's clear it again for our return.
26850 // The GDT descriptor of the Host
26851 // Points to the table after two "size" bytes
26852 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
26853 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
26854 // Clear "used" from type field (byte 5, bit 2)
26855 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
26856 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
26857 +
26858 +#ifdef CONFIG_PAX_KERNEXEC
26859 + mov %cr0, %eax
26860 + xor $X86_CR0_WP, %eax
26861 + mov %eax, %cr0
26862 +#endif
26863
26864 // Once our page table's switched, the Guest is live!
26865 // The Host fades as we run this final step.
26866 @@ -295,13 +309,12 @@ deliver_to_host:
26867 // I consulted gcc, and it gave
26868 // These instructions, which I gladly credit:
26869 leal (%edx,%ebx,8), %eax
26870 - movzwl (%eax),%edx
26871 - movl 4(%eax), %eax
26872 - xorw %ax, %ax
26873 - orl %eax, %edx
26874 + movl 4(%eax), %edx
26875 + movw (%eax), %dx
26876 // Now the address of the handler's in %edx
26877 // We call it now: its "iret" drops us home.
26878 - jmp *%edx
26879 + ljmp $__KERNEL_CS, $1f
26880 +1: jmp *%edx
26881
26882 // Every interrupt can come to us here
26883 // But we must truly tell each apart.
26884 diff -urNp linux-2.6.39.4/drivers/md/dm.c linux-2.6.39.4/drivers/md/dm.c
26885 --- linux-2.6.39.4/drivers/md/dm.c 2011-05-19 00:06:34.000000000 -0400
26886 +++ linux-2.6.39.4/drivers/md/dm.c 2011-08-05 19:44:37.000000000 -0400
26887 @@ -162,9 +162,9 @@ struct mapped_device {
26888 /*
26889 * Event handling.
26890 */
26891 - atomic_t event_nr;
26892 + atomic_unchecked_t event_nr;
26893 wait_queue_head_t eventq;
26894 - atomic_t uevent_seq;
26895 + atomic_unchecked_t uevent_seq;
26896 struct list_head uevent_list;
26897 spinlock_t uevent_lock; /* Protect access to uevent_list */
26898
26899 @@ -1836,8 +1836,8 @@ static struct mapped_device *alloc_dev(i
26900 rwlock_init(&md->map_lock);
26901 atomic_set(&md->holders, 1);
26902 atomic_set(&md->open_count, 0);
26903 - atomic_set(&md->event_nr, 0);
26904 - atomic_set(&md->uevent_seq, 0);
26905 + atomic_set_unchecked(&md->event_nr, 0);
26906 + atomic_set_unchecked(&md->uevent_seq, 0);
26907 INIT_LIST_HEAD(&md->uevent_list);
26908 spin_lock_init(&md->uevent_lock);
26909
26910 @@ -1971,7 +1971,7 @@ static void event_callback(void *context
26911
26912 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
26913
26914 - atomic_inc(&md->event_nr);
26915 + atomic_inc_unchecked(&md->event_nr);
26916 wake_up(&md->eventq);
26917 }
26918
26919 @@ -2547,18 +2547,18 @@ int dm_kobject_uevent(struct mapped_devi
26920
26921 uint32_t dm_next_uevent_seq(struct mapped_device *md)
26922 {
26923 - return atomic_add_return(1, &md->uevent_seq);
26924 + return atomic_add_return_unchecked(1, &md->uevent_seq);
26925 }
26926
26927 uint32_t dm_get_event_nr(struct mapped_device *md)
26928 {
26929 - return atomic_read(&md->event_nr);
26930 + return atomic_read_unchecked(&md->event_nr);
26931 }
26932
26933 int dm_wait_event(struct mapped_device *md, int event_nr)
26934 {
26935 return wait_event_interruptible(md->eventq,
26936 - (event_nr != atomic_read(&md->event_nr)));
26937 + (event_nr != atomic_read_unchecked(&md->event_nr)));
26938 }
26939
26940 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
26941 diff -urNp linux-2.6.39.4/drivers/md/dm-ioctl.c linux-2.6.39.4/drivers/md/dm-ioctl.c
26942 --- linux-2.6.39.4/drivers/md/dm-ioctl.c 2011-05-19 00:06:34.000000000 -0400
26943 +++ linux-2.6.39.4/drivers/md/dm-ioctl.c 2011-08-05 19:44:37.000000000 -0400
26944 @@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
26945 cmd == DM_LIST_VERSIONS_CMD)
26946 return 0;
26947
26948 - if ((cmd == DM_DEV_CREATE_CMD)) {
26949 + if (cmd == DM_DEV_CREATE_CMD) {
26950 if (!*param->name) {
26951 DMWARN("name not supplied when creating device");
26952 return -EINVAL;
26953 diff -urNp linux-2.6.39.4/drivers/md/dm-raid1.c linux-2.6.39.4/drivers/md/dm-raid1.c
26954 --- linux-2.6.39.4/drivers/md/dm-raid1.c 2011-05-19 00:06:34.000000000 -0400
26955 +++ linux-2.6.39.4/drivers/md/dm-raid1.c 2011-08-05 19:44:37.000000000 -0400
26956 @@ -42,7 +42,7 @@ enum dm_raid1_error {
26957
26958 struct mirror {
26959 struct mirror_set *ms;
26960 - atomic_t error_count;
26961 + atomic_unchecked_t error_count;
26962 unsigned long error_type;
26963 struct dm_dev *dev;
26964 sector_t offset;
26965 @@ -187,7 +187,7 @@ static struct mirror *get_valid_mirror(s
26966 struct mirror *m;
26967
26968 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
26969 - if (!atomic_read(&m->error_count))
26970 + if (!atomic_read_unchecked(&m->error_count))
26971 return m;
26972
26973 return NULL;
26974 @@ -219,7 +219,7 @@ static void fail_mirror(struct mirror *m
26975 * simple way to tell if a device has encountered
26976 * errors.
26977 */
26978 - atomic_inc(&m->error_count);
26979 + atomic_inc_unchecked(&m->error_count);
26980
26981 if (test_and_set_bit(error_type, &m->error_type))
26982 return;
26983 @@ -410,7 +410,7 @@ static struct mirror *choose_mirror(stru
26984 struct mirror *m = get_default_mirror(ms);
26985
26986 do {
26987 - if (likely(!atomic_read(&m->error_count)))
26988 + if (likely(!atomic_read_unchecked(&m->error_count)))
26989 return m;
26990
26991 if (m-- == ms->mirror)
26992 @@ -424,7 +424,7 @@ static int default_ok(struct mirror *m)
26993 {
26994 struct mirror *default_mirror = get_default_mirror(m->ms);
26995
26996 - return !atomic_read(&default_mirror->error_count);
26997 + return !atomic_read_unchecked(&default_mirror->error_count);
26998 }
26999
27000 static int mirror_available(struct mirror_set *ms, struct bio *bio)
27001 @@ -561,7 +561,7 @@ static void do_reads(struct mirror_set *
27002 */
27003 if (likely(region_in_sync(ms, region, 1)))
27004 m = choose_mirror(ms, bio->bi_sector);
27005 - else if (m && atomic_read(&m->error_count))
27006 + else if (m && atomic_read_unchecked(&m->error_count))
27007 m = NULL;
27008
27009 if (likely(m))
27010 @@ -939,7 +939,7 @@ static int get_mirror(struct mirror_set
27011 }
27012
27013 ms->mirror[mirror].ms = ms;
27014 - atomic_set(&(ms->mirror[mirror].error_count), 0);
27015 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
27016 ms->mirror[mirror].error_type = 0;
27017 ms->mirror[mirror].offset = offset;
27018
27019 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
27020 */
27021 static char device_status_char(struct mirror *m)
27022 {
27023 - if (!atomic_read(&(m->error_count)))
27024 + if (!atomic_read_unchecked(&(m->error_count)))
27025 return 'A';
27026
27027 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
27028 diff -urNp linux-2.6.39.4/drivers/md/dm-stripe.c linux-2.6.39.4/drivers/md/dm-stripe.c
27029 --- linux-2.6.39.4/drivers/md/dm-stripe.c 2011-05-19 00:06:34.000000000 -0400
27030 +++ linux-2.6.39.4/drivers/md/dm-stripe.c 2011-08-05 19:44:37.000000000 -0400
27031 @@ -20,7 +20,7 @@ struct stripe {
27032 struct dm_dev *dev;
27033 sector_t physical_start;
27034
27035 - atomic_t error_count;
27036 + atomic_unchecked_t error_count;
27037 };
27038
27039 struct stripe_c {
27040 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
27041 kfree(sc);
27042 return r;
27043 }
27044 - atomic_set(&(sc->stripe[i].error_count), 0);
27045 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
27046 }
27047
27048 ti->private = sc;
27049 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
27050 DMEMIT("%d ", sc->stripes);
27051 for (i = 0; i < sc->stripes; i++) {
27052 DMEMIT("%s ", sc->stripe[i].dev->name);
27053 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
27054 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
27055 'D' : 'A';
27056 }
27057 buffer[i] = '\0';
27058 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
27059 */
27060 for (i = 0; i < sc->stripes; i++)
27061 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
27062 - atomic_inc(&(sc->stripe[i].error_count));
27063 - if (atomic_read(&(sc->stripe[i].error_count)) <
27064 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
27065 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
27066 DM_IO_ERROR_THRESHOLD)
27067 schedule_work(&sc->trigger_event);
27068 }
27069 diff -urNp linux-2.6.39.4/drivers/md/dm-table.c linux-2.6.39.4/drivers/md/dm-table.c
27070 --- linux-2.6.39.4/drivers/md/dm-table.c 2011-06-03 00:04:14.000000000 -0400
27071 +++ linux-2.6.39.4/drivers/md/dm-table.c 2011-08-05 19:44:37.000000000 -0400
27072 @@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
27073 if (!dev_size)
27074 return 0;
27075
27076 - if ((start >= dev_size) || (start + len > dev_size)) {
27077 + if ((start >= dev_size) || (len > dev_size - start)) {
27078 DMWARN("%s: %s too small for target: "
27079 "start=%llu, len=%llu, dev_size=%llu",
27080 dm_device_name(ti->table->md), bdevname(bdev, b),
27081 diff -urNp linux-2.6.39.4/drivers/md/md.c linux-2.6.39.4/drivers/md/md.c
27082 --- linux-2.6.39.4/drivers/md/md.c 2011-07-09 09:18:51.000000000 -0400
27083 +++ linux-2.6.39.4/drivers/md/md.c 2011-08-05 19:44:37.000000000 -0400
27084 @@ -226,10 +226,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
27085 * start build, activate spare
27086 */
27087 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
27088 -static atomic_t md_event_count;
27089 +static atomic_unchecked_t md_event_count;
27090 void md_new_event(mddev_t *mddev)
27091 {
27092 - atomic_inc(&md_event_count);
27093 + atomic_inc_unchecked(&md_event_count);
27094 wake_up(&md_event_waiters);
27095 }
27096 EXPORT_SYMBOL_GPL(md_new_event);
27097 @@ -239,7 +239,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
27098 */
27099 static void md_new_event_inintr(mddev_t *mddev)
27100 {
27101 - atomic_inc(&md_event_count);
27102 + atomic_inc_unchecked(&md_event_count);
27103 wake_up(&md_event_waiters);
27104 }
27105
27106 @@ -1454,7 +1454,7 @@ static int super_1_load(mdk_rdev_t *rdev
27107
27108 rdev->preferred_minor = 0xffff;
27109 rdev->data_offset = le64_to_cpu(sb->data_offset);
27110 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27111 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27112
27113 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
27114 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
27115 @@ -1632,7 +1632,7 @@ static void super_1_sync(mddev_t *mddev,
27116 else
27117 sb->resync_offset = cpu_to_le64(0);
27118
27119 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
27120 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
27121
27122 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
27123 sb->size = cpu_to_le64(mddev->dev_sectors);
27124 @@ -2414,7 +2414,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
27125 static ssize_t
27126 errors_show(mdk_rdev_t *rdev, char *page)
27127 {
27128 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
27129 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
27130 }
27131
27132 static ssize_t
27133 @@ -2423,7 +2423,7 @@ errors_store(mdk_rdev_t *rdev, const cha
27134 char *e;
27135 unsigned long n = simple_strtoul(buf, &e, 10);
27136 if (*buf && (*e == 0 || *e == '\n')) {
27137 - atomic_set(&rdev->corrected_errors, n);
27138 + atomic_set_unchecked(&rdev->corrected_errors, n);
27139 return len;
27140 }
27141 return -EINVAL;
27142 @@ -2779,8 +2779,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
27143 rdev->last_read_error.tv_sec = 0;
27144 rdev->last_read_error.tv_nsec = 0;
27145 atomic_set(&rdev->nr_pending, 0);
27146 - atomic_set(&rdev->read_errors, 0);
27147 - atomic_set(&rdev->corrected_errors, 0);
27148 + atomic_set_unchecked(&rdev->read_errors, 0);
27149 + atomic_set_unchecked(&rdev->corrected_errors, 0);
27150
27151 INIT_LIST_HEAD(&rdev->same_set);
27152 init_waitqueue_head(&rdev->blocked_wait);
27153 @@ -6388,7 +6388,7 @@ static int md_seq_show(struct seq_file *
27154
27155 spin_unlock(&pers_lock);
27156 seq_printf(seq, "\n");
27157 - mi->event = atomic_read(&md_event_count);
27158 + mi->event = atomic_read_unchecked(&md_event_count);
27159 return 0;
27160 }
27161 if (v == (void*)2) {
27162 @@ -6477,7 +6477,7 @@ static int md_seq_show(struct seq_file *
27163 chunk_kb ? "KB" : "B");
27164 if (bitmap->file) {
27165 seq_printf(seq, ", file: ");
27166 - seq_path(seq, &bitmap->file->f_path, " \t\n");
27167 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
27168 }
27169
27170 seq_printf(seq, "\n");
27171 @@ -6511,7 +6511,7 @@ static int md_seq_open(struct inode *ino
27172 else {
27173 struct seq_file *p = file->private_data;
27174 p->private = mi;
27175 - mi->event = atomic_read(&md_event_count);
27176 + mi->event = atomic_read_unchecked(&md_event_count);
27177 }
27178 return error;
27179 }
27180 @@ -6527,7 +6527,7 @@ static unsigned int mdstat_poll(struct f
27181 /* always allow read */
27182 mask = POLLIN | POLLRDNORM;
27183
27184 - if (mi->event != atomic_read(&md_event_count))
27185 + if (mi->event != atomic_read_unchecked(&md_event_count))
27186 mask |= POLLERR | POLLPRI;
27187 return mask;
27188 }
27189 @@ -6571,7 +6571,7 @@ static int is_mddev_idle(mddev_t *mddev,
27190 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
27191 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
27192 (int)part_stat_read(&disk->part0, sectors[1]) -
27193 - atomic_read(&disk->sync_io);
27194 + atomic_read_unchecked(&disk->sync_io);
27195 /* sync IO will cause sync_io to increase before the disk_stats
27196 * as sync_io is counted when a request starts, and
27197 * disk_stats is counted when it completes.
27198 diff -urNp linux-2.6.39.4/drivers/md/md.h linux-2.6.39.4/drivers/md/md.h
27199 --- linux-2.6.39.4/drivers/md/md.h 2011-05-19 00:06:34.000000000 -0400
27200 +++ linux-2.6.39.4/drivers/md/md.h 2011-08-05 19:44:37.000000000 -0400
27201 @@ -97,13 +97,13 @@ struct mdk_rdev_s
27202 * only maintained for arrays that
27203 * support hot removal
27204 */
27205 - atomic_t read_errors; /* number of consecutive read errors that
27206 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
27207 * we have tried to ignore.
27208 */
27209 struct timespec last_read_error; /* monotonic time since our
27210 * last read error
27211 */
27212 - atomic_t corrected_errors; /* number of corrected read errors,
27213 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
27214 * for reporting to userspace and storing
27215 * in superblock.
27216 */
27217 @@ -342,7 +342,7 @@ static inline void rdev_dec_pending(mdk_
27218
27219 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
27220 {
27221 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27222 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27223 }
27224
27225 struct mdk_personality
27226 diff -urNp linux-2.6.39.4/drivers/md/raid10.c linux-2.6.39.4/drivers/md/raid10.c
27227 --- linux-2.6.39.4/drivers/md/raid10.c 2011-05-19 00:06:34.000000000 -0400
27228 +++ linux-2.6.39.4/drivers/md/raid10.c 2011-08-05 19:44:37.000000000 -0400
27229 @@ -1209,7 +1209,7 @@ static void end_sync_read(struct bio *bi
27230 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
27231 set_bit(R10BIO_Uptodate, &r10_bio->state);
27232 else {
27233 - atomic_add(r10_bio->sectors,
27234 + atomic_add_unchecked(r10_bio->sectors,
27235 &conf->mirrors[d].rdev->corrected_errors);
27236 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
27237 md_error(r10_bio->mddev,
27238 @@ -1417,7 +1417,7 @@ static void check_decay_read_errors(mdde
27239 {
27240 struct timespec cur_time_mon;
27241 unsigned long hours_since_last;
27242 - unsigned int read_errors = atomic_read(&rdev->read_errors);
27243 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
27244
27245 ktime_get_ts(&cur_time_mon);
27246
27247 @@ -1439,9 +1439,9 @@ static void check_decay_read_errors(mdde
27248 * overflowing the shift of read_errors by hours_since_last.
27249 */
27250 if (hours_since_last >= 8 * sizeof(read_errors))
27251 - atomic_set(&rdev->read_errors, 0);
27252 + atomic_set_unchecked(&rdev->read_errors, 0);
27253 else
27254 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
27255 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
27256 }
27257
27258 /*
27259 @@ -1476,8 +1476,8 @@ static void fix_read_error(conf_t *conf,
27260 }
27261
27262 check_decay_read_errors(mddev, rdev);
27263 - atomic_inc(&rdev->read_errors);
27264 - cur_read_error_count = atomic_read(&rdev->read_errors);
27265 + atomic_inc_unchecked(&rdev->read_errors);
27266 + cur_read_error_count = atomic_read_unchecked(&rdev->read_errors);
27267 if (cur_read_error_count > max_read_errors) {
27268 rcu_read_unlock();
27269 printk(KERN_NOTICE
27270 @@ -1550,7 +1550,7 @@ static void fix_read_error(conf_t *conf,
27271 test_bit(In_sync, &rdev->flags)) {
27272 atomic_inc(&rdev->nr_pending);
27273 rcu_read_unlock();
27274 - atomic_add(s, &rdev->corrected_errors);
27275 + atomic_add_unchecked(s, &rdev->corrected_errors);
27276 if (sync_page_io(rdev,
27277 r10_bio->devs[sl].addr +
27278 sect,
27279 diff -urNp linux-2.6.39.4/drivers/md/raid1.c linux-2.6.39.4/drivers/md/raid1.c
27280 --- linux-2.6.39.4/drivers/md/raid1.c 2011-05-19 00:06:34.000000000 -0400
27281 +++ linux-2.6.39.4/drivers/md/raid1.c 2011-08-05 19:44:37.000000000 -0400
27282 @@ -1342,7 +1342,7 @@ static void sync_request_write(mddev_t *
27283 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
27284 continue;
27285 rdev = conf->mirrors[d].rdev;
27286 - atomic_add(s, &rdev->corrected_errors);
27287 + atomic_add_unchecked(s, &rdev->corrected_errors);
27288 if (sync_page_io(rdev,
27289 sect,
27290 s<<9,
27291 @@ -1488,7 +1488,7 @@ static void fix_read_error(conf_t *conf,
27292 /* Well, this device is dead */
27293 md_error(mddev, rdev);
27294 else {
27295 - atomic_add(s, &rdev->corrected_errors);
27296 + atomic_add_unchecked(s, &rdev->corrected_errors);
27297 printk(KERN_INFO
27298 "md/raid1:%s: read error corrected "
27299 "(%d sectors at %llu on %s)\n",
27300 diff -urNp linux-2.6.39.4/drivers/md/raid5.c linux-2.6.39.4/drivers/md/raid5.c
27301 --- linux-2.6.39.4/drivers/md/raid5.c 2011-06-25 12:55:22.000000000 -0400
27302 +++ linux-2.6.39.4/drivers/md/raid5.c 2011-08-05 19:44:37.000000000 -0400
27303 @@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
27304 bi->bi_next = NULL;
27305 if ((rw & WRITE) &&
27306 test_bit(R5_ReWrite, &sh->dev[i].flags))
27307 - atomic_add(STRIPE_SECTORS,
27308 + atomic_add_unchecked(STRIPE_SECTORS,
27309 &rdev->corrected_errors);
27310 generic_make_request(bi);
27311 } else {
27312 @@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
27313 clear_bit(R5_ReadError, &sh->dev[i].flags);
27314 clear_bit(R5_ReWrite, &sh->dev[i].flags);
27315 }
27316 - if (atomic_read(&conf->disks[i].rdev->read_errors))
27317 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
27318 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
27319 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
27320 } else {
27321 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
27322 int retry = 0;
27323 rdev = conf->disks[i].rdev;
27324
27325 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
27326 - atomic_inc(&rdev->read_errors);
27327 + atomic_inc_unchecked(&rdev->read_errors);
27328 if (conf->mddev->degraded >= conf->max_degraded)
27329 printk_rl(KERN_WARNING
27330 "md/raid:%s: read error not correctable "
27331 @@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
27332 (unsigned long long)(sh->sector
27333 + rdev->data_offset),
27334 bdn);
27335 - else if (atomic_read(&rdev->read_errors)
27336 + else if (atomic_read_unchecked(&rdev->read_errors)
27337 > conf->max_nr_stripes)
27338 printk(KERN_WARNING
27339 "md/raid:%s: Too many read errors, failing device %s.\n",
27340 @@ -1947,6 +1947,7 @@ static sector_t compute_blocknr(struct s
27341 sector_t r_sector;
27342 struct stripe_head sh2;
27343
27344 + pax_track_stack();
27345
27346 chunk_offset = sector_div(new_sector, sectors_per_chunk);
27347 stripe = new_sector;
27348 diff -urNp linux-2.6.39.4/drivers/media/common/saa7146_hlp.c linux-2.6.39.4/drivers/media/common/saa7146_hlp.c
27349 --- linux-2.6.39.4/drivers/media/common/saa7146_hlp.c 2011-05-19 00:06:34.000000000 -0400
27350 +++ linux-2.6.39.4/drivers/media/common/saa7146_hlp.c 2011-08-05 19:44:37.000000000 -0400
27351 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
27352
27353 int x[32], y[32], w[32], h[32];
27354
27355 + pax_track_stack();
27356 +
27357 /* clear out memory */
27358 memset(&line_list[0], 0x00, sizeof(u32)*32);
27359 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
27360 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
27361 --- linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-19 00:06:34.000000000 -0400
27362 +++ linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-05 19:44:37.000000000 -0400
27363 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
27364 u8 buf[HOST_LINK_BUF_SIZE];
27365 int i;
27366
27367 + pax_track_stack();
27368 +
27369 dprintk("%s\n", __func__);
27370
27371 /* check if we have space for a link buf in the rx_buffer */
27372 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
27373 unsigned long timeout;
27374 int written;
27375
27376 + pax_track_stack();
27377 +
27378 dprintk("%s\n", __func__);
27379
27380 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
27381 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h
27382 --- linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-05-19 00:06:34.000000000 -0400
27383 +++ linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-05 20:34:06.000000000 -0400
27384 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
27385 union {
27386 dmx_ts_cb ts;
27387 dmx_section_cb sec;
27388 - } cb;
27389 + } __no_const cb;
27390
27391 struct dvb_demux *demux;
27392 void *priv;
27393 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c
27394 --- linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-05-19 00:06:34.000000000 -0400
27395 +++ linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-05 20:34:06.000000000 -0400
27396 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
27397 const struct dvb_device *template, void *priv, int type)
27398 {
27399 struct dvb_device *dvbdev;
27400 - struct file_operations *dvbdevfops;
27401 + file_operations_no_const *dvbdevfops;
27402 struct device *clsdev;
27403 int minor;
27404 int id;
27405 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c
27406 --- linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-05-19 00:06:34.000000000 -0400
27407 +++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-05 20:34:06.000000000 -0400
27408 @@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
27409 struct dib0700_adapter_state {
27410 int (*set_param_save) (struct dvb_frontend *,
27411 struct dvb_frontend_parameters *);
27412 -};
27413 +} __no_const;
27414
27415 static int dib7070_set_param_override(struct dvb_frontend *fe,
27416 struct dvb_frontend_parameters *fep)
27417 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c
27418 --- linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-19 00:06:34.000000000 -0400
27419 +++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-05 19:44:37.000000000 -0400
27420 @@ -391,6 +391,8 @@ int dib0700_download_firmware(struct usb
27421
27422 u8 buf[260];
27423
27424 + pax_track_stack();
27425 +
27426 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
27427 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
27428 hx.addr, hx.len, hx.chk);
27429 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c
27430 --- linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-05-19 00:06:34.000000000 -0400
27431 +++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-08-05 20:34:06.000000000 -0400
27432 @@ -95,7 +95,7 @@ struct su3000_state {
27433
27434 struct s6x0_state {
27435 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
27436 -};
27437 +} __no_const;
27438
27439 /* debug */
27440 static int dvb_usb_dw2102_debug;
27441 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c
27442 --- linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-05-19 00:06:34.000000000 -0400
27443 +++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-05 19:44:37.000000000 -0400
27444 @@ -663,6 +663,7 @@ static int lme2510_download_firmware(str
27445 packet_size = 0x31;
27446 len_in = 1;
27447
27448 + pax_track_stack();
27449
27450 info("FRM Starting Firmware Download");
27451
27452 @@ -715,6 +716,8 @@ static void lme_coldreset(struct usb_dev
27453 int ret = 0, len_in;
27454 u8 data[512] = {0};
27455
27456 + pax_track_stack();
27457 +
27458 data[0] = 0x0a;
27459 len_in = 1;
27460 info("FRM Firmware Cold Reset");
27461 diff -urNp linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h
27462 --- linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h 2011-05-19 00:06:34.000000000 -0400
27463 +++ linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h 2011-08-05 20:34:06.000000000 -0400
27464 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
27465 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
27466 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
27467 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
27468 -};
27469 +} __no_const;
27470
27471 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
27472 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27473 diff -urNp linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c
27474 --- linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c 2011-05-19 00:06:34.000000000 -0400
27475 +++ linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c 2011-08-05 19:44:37.000000000 -0400
27476 @@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
27477 int ret = -1;
27478 int sync;
27479
27480 + pax_track_stack();
27481 +
27482 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
27483
27484 fcp = 3000;
27485 diff -urNp linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c
27486 --- linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c 2011-05-19 00:06:34.000000000 -0400
27487 +++ linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c 2011-08-05 19:44:37.000000000 -0400
27488 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
27489 u8 tudata[585];
27490 int i;
27491
27492 + pax_track_stack();
27493 +
27494 dprintk("Firmware is %zd bytes\n",fw->size);
27495
27496 /* Get eprom data */
27497 diff -urNp linux-2.6.39.4/drivers/media/radio/radio-cadet.c linux-2.6.39.4/drivers/media/radio/radio-cadet.c
27498 --- linux-2.6.39.4/drivers/media/radio/radio-cadet.c 2011-05-19 00:06:34.000000000 -0400
27499 +++ linux-2.6.39.4/drivers/media/radio/radio-cadet.c 2011-08-05 19:44:37.000000000 -0400
27500 @@ -349,7 +349,7 @@ static ssize_t cadet_read(struct file *f
27501 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
27502 mutex_unlock(&dev->lock);
27503
27504 - if (copy_to_user(data, readbuf, i))
27505 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
27506 return -EFAULT;
27507 return i;
27508 }
27509 diff -urNp linux-2.6.39.4/drivers/media/rc/rc-main.c linux-2.6.39.4/drivers/media/rc/rc-main.c
27510 --- linux-2.6.39.4/drivers/media/rc/rc-main.c 2011-05-19 00:06:34.000000000 -0400
27511 +++ linux-2.6.39.4/drivers/media/rc/rc-main.c 2011-08-05 19:44:37.000000000 -0400
27512 @@ -996,7 +996,7 @@ EXPORT_SYMBOL_GPL(rc_free_device);
27513
27514 int rc_register_device(struct rc_dev *dev)
27515 {
27516 - static atomic_t devno = ATOMIC_INIT(0);
27517 + static atomic_unchecked_t devno = ATOMIC_INIT(0);
27518 struct rc_map *rc_map;
27519 const char *path;
27520 int rc;
27521 @@ -1019,7 +1019,7 @@ int rc_register_device(struct rc_dev *de
27522 if (dev->close)
27523 dev->input_dev->close = ir_close;
27524
27525 - dev->devno = (unsigned long)(atomic_inc_return(&devno) - 1);
27526 + dev->devno = (unsigned long)(atomic_inc_return_unchecked(&devno) - 1);
27527 dev_set_name(&dev->dev, "rc%ld", dev->devno);
27528 dev_set_drvdata(&dev->dev, dev);
27529 rc = device_add(&dev->dev);
27530 diff -urNp linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c
27531 --- linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c 2011-05-19 00:06:34.000000000 -0400
27532 +++ linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c 2011-08-05 19:44:37.000000000 -0400
27533 @@ -61,7 +61,7 @@ static struct pci_device_id cx18_pci_tbl
27534
27535 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
27536
27537 -static atomic_t cx18_instance = ATOMIC_INIT(0);
27538 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
27539
27540 /* Parameter declarations */
27541 static int cardtype[CX18_MAX_CARDS];
27542 @@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
27543 struct i2c_client c;
27544 u8 eedata[256];
27545
27546 + pax_track_stack();
27547 +
27548 memset(&c, 0, sizeof(c));
27549 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
27550 c.adapter = &cx->i2c_adap[0];
27551 @@ -892,7 +894,7 @@ static int __devinit cx18_probe(struct p
27552 struct cx18 *cx;
27553
27554 /* FIXME - module parameter arrays constrain max instances */
27555 - i = atomic_inc_return(&cx18_instance) - 1;
27556 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
27557 if (i >= CX18_MAX_CARDS) {
27558 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
27559 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
27560 diff -urNp linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c
27561 --- linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c 2011-05-19 00:06:34.000000000 -0400
27562 +++ linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c 2011-08-05 19:44:37.000000000 -0400
27563 @@ -53,6 +53,8 @@ static void cx23885_input_process_measur
27564 bool handle = false;
27565 struct ir_raw_event ir_core_event[64];
27566
27567 + pax_track_stack();
27568 +
27569 do {
27570 num = 0;
27571 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
27572 diff -urNp linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c
27573 --- linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c 2011-05-19 00:06:34.000000000 -0400
27574 +++ linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c 2011-08-05 19:44:37.000000000 -0400
27575 @@ -80,7 +80,7 @@ static struct pci_device_id ivtv_pci_tbl
27576 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
27577
27578 /* ivtv instance counter */
27579 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
27580 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
27581
27582 /* Parameter declarations */
27583 static int cardtype[IVTV_MAX_CARDS];
27584 diff -urNp linux-2.6.39.4/drivers/media/video/omap24xxcam.c linux-2.6.39.4/drivers/media/video/omap24xxcam.c
27585 --- linux-2.6.39.4/drivers/media/video/omap24xxcam.c 2011-05-19 00:06:34.000000000 -0400
27586 +++ linux-2.6.39.4/drivers/media/video/omap24xxcam.c 2011-08-05 19:44:37.000000000 -0400
27587 @@ -403,7 +403,7 @@ static void omap24xxcam_vbq_complete(str
27588 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
27589
27590 do_gettimeofday(&vb->ts);
27591 - vb->field_count = atomic_add_return(2, &fh->field_count);
27592 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
27593 if (csr & csr_error) {
27594 vb->state = VIDEOBUF_ERROR;
27595 if (!atomic_read(&fh->cam->in_reset)) {
27596 diff -urNp linux-2.6.39.4/drivers/media/video/omap24xxcam.h linux-2.6.39.4/drivers/media/video/omap24xxcam.h
27597 --- linux-2.6.39.4/drivers/media/video/omap24xxcam.h 2011-05-19 00:06:34.000000000 -0400
27598 +++ linux-2.6.39.4/drivers/media/video/omap24xxcam.h 2011-08-05 19:44:37.000000000 -0400
27599 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
27600 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
27601 struct videobuf_queue vbq;
27602 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
27603 - atomic_t field_count; /* field counter for videobuf_buffer */
27604 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
27605 /* accessing cam here doesn't need serialisation: it's constant */
27606 struct omap24xxcam_device *cam;
27607 };
27608 diff -urNp linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
27609 --- linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-19 00:06:34.000000000 -0400
27610 +++ linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-05 19:44:37.000000000 -0400
27611 @@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
27612 u8 *eeprom;
27613 struct tveeprom tvdata;
27614
27615 + pax_track_stack();
27616 +
27617 memset(&tvdata,0,sizeof(tvdata));
27618
27619 eeprom = pvr2_eeprom_fetch(hdw);
27620 diff -urNp linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
27621 --- linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-05-19 00:06:34.000000000 -0400
27622 +++ linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-08-05 20:34:06.000000000 -0400
27623 @@ -196,7 +196,7 @@ struct pvr2_hdw {
27624
27625 /* I2C stuff */
27626 struct i2c_adapter i2c_adap;
27627 - struct i2c_algorithm i2c_algo;
27628 + i2c_algorithm_no_const i2c_algo;
27629 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
27630 int i2c_cx25840_hack_state;
27631 int i2c_linked;
27632 diff -urNp linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c
27633 --- linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c 2011-05-19 00:06:34.000000000 -0400
27634 +++ linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c 2011-08-05 19:44:37.000000000 -0400
27635 @@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
27636 unsigned char localPAT[256];
27637 unsigned char localPMT[256];
27638
27639 + pax_track_stack();
27640 +
27641 /* Set video format - must be done first as it resets other settings */
27642 set_reg8(client, 0x41, h->video_format);
27643
27644 diff -urNp linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c
27645 --- linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-19 00:06:34.000000000 -0400
27646 +++ linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-05 19:44:37.000000000 -0400
27647 @@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
27648 u8 tmp[512];
27649 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27650
27651 + pax_track_stack();
27652 +
27653 /* While any outstand message on the bus exists... */
27654 do {
27655
27656 @@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
27657 u8 tmp[512];
27658 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27659
27660 + pax_track_stack();
27661 +
27662 while (loop) {
27663
27664 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
27665 diff -urNp linux-2.6.39.4/drivers/media/video/timblogiw.c linux-2.6.39.4/drivers/media/video/timblogiw.c
27666 --- linux-2.6.39.4/drivers/media/video/timblogiw.c 2011-05-19 00:06:34.000000000 -0400
27667 +++ linux-2.6.39.4/drivers/media/video/timblogiw.c 2011-08-05 20:34:06.000000000 -0400
27668 @@ -746,7 +746,7 @@ static int timblogiw_mmap(struct file *f
27669
27670 /* Platform device functions */
27671
27672 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
27673 +static __devinitdata struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
27674 .vidioc_querycap = timblogiw_querycap,
27675 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
27676 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
27677 @@ -768,7 +768,7 @@ static __devinitconst struct v4l2_ioctl_
27678 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
27679 };
27680
27681 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
27682 +static __devinitdata struct v4l2_file_operations timblogiw_fops = {
27683 .owner = THIS_MODULE,
27684 .open = timblogiw_open,
27685 .release = timblogiw_close,
27686 diff -urNp linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c
27687 --- linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c 2011-05-19 00:06:34.000000000 -0400
27688 +++ linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c 2011-08-05 19:44:37.000000000 -0400
27689 @@ -799,6 +799,8 @@ static enum parse_state usbvision_parse_
27690 unsigned char rv, gv, bv;
27691 static unsigned char *Y, *U, *V;
27692
27693 + pax_track_stack();
27694 +
27695 frame = usbvision->cur_frame;
27696 image_size = frame->frmwidth * frame->frmheight;
27697 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
27698 diff -urNp linux-2.6.39.4/drivers/media/video/v4l2-device.c linux-2.6.39.4/drivers/media/video/v4l2-device.c
27699 --- linux-2.6.39.4/drivers/media/video/v4l2-device.c 2011-05-19 00:06:34.000000000 -0400
27700 +++ linux-2.6.39.4/drivers/media/video/v4l2-device.c 2011-08-05 19:44:37.000000000 -0400
27701 @@ -71,9 +71,9 @@ int v4l2_device_put(struct v4l2_device *
27702 EXPORT_SYMBOL_GPL(v4l2_device_put);
27703
27704 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
27705 - atomic_t *instance)
27706 + atomic_unchecked_t *instance)
27707 {
27708 - int num = atomic_inc_return(instance) - 1;
27709 + int num = atomic_inc_return_unchecked(instance) - 1;
27710 int len = strlen(basename);
27711
27712 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
27713 diff -urNp linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c
27714 --- linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c 2011-05-19 00:06:34.000000000 -0400
27715 +++ linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c 2011-08-05 19:44:37.000000000 -0400
27716 @@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
27717 {
27718 struct videobuf_queue q;
27719
27720 + pax_track_stack();
27721 +
27722 /* Required to make generic handler to call __videobuf_alloc */
27723 q.int_ops = &sg_ops;
27724
27725 diff -urNp linux-2.6.39.4/drivers/message/fusion/mptbase.c linux-2.6.39.4/drivers/message/fusion/mptbase.c
27726 --- linux-2.6.39.4/drivers/message/fusion/mptbase.c 2011-05-19 00:06:34.000000000 -0400
27727 +++ linux-2.6.39.4/drivers/message/fusion/mptbase.c 2011-08-05 20:34:06.000000000 -0400
27728 @@ -6683,8 +6683,13 @@ static int mpt_iocinfo_proc_show(struct
27729 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
27730 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
27731
27732 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27733 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
27734 +#else
27735 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
27736 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
27737 +#endif
27738 +
27739 /*
27740 * Rounding UP to nearest 4-kB boundary here...
27741 */
27742 diff -urNp linux-2.6.39.4/drivers/message/fusion/mptsas.c linux-2.6.39.4/drivers/message/fusion/mptsas.c
27743 --- linux-2.6.39.4/drivers/message/fusion/mptsas.c 2011-05-19 00:06:34.000000000 -0400
27744 +++ linux-2.6.39.4/drivers/message/fusion/mptsas.c 2011-08-05 19:44:37.000000000 -0400
27745 @@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
27746 return 0;
27747 }
27748
27749 +static inline void
27750 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27751 +{
27752 + if (phy_info->port_details) {
27753 + phy_info->port_details->rphy = rphy;
27754 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27755 + ioc->name, rphy));
27756 + }
27757 +
27758 + if (rphy) {
27759 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27760 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27761 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27762 + ioc->name, rphy, rphy->dev.release));
27763 + }
27764 +}
27765 +
27766 /* no mutex */
27767 static void
27768 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
27769 @@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
27770 return NULL;
27771 }
27772
27773 -static inline void
27774 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27775 -{
27776 - if (phy_info->port_details) {
27777 - phy_info->port_details->rphy = rphy;
27778 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27779 - ioc->name, rphy));
27780 - }
27781 -
27782 - if (rphy) {
27783 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27784 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27785 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27786 - ioc->name, rphy, rphy->dev.release));
27787 - }
27788 -}
27789 -
27790 static inline struct sas_port *
27791 mptsas_get_port(struct mptsas_phyinfo *phy_info)
27792 {
27793 diff -urNp linux-2.6.39.4/drivers/message/fusion/mptscsih.c linux-2.6.39.4/drivers/message/fusion/mptscsih.c
27794 --- linux-2.6.39.4/drivers/message/fusion/mptscsih.c 2011-05-19 00:06:34.000000000 -0400
27795 +++ linux-2.6.39.4/drivers/message/fusion/mptscsih.c 2011-08-05 19:44:37.000000000 -0400
27796 @@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
27797
27798 h = shost_priv(SChost);
27799
27800 - if (h) {
27801 - if (h->info_kbuf == NULL)
27802 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27803 - return h->info_kbuf;
27804 - h->info_kbuf[0] = '\0';
27805 + if (!h)
27806 + return NULL;
27807
27808 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27809 - h->info_kbuf[size-1] = '\0';
27810 - }
27811 + if (h->info_kbuf == NULL)
27812 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27813 + return h->info_kbuf;
27814 + h->info_kbuf[0] = '\0';
27815 +
27816 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27817 + h->info_kbuf[size-1] = '\0';
27818
27819 return h->info_kbuf;
27820 }
27821 diff -urNp linux-2.6.39.4/drivers/message/i2o/i2o_config.c linux-2.6.39.4/drivers/message/i2o/i2o_config.c
27822 --- linux-2.6.39.4/drivers/message/i2o/i2o_config.c 2011-05-19 00:06:34.000000000 -0400
27823 +++ linux-2.6.39.4/drivers/message/i2o/i2o_config.c 2011-08-05 19:44:37.000000000 -0400
27824 @@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
27825 struct i2o_message *msg;
27826 unsigned int iop;
27827
27828 + pax_track_stack();
27829 +
27830 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
27831 return -EFAULT;
27832
27833 diff -urNp linux-2.6.39.4/drivers/message/i2o/i2o_proc.c linux-2.6.39.4/drivers/message/i2o/i2o_proc.c
27834 --- linux-2.6.39.4/drivers/message/i2o/i2o_proc.c 2011-05-19 00:06:34.000000000 -0400
27835 +++ linux-2.6.39.4/drivers/message/i2o/i2o_proc.c 2011-08-05 19:44:37.000000000 -0400
27836 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
27837 "Array Controller Device"
27838 };
27839
27840 -static char *chtostr(u8 * chars, int n)
27841 -{
27842 - char tmp[256];
27843 - tmp[0] = 0;
27844 - return strncat(tmp, (char *)chars, n);
27845 -}
27846 -
27847 static int i2o_report_query_status(struct seq_file *seq, int block_status,
27848 char *group)
27849 {
27850 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
27851
27852 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
27853 seq_printf(seq, "%-#8x", ddm_table.module_id);
27854 - seq_printf(seq, "%-29s",
27855 - chtostr(ddm_table.module_name_version, 28));
27856 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
27857 seq_printf(seq, "%9d ", ddm_table.data_size);
27858 seq_printf(seq, "%8d", ddm_table.code_size);
27859
27860 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
27861
27862 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
27863 seq_printf(seq, "%-#8x", dst->module_id);
27864 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
27865 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
27866 + seq_printf(seq, "%-.28s", dst->module_name_version);
27867 + seq_printf(seq, "%-.8s", dst->date);
27868 seq_printf(seq, "%8d ", dst->module_size);
27869 seq_printf(seq, "%8d ", dst->mpb_size);
27870 seq_printf(seq, "0x%04x", dst->module_flags);
27871 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
27872 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
27873 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
27874 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
27875 - seq_printf(seq, "Vendor info : %s\n",
27876 - chtostr((u8 *) (work32 + 2), 16));
27877 - seq_printf(seq, "Product info : %s\n",
27878 - chtostr((u8 *) (work32 + 6), 16));
27879 - seq_printf(seq, "Description : %s\n",
27880 - chtostr((u8 *) (work32 + 10), 16));
27881 - seq_printf(seq, "Product rev. : %s\n",
27882 - chtostr((u8 *) (work32 + 14), 8));
27883 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
27884 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
27885 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
27886 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
27887
27888 seq_printf(seq, "Serial number : ");
27889 print_serial_number(seq, (u8 *) (work32 + 16),
27890 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
27891 }
27892
27893 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
27894 - seq_printf(seq, "Module name : %s\n",
27895 - chtostr(result.module_name, 24));
27896 - seq_printf(seq, "Module revision : %s\n",
27897 - chtostr(result.module_rev, 8));
27898 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
27899 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
27900
27901 seq_printf(seq, "Serial number : ");
27902 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
27903 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
27904 return 0;
27905 }
27906
27907 - seq_printf(seq, "Device name : %s\n",
27908 - chtostr(result.device_name, 64));
27909 - seq_printf(seq, "Service name : %s\n",
27910 - chtostr(result.service_name, 64));
27911 - seq_printf(seq, "Physical name : %s\n",
27912 - chtostr(result.physical_location, 64));
27913 - seq_printf(seq, "Instance number : %s\n",
27914 - chtostr(result.instance_number, 4));
27915 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
27916 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
27917 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
27918 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
27919
27920 return 0;
27921 }
27922 diff -urNp linux-2.6.39.4/drivers/message/i2o/iop.c linux-2.6.39.4/drivers/message/i2o/iop.c
27923 --- linux-2.6.39.4/drivers/message/i2o/iop.c 2011-05-19 00:06:34.000000000 -0400
27924 +++ linux-2.6.39.4/drivers/message/i2o/iop.c 2011-08-05 19:44:37.000000000 -0400
27925 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
27926
27927 spin_lock_irqsave(&c->context_list_lock, flags);
27928
27929 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
27930 - atomic_inc(&c->context_list_counter);
27931 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
27932 + atomic_inc_unchecked(&c->context_list_counter);
27933
27934 - entry->context = atomic_read(&c->context_list_counter);
27935 + entry->context = atomic_read_unchecked(&c->context_list_counter);
27936
27937 list_add(&entry->list, &c->context_list);
27938
27939 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
27940
27941 #if BITS_PER_LONG == 64
27942 spin_lock_init(&c->context_list_lock);
27943 - atomic_set(&c->context_list_counter, 0);
27944 + atomic_set_unchecked(&c->context_list_counter, 0);
27945 INIT_LIST_HEAD(&c->context_list);
27946 #endif
27947
27948 diff -urNp linux-2.6.39.4/drivers/mfd/abx500-core.c linux-2.6.39.4/drivers/mfd/abx500-core.c
27949 --- linux-2.6.39.4/drivers/mfd/abx500-core.c 2011-05-19 00:06:34.000000000 -0400
27950 +++ linux-2.6.39.4/drivers/mfd/abx500-core.c 2011-08-05 20:34:06.000000000 -0400
27951 @@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
27952
27953 struct abx500_device_entry {
27954 struct list_head list;
27955 - struct abx500_ops ops;
27956 + abx500_ops_no_const ops;
27957 struct device *dev;
27958 };
27959
27960 diff -urNp linux-2.6.39.4/drivers/mfd/janz-cmodio.c linux-2.6.39.4/drivers/mfd/janz-cmodio.c
27961 --- linux-2.6.39.4/drivers/mfd/janz-cmodio.c 2011-05-19 00:06:34.000000000 -0400
27962 +++ linux-2.6.39.4/drivers/mfd/janz-cmodio.c 2011-08-05 19:44:37.000000000 -0400
27963 @@ -13,6 +13,7 @@
27964
27965 #include <linux/kernel.h>
27966 #include <linux/module.h>
27967 +#include <linux/slab.h>
27968 #include <linux/init.h>
27969 #include <linux/pci.h>
27970 #include <linux/interrupt.h>
27971 diff -urNp linux-2.6.39.4/drivers/mfd/wm8350-i2c.c linux-2.6.39.4/drivers/mfd/wm8350-i2c.c
27972 --- linux-2.6.39.4/drivers/mfd/wm8350-i2c.c 2011-05-19 00:06:34.000000000 -0400
27973 +++ linux-2.6.39.4/drivers/mfd/wm8350-i2c.c 2011-08-05 19:44:37.000000000 -0400
27974 @@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
27975 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
27976 int ret;
27977
27978 + pax_track_stack();
27979 +
27980 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
27981 return -EINVAL;
27982
27983 diff -urNp linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c
27984 --- linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-05-19 00:06:34.000000000 -0400
27985 +++ linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-08-05 19:44:37.000000000 -0400
27986 @@ -435,7 +435,7 @@ static irqreturn_t lis302dl_interrupt(in
27987 * the lid is closed. This leads to interrupts as soon as a little move
27988 * is done.
27989 */
27990 - atomic_inc(&lis3_dev.count);
27991 + atomic_inc_unchecked(&lis3_dev.count);
27992
27993 wake_up_interruptible(&lis3_dev.misc_wait);
27994 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
27995 @@ -518,7 +518,7 @@ static int lis3lv02d_misc_open(struct in
27996 if (lis3_dev.pm_dev)
27997 pm_runtime_get_sync(lis3_dev.pm_dev);
27998
27999 - atomic_set(&lis3_dev.count, 0);
28000 + atomic_set_unchecked(&lis3_dev.count, 0);
28001 return 0;
28002 }
28003
28004 @@ -545,7 +545,7 @@ static ssize_t lis3lv02d_misc_read(struc
28005 add_wait_queue(&lis3_dev.misc_wait, &wait);
28006 while (true) {
28007 set_current_state(TASK_INTERRUPTIBLE);
28008 - data = atomic_xchg(&lis3_dev.count, 0);
28009 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
28010 if (data)
28011 break;
28012
28013 @@ -583,7 +583,7 @@ out:
28014 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
28015 {
28016 poll_wait(file, &lis3_dev.misc_wait, wait);
28017 - if (atomic_read(&lis3_dev.count))
28018 + if (atomic_read_unchecked(&lis3_dev.count))
28019 return POLLIN | POLLRDNORM;
28020 return 0;
28021 }
28022 diff -urNp linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h
28023 --- linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-05-19 00:06:34.000000000 -0400
28024 +++ linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-05 19:44:37.000000000 -0400
28025 @@ -265,7 +265,7 @@ struct lis3lv02d {
28026 struct input_polled_dev *idev; /* input device */
28027 struct platform_device *pdev; /* platform device */
28028 struct regulator_bulk_data regulators[2];
28029 - atomic_t count; /* interrupt count after last read */
28030 + atomic_unchecked_t count; /* interrupt count after last read */
28031 union axis_conversion ac; /* hw -> logical axis */
28032 int mapped_btns[3];
28033
28034 diff -urNp linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c
28035 --- linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c 2011-05-19 00:06:34.000000000 -0400
28036 +++ linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c 2011-08-05 19:44:37.000000000 -0400
28037 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
28038 unsigned long nsec;
28039
28040 nsec = CLKS2NSEC(clks);
28041 - atomic_long_inc(&mcs_op_statistics[op].count);
28042 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
28043 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
28044 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
28045 if (mcs_op_statistics[op].max < nsec)
28046 mcs_op_statistics[op].max = nsec;
28047 }
28048 diff -urNp linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c
28049 --- linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c 2011-05-19 00:06:34.000000000 -0400
28050 +++ linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c 2011-08-05 19:44:37.000000000 -0400
28051 @@ -32,9 +32,9 @@
28052
28053 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
28054
28055 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
28056 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
28057 {
28058 - unsigned long val = atomic_long_read(v);
28059 + unsigned long val = atomic_long_read_unchecked(v);
28060
28061 seq_printf(s, "%16lu %s\n", val, id);
28062 }
28063 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
28064
28065 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
28066 for (op = 0; op < mcsop_last; op++) {
28067 - count = atomic_long_read(&mcs_op_statistics[op].count);
28068 - total = atomic_long_read(&mcs_op_statistics[op].total);
28069 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
28070 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
28071 max = mcs_op_statistics[op].max;
28072 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
28073 count ? total / count : 0, max);
28074 diff -urNp linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h
28075 --- linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h 2011-05-19 00:06:34.000000000 -0400
28076 +++ linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h 2011-08-05 19:44:37.000000000 -0400
28077 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
28078 * GRU statistics.
28079 */
28080 struct gru_stats_s {
28081 - atomic_long_t vdata_alloc;
28082 - atomic_long_t vdata_free;
28083 - atomic_long_t gts_alloc;
28084 - atomic_long_t gts_free;
28085 - atomic_long_t gms_alloc;
28086 - atomic_long_t gms_free;
28087 - atomic_long_t gts_double_allocate;
28088 - atomic_long_t assign_context;
28089 - atomic_long_t assign_context_failed;
28090 - atomic_long_t free_context;
28091 - atomic_long_t load_user_context;
28092 - atomic_long_t load_kernel_context;
28093 - atomic_long_t lock_kernel_context;
28094 - atomic_long_t unlock_kernel_context;
28095 - atomic_long_t steal_user_context;
28096 - atomic_long_t steal_kernel_context;
28097 - atomic_long_t steal_context_failed;
28098 - atomic_long_t nopfn;
28099 - atomic_long_t asid_new;
28100 - atomic_long_t asid_next;
28101 - atomic_long_t asid_wrap;
28102 - atomic_long_t asid_reuse;
28103 - atomic_long_t intr;
28104 - atomic_long_t intr_cbr;
28105 - atomic_long_t intr_tfh;
28106 - atomic_long_t intr_spurious;
28107 - atomic_long_t intr_mm_lock_failed;
28108 - atomic_long_t call_os;
28109 - atomic_long_t call_os_wait_queue;
28110 - atomic_long_t user_flush_tlb;
28111 - atomic_long_t user_unload_context;
28112 - atomic_long_t user_exception;
28113 - atomic_long_t set_context_option;
28114 - atomic_long_t check_context_retarget_intr;
28115 - atomic_long_t check_context_unload;
28116 - atomic_long_t tlb_dropin;
28117 - atomic_long_t tlb_preload_page;
28118 - atomic_long_t tlb_dropin_fail_no_asid;
28119 - atomic_long_t tlb_dropin_fail_upm;
28120 - atomic_long_t tlb_dropin_fail_invalid;
28121 - atomic_long_t tlb_dropin_fail_range_active;
28122 - atomic_long_t tlb_dropin_fail_idle;
28123 - atomic_long_t tlb_dropin_fail_fmm;
28124 - atomic_long_t tlb_dropin_fail_no_exception;
28125 - atomic_long_t tfh_stale_on_fault;
28126 - atomic_long_t mmu_invalidate_range;
28127 - atomic_long_t mmu_invalidate_page;
28128 - atomic_long_t flush_tlb;
28129 - atomic_long_t flush_tlb_gru;
28130 - atomic_long_t flush_tlb_gru_tgh;
28131 - atomic_long_t flush_tlb_gru_zero_asid;
28132 -
28133 - atomic_long_t copy_gpa;
28134 - atomic_long_t read_gpa;
28135 -
28136 - atomic_long_t mesq_receive;
28137 - atomic_long_t mesq_receive_none;
28138 - atomic_long_t mesq_send;
28139 - atomic_long_t mesq_send_failed;
28140 - atomic_long_t mesq_noop;
28141 - atomic_long_t mesq_send_unexpected_error;
28142 - atomic_long_t mesq_send_lb_overflow;
28143 - atomic_long_t mesq_send_qlimit_reached;
28144 - atomic_long_t mesq_send_amo_nacked;
28145 - atomic_long_t mesq_send_put_nacked;
28146 - atomic_long_t mesq_page_overflow;
28147 - atomic_long_t mesq_qf_locked;
28148 - atomic_long_t mesq_qf_noop_not_full;
28149 - atomic_long_t mesq_qf_switch_head_failed;
28150 - atomic_long_t mesq_qf_unexpected_error;
28151 - atomic_long_t mesq_noop_unexpected_error;
28152 - atomic_long_t mesq_noop_lb_overflow;
28153 - atomic_long_t mesq_noop_qlimit_reached;
28154 - atomic_long_t mesq_noop_amo_nacked;
28155 - atomic_long_t mesq_noop_put_nacked;
28156 - atomic_long_t mesq_noop_page_overflow;
28157 + atomic_long_unchecked_t vdata_alloc;
28158 + atomic_long_unchecked_t vdata_free;
28159 + atomic_long_unchecked_t gts_alloc;
28160 + atomic_long_unchecked_t gts_free;
28161 + atomic_long_unchecked_t gms_alloc;
28162 + atomic_long_unchecked_t gms_free;
28163 + atomic_long_unchecked_t gts_double_allocate;
28164 + atomic_long_unchecked_t assign_context;
28165 + atomic_long_unchecked_t assign_context_failed;
28166 + atomic_long_unchecked_t free_context;
28167 + atomic_long_unchecked_t load_user_context;
28168 + atomic_long_unchecked_t load_kernel_context;
28169 + atomic_long_unchecked_t lock_kernel_context;
28170 + atomic_long_unchecked_t unlock_kernel_context;
28171 + atomic_long_unchecked_t steal_user_context;
28172 + atomic_long_unchecked_t steal_kernel_context;
28173 + atomic_long_unchecked_t steal_context_failed;
28174 + atomic_long_unchecked_t nopfn;
28175 + atomic_long_unchecked_t asid_new;
28176 + atomic_long_unchecked_t asid_next;
28177 + atomic_long_unchecked_t asid_wrap;
28178 + atomic_long_unchecked_t asid_reuse;
28179 + atomic_long_unchecked_t intr;
28180 + atomic_long_unchecked_t intr_cbr;
28181 + atomic_long_unchecked_t intr_tfh;
28182 + atomic_long_unchecked_t intr_spurious;
28183 + atomic_long_unchecked_t intr_mm_lock_failed;
28184 + atomic_long_unchecked_t call_os;
28185 + atomic_long_unchecked_t call_os_wait_queue;
28186 + atomic_long_unchecked_t user_flush_tlb;
28187 + atomic_long_unchecked_t user_unload_context;
28188 + atomic_long_unchecked_t user_exception;
28189 + atomic_long_unchecked_t set_context_option;
28190 + atomic_long_unchecked_t check_context_retarget_intr;
28191 + atomic_long_unchecked_t check_context_unload;
28192 + atomic_long_unchecked_t tlb_dropin;
28193 + atomic_long_unchecked_t tlb_preload_page;
28194 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
28195 + atomic_long_unchecked_t tlb_dropin_fail_upm;
28196 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
28197 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
28198 + atomic_long_unchecked_t tlb_dropin_fail_idle;
28199 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
28200 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
28201 + atomic_long_unchecked_t tfh_stale_on_fault;
28202 + atomic_long_unchecked_t mmu_invalidate_range;
28203 + atomic_long_unchecked_t mmu_invalidate_page;
28204 + atomic_long_unchecked_t flush_tlb;
28205 + atomic_long_unchecked_t flush_tlb_gru;
28206 + atomic_long_unchecked_t flush_tlb_gru_tgh;
28207 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
28208 +
28209 + atomic_long_unchecked_t copy_gpa;
28210 + atomic_long_unchecked_t read_gpa;
28211 +
28212 + atomic_long_unchecked_t mesq_receive;
28213 + atomic_long_unchecked_t mesq_receive_none;
28214 + atomic_long_unchecked_t mesq_send;
28215 + atomic_long_unchecked_t mesq_send_failed;
28216 + atomic_long_unchecked_t mesq_noop;
28217 + atomic_long_unchecked_t mesq_send_unexpected_error;
28218 + atomic_long_unchecked_t mesq_send_lb_overflow;
28219 + atomic_long_unchecked_t mesq_send_qlimit_reached;
28220 + atomic_long_unchecked_t mesq_send_amo_nacked;
28221 + atomic_long_unchecked_t mesq_send_put_nacked;
28222 + atomic_long_unchecked_t mesq_page_overflow;
28223 + atomic_long_unchecked_t mesq_qf_locked;
28224 + atomic_long_unchecked_t mesq_qf_noop_not_full;
28225 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
28226 + atomic_long_unchecked_t mesq_qf_unexpected_error;
28227 + atomic_long_unchecked_t mesq_noop_unexpected_error;
28228 + atomic_long_unchecked_t mesq_noop_lb_overflow;
28229 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
28230 + atomic_long_unchecked_t mesq_noop_amo_nacked;
28231 + atomic_long_unchecked_t mesq_noop_put_nacked;
28232 + atomic_long_unchecked_t mesq_noop_page_overflow;
28233
28234 };
28235
28236 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
28237 tghop_invalidate, mcsop_last};
28238
28239 struct mcs_op_statistic {
28240 - atomic_long_t count;
28241 - atomic_long_t total;
28242 + atomic_long_unchecked_t count;
28243 + atomic_long_unchecked_t total;
28244 unsigned long max;
28245 };
28246
28247 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
28248
28249 #define STAT(id) do { \
28250 if (gru_options & OPT_STATS) \
28251 - atomic_long_inc(&gru_stats.id); \
28252 + atomic_long_inc_unchecked(&gru_stats.id); \
28253 } while (0)
28254
28255 #ifdef CONFIG_SGI_GRU_DEBUG
28256 diff -urNp linux-2.6.39.4/drivers/misc/sgi-xp/xp.h linux-2.6.39.4/drivers/misc/sgi-xp/xp.h
28257 --- linux-2.6.39.4/drivers/misc/sgi-xp/xp.h 2011-05-19 00:06:34.000000000 -0400
28258 +++ linux-2.6.39.4/drivers/misc/sgi-xp/xp.h 2011-08-05 20:34:06.000000000 -0400
28259 @@ -289,7 +289,7 @@ struct xpc_interface {
28260 xpc_notify_func, void *);
28261 void (*received) (short, int, void *);
28262 enum xp_retval (*partid_to_nasids) (short, void *);
28263 -};
28264 +} __no_const;
28265
28266 extern struct xpc_interface xpc_interface;
28267
28268 diff -urNp linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c
28269 --- linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-19 00:06:34.000000000 -0400
28270 +++ linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-05 19:44:37.000000000 -0400
28271 @@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
28272 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
28273 unsigned long timeo = jiffies + HZ;
28274
28275 + pax_track_stack();
28276 +
28277 /* Prevent setting state FL_SYNCING for chip in suspended state. */
28278 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
28279 goto sleep;
28280 @@ -1657,6 +1659,8 @@ static int __xipram do_write_buffer(stru
28281 unsigned long initial_adr;
28282 int initial_len = len;
28283
28284 + pax_track_stack();
28285 +
28286 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
28287 adr += chip->start;
28288 initial_adr = adr;
28289 @@ -1875,6 +1879,8 @@ static int __xipram do_erase_oneblock(st
28290 int retries = 3;
28291 int ret;
28292
28293 + pax_track_stack();
28294 +
28295 adr += chip->start;
28296
28297 retry:
28298 diff -urNp linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c
28299 --- linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-19 00:06:34.000000000 -0400
28300 +++ linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-05 19:44:37.000000000 -0400
28301 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
28302 unsigned long cmd_addr;
28303 struct cfi_private *cfi = map->fldrv_priv;
28304
28305 + pax_track_stack();
28306 +
28307 adr += chip->start;
28308
28309 /* Ensure cmd read/writes are aligned. */
28310 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
28311 DECLARE_WAITQUEUE(wait, current);
28312 int wbufsize, z;
28313
28314 + pax_track_stack();
28315 +
28316 /* M58LW064A requires bus alignment for buffer wriets -- saw */
28317 if (adr & (map_bankwidth(map)-1))
28318 return -EINVAL;
28319 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
28320 DECLARE_WAITQUEUE(wait, current);
28321 int ret = 0;
28322
28323 + pax_track_stack();
28324 +
28325 adr += chip->start;
28326
28327 /* Let's determine this according to the interleave only once */
28328 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
28329 unsigned long timeo = jiffies + HZ;
28330 DECLARE_WAITQUEUE(wait, current);
28331
28332 + pax_track_stack();
28333 +
28334 adr += chip->start;
28335
28336 /* Let's determine this according to the interleave only once */
28337 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
28338 unsigned long timeo = jiffies + HZ;
28339 DECLARE_WAITQUEUE(wait, current);
28340
28341 + pax_track_stack();
28342 +
28343 adr += chip->start;
28344
28345 /* Let's determine this according to the interleave only once */
28346 diff -urNp linux-2.6.39.4/drivers/mtd/devices/doc2000.c linux-2.6.39.4/drivers/mtd/devices/doc2000.c
28347 --- linux-2.6.39.4/drivers/mtd/devices/doc2000.c 2011-05-19 00:06:34.000000000 -0400
28348 +++ linux-2.6.39.4/drivers/mtd/devices/doc2000.c 2011-08-05 19:44:37.000000000 -0400
28349 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
28350
28351 /* The ECC will not be calculated correctly if less than 512 is written */
28352 /* DBB-
28353 - if (len != 0x200 && eccbuf)
28354 + if (len != 0x200)
28355 printk(KERN_WARNING
28356 "ECC needs a full sector write (adr: %lx size %lx)\n",
28357 (long) to, (long) len);
28358 diff -urNp linux-2.6.39.4/drivers/mtd/devices/doc2001.c linux-2.6.39.4/drivers/mtd/devices/doc2001.c
28359 --- linux-2.6.39.4/drivers/mtd/devices/doc2001.c 2011-05-19 00:06:34.000000000 -0400
28360 +++ linux-2.6.39.4/drivers/mtd/devices/doc2001.c 2011-08-05 19:44:37.000000000 -0400
28361 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
28362 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
28363
28364 /* Don't allow read past end of device */
28365 - if (from >= this->totlen)
28366 + if (from >= this->totlen || !len)
28367 return -EINVAL;
28368
28369 /* Don't allow a single read to cross a 512-byte block boundary */
28370 diff -urNp linux-2.6.39.4/drivers/mtd/ftl.c linux-2.6.39.4/drivers/mtd/ftl.c
28371 --- linux-2.6.39.4/drivers/mtd/ftl.c 2011-05-19 00:06:34.000000000 -0400
28372 +++ linux-2.6.39.4/drivers/mtd/ftl.c 2011-08-05 19:44:37.000000000 -0400
28373 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
28374 loff_t offset;
28375 uint16_t srcunitswap = cpu_to_le16(srcunit);
28376
28377 + pax_track_stack();
28378 +
28379 eun = &part->EUNInfo[srcunit];
28380 xfer = &part->XferInfo[xferunit];
28381 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
28382 diff -urNp linux-2.6.39.4/drivers/mtd/inftlcore.c linux-2.6.39.4/drivers/mtd/inftlcore.c
28383 --- linux-2.6.39.4/drivers/mtd/inftlcore.c 2011-05-19 00:06:34.000000000 -0400
28384 +++ linux-2.6.39.4/drivers/mtd/inftlcore.c 2011-08-05 19:44:37.000000000 -0400
28385 @@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
28386 struct inftl_oob oob;
28387 size_t retlen;
28388
28389 + pax_track_stack();
28390 +
28391 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
28392 "pending=%d)\n", inftl, thisVUC, pendingblock);
28393
28394 diff -urNp linux-2.6.39.4/drivers/mtd/inftlmount.c linux-2.6.39.4/drivers/mtd/inftlmount.c
28395 --- linux-2.6.39.4/drivers/mtd/inftlmount.c 2011-05-19 00:06:34.000000000 -0400
28396 +++ linux-2.6.39.4/drivers/mtd/inftlmount.c 2011-08-05 19:44:37.000000000 -0400
28397 @@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
28398 struct INFTLPartition *ip;
28399 size_t retlen;
28400
28401 + pax_track_stack();
28402 +
28403 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
28404
28405 /*
28406 diff -urNp linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c
28407 --- linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c 2011-05-19 00:06:34.000000000 -0400
28408 +++ linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c 2011-08-05 19:44:37.000000000 -0400
28409 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
28410 {
28411 map_word pfow_val[4];
28412
28413 + pax_track_stack();
28414 +
28415 /* Check identification string */
28416 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
28417 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
28418 diff -urNp linux-2.6.39.4/drivers/mtd/mtdchar.c linux-2.6.39.4/drivers/mtd/mtdchar.c
28419 --- linux-2.6.39.4/drivers/mtd/mtdchar.c 2011-05-19 00:06:34.000000000 -0400
28420 +++ linux-2.6.39.4/drivers/mtd/mtdchar.c 2011-08-05 19:44:37.000000000 -0400
28421 @@ -560,6 +560,8 @@ static int mtd_ioctl(struct file *file,
28422 u_long size;
28423 struct mtd_info_user info;
28424
28425 + pax_track_stack();
28426 +
28427 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
28428
28429 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
28430 diff -urNp linux-2.6.39.4/drivers/mtd/nand/denali.c linux-2.6.39.4/drivers/mtd/nand/denali.c
28431 --- linux-2.6.39.4/drivers/mtd/nand/denali.c 2011-05-19 00:06:34.000000000 -0400
28432 +++ linux-2.6.39.4/drivers/mtd/nand/denali.c 2011-08-05 19:44:37.000000000 -0400
28433 @@ -25,6 +25,7 @@
28434 #include <linux/pci.h>
28435 #include <linux/mtd/mtd.h>
28436 #include <linux/module.h>
28437 +#include <linux/slab.h>
28438
28439 #include "denali.h"
28440
28441 diff -urNp linux-2.6.39.4/drivers/mtd/nftlcore.c linux-2.6.39.4/drivers/mtd/nftlcore.c
28442 --- linux-2.6.39.4/drivers/mtd/nftlcore.c 2011-05-19 00:06:34.000000000 -0400
28443 +++ linux-2.6.39.4/drivers/mtd/nftlcore.c 2011-08-05 19:44:37.000000000 -0400
28444 @@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
28445 int inplace = 1;
28446 size_t retlen;
28447
28448 + pax_track_stack();
28449 +
28450 memset(BlockMap, 0xff, sizeof(BlockMap));
28451 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
28452
28453 diff -urNp linux-2.6.39.4/drivers/mtd/nftlmount.c linux-2.6.39.4/drivers/mtd/nftlmount.c
28454 --- linux-2.6.39.4/drivers/mtd/nftlmount.c 2011-05-19 00:06:34.000000000 -0400
28455 +++ linux-2.6.39.4/drivers/mtd/nftlmount.c 2011-08-05 19:44:37.000000000 -0400
28456 @@ -24,6 +24,7 @@
28457 #include <asm/errno.h>
28458 #include <linux/delay.h>
28459 #include <linux/slab.h>
28460 +#include <linux/sched.h>
28461 #include <linux/mtd/mtd.h>
28462 #include <linux/mtd/nand.h>
28463 #include <linux/mtd/nftl.h>
28464 @@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
28465 struct mtd_info *mtd = nftl->mbd.mtd;
28466 unsigned int i;
28467
28468 + pax_track_stack();
28469 +
28470 /* Assume logical EraseSize == physical erasesize for starting the scan.
28471 We'll sort it out later if we find a MediaHeader which says otherwise */
28472 /* Actually, we won't. The new DiskOnChip driver has already scanned
28473 diff -urNp linux-2.6.39.4/drivers/mtd/ubi/build.c linux-2.6.39.4/drivers/mtd/ubi/build.c
28474 --- linux-2.6.39.4/drivers/mtd/ubi/build.c 2011-05-19 00:06:34.000000000 -0400
28475 +++ linux-2.6.39.4/drivers/mtd/ubi/build.c 2011-08-05 19:44:37.000000000 -0400
28476 @@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
28477 static int __init bytes_str_to_int(const char *str)
28478 {
28479 char *endp;
28480 - unsigned long result;
28481 + unsigned long result, scale = 1;
28482
28483 result = simple_strtoul(str, &endp, 0);
28484 if (str == endp || result >= INT_MAX) {
28485 @@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
28486
28487 switch (*endp) {
28488 case 'G':
28489 - result *= 1024;
28490 + scale *= 1024;
28491 case 'M':
28492 - result *= 1024;
28493 + scale *= 1024;
28494 case 'K':
28495 - result *= 1024;
28496 + scale *= 1024;
28497 if (endp[1] == 'i' && endp[2] == 'B')
28498 endp += 2;
28499 case '\0':
28500 @@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
28501 return -EINVAL;
28502 }
28503
28504 - return result;
28505 + if ((intoverflow_t)result*scale >= INT_MAX) {
28506 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
28507 + str);
28508 + return -EINVAL;
28509 + }
28510 +
28511 + return result*scale;
28512 }
28513
28514 /**
28515 diff -urNp linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c
28516 --- linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c 2011-05-19 00:06:34.000000000 -0400
28517 +++ linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c 2011-08-05 20:34:06.000000000 -0400
28518 @@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
28519 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
28520 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
28521
28522 -static struct bfa_ioc_hwif nw_hwif_ct;
28523 +static struct bfa_ioc_hwif nw_hwif_ct = {
28524 + .ioc_pll_init = bfa_ioc_ct_pll_init,
28525 + .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
28526 + .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
28527 + .ioc_reg_init = bfa_ioc_ct_reg_init,
28528 + .ioc_map_port = bfa_ioc_ct_map_port,
28529 + .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
28530 + .ioc_notify_fail = bfa_ioc_ct_notify_fail,
28531 + .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
28532 + .ioc_sync_start = bfa_ioc_ct_sync_start,
28533 + .ioc_sync_join = bfa_ioc_ct_sync_join,
28534 + .ioc_sync_leave = bfa_ioc_ct_sync_leave,
28535 + .ioc_sync_ack = bfa_ioc_ct_sync_ack,
28536 + .ioc_sync_complete = bfa_ioc_ct_sync_complete
28537 +};
28538
28539 /**
28540 * Called from bfa_ioc_attach() to map asic specific calls.
28541 @@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
28542 void
28543 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
28544 {
28545 - nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
28546 - nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
28547 - nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
28548 - nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
28549 - nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
28550 - nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
28551 - nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
28552 - nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
28553 - nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
28554 - nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
28555 - nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
28556 - nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
28557 - nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
28558 -
28559 ioc->ioc_hwif = &nw_hwif_ct;
28560 }
28561
28562 diff -urNp linux-2.6.39.4/drivers/net/bna/bnad.c linux-2.6.39.4/drivers/net/bna/bnad.c
28563 --- linux-2.6.39.4/drivers/net/bna/bnad.c 2011-05-19 00:06:34.000000000 -0400
28564 +++ linux-2.6.39.4/drivers/net/bna/bnad.c 2011-08-05 20:34:06.000000000 -0400
28565 @@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28566 struct bna_intr_info *intr_info =
28567 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
28568 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
28569 - struct bna_tx_event_cbfn tx_cbfn;
28570 + static struct bna_tx_event_cbfn tx_cbfn = {
28571 + /* Initialize the tx event handlers */
28572 + .tcb_setup_cbfn = bnad_cb_tcb_setup,
28573 + .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
28574 + .tx_stall_cbfn = bnad_cb_tx_stall,
28575 + .tx_resume_cbfn = bnad_cb_tx_resume,
28576 + .tx_cleanup_cbfn = bnad_cb_tx_cleanup
28577 + };
28578 struct bna_tx *tx;
28579 unsigned long flags;
28580
28581 @@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28582 tx_config->txq_depth = bnad->txq_depth;
28583 tx_config->tx_type = BNA_TX_T_REGULAR;
28584
28585 - /* Initialize the tx event handlers */
28586 - tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
28587 - tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
28588 - tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
28589 - tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
28590 - tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
28591 -
28592 /* Get BNA's resource requirement for one tx object */
28593 spin_lock_irqsave(&bnad->bna_lock, flags);
28594 bna_tx_res_req(bnad->num_txq_per_tx,
28595 @@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
28596 struct bna_intr_info *intr_info =
28597 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
28598 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
28599 - struct bna_rx_event_cbfn rx_cbfn;
28600 + static struct bna_rx_event_cbfn rx_cbfn = {
28601 + /* Initialize the Rx event handlers */
28602 + .rcb_setup_cbfn = bnad_cb_rcb_setup,
28603 + .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
28604 + .ccb_setup_cbfn = bnad_cb_ccb_setup,
28605 + .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
28606 + .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
28607 + .rx_post_cbfn = bnad_cb_rx_post
28608 + };
28609 struct bna_rx *rx;
28610 unsigned long flags;
28611
28612 /* Initialize the Rx object configuration */
28613 bnad_init_rx_config(bnad, rx_config);
28614
28615 - /* Initialize the Rx event handlers */
28616 - rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
28617 - rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
28618 - rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
28619 - rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
28620 - rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
28621 - rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
28622 -
28623 /* Get BNA's resource requirement for one Rx object */
28624 spin_lock_irqsave(&bnad->bna_lock, flags);
28625 bna_rx_res_req(rx_config, res_info);
28626 diff -urNp linux-2.6.39.4/drivers/net/bnx2.c linux-2.6.39.4/drivers/net/bnx2.c
28627 --- linux-2.6.39.4/drivers/net/bnx2.c 2011-05-19 00:06:34.000000000 -0400
28628 +++ linux-2.6.39.4/drivers/net/bnx2.c 2011-08-05 19:44:37.000000000 -0400
28629 @@ -5828,6 +5828,8 @@ bnx2_test_nvram(struct bnx2 *bp)
28630 int rc = 0;
28631 u32 magic, csum;
28632
28633 + pax_track_stack();
28634 +
28635 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
28636 goto test_nvram_done;
28637
28638 diff -urNp linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c
28639 --- linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-05-19 00:06:34.000000000 -0400
28640 +++ linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-05 19:44:37.000000000 -0400
28641 @@ -1788,6 +1788,8 @@ static int bnx2x_test_nvram(struct bnx2x
28642 int i, rc;
28643 u32 magic, crc;
28644
28645 + pax_track_stack();
28646 +
28647 if (BP_NOMCP(bp))
28648 return 0;
28649
28650 diff -urNp linux-2.6.39.4/drivers/net/cxgb3/l2t.h linux-2.6.39.4/drivers/net/cxgb3/l2t.h
28651 --- linux-2.6.39.4/drivers/net/cxgb3/l2t.h 2011-05-19 00:06:34.000000000 -0400
28652 +++ linux-2.6.39.4/drivers/net/cxgb3/l2t.h 2011-08-05 20:34:06.000000000 -0400
28653 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
28654 */
28655 struct l2t_skb_cb {
28656 arp_failure_handler_func arp_failure_handler;
28657 -};
28658 +} __no_const;
28659
28660 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
28661
28662 diff -urNp linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c
28663 --- linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c 2011-05-19 00:06:34.000000000 -0400
28664 +++ linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c 2011-08-05 19:44:37.000000000 -0400
28665 @@ -3428,6 +3428,8 @@ static int __devinit enable_msix(struct
28666 unsigned int nchan = adap->params.nports;
28667 struct msix_entry entries[MAX_INGQ + 1];
28668
28669 + pax_track_stack();
28670 +
28671 for (i = 0; i < ARRAY_SIZE(entries); ++i)
28672 entries[i].entry = i;
28673
28674 diff -urNp linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c
28675 --- linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c 2011-05-19 00:06:34.000000000 -0400
28676 +++ linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c 2011-08-05 19:44:37.000000000 -0400
28677 @@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
28678 u8 vpd[VPD_LEN], csum;
28679 unsigned int vpdr_len, kw_offset, id_len;
28680
28681 + pax_track_stack();
28682 +
28683 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
28684 if (ret < 0)
28685 return ret;
28686 diff -urNp linux-2.6.39.4/drivers/net/e1000e/82571.c linux-2.6.39.4/drivers/net/e1000e/82571.c
28687 --- linux-2.6.39.4/drivers/net/e1000e/82571.c 2011-05-19 00:06:34.000000000 -0400
28688 +++ linux-2.6.39.4/drivers/net/e1000e/82571.c 2011-08-05 20:34:06.000000000 -0400
28689 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
28690 {
28691 struct e1000_hw *hw = &adapter->hw;
28692 struct e1000_mac_info *mac = &hw->mac;
28693 - struct e1000_mac_operations *func = &mac->ops;
28694 + e1000_mac_operations_no_const *func = &mac->ops;
28695 u32 swsm = 0;
28696 u32 swsm2 = 0;
28697 bool force_clear_smbi = false;
28698 diff -urNp linux-2.6.39.4/drivers/net/e1000e/es2lan.c linux-2.6.39.4/drivers/net/e1000e/es2lan.c
28699 --- linux-2.6.39.4/drivers/net/e1000e/es2lan.c 2011-05-19 00:06:34.000000000 -0400
28700 +++ linux-2.6.39.4/drivers/net/e1000e/es2lan.c 2011-08-05 20:34:06.000000000 -0400
28701 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
28702 {
28703 struct e1000_hw *hw = &adapter->hw;
28704 struct e1000_mac_info *mac = &hw->mac;
28705 - struct e1000_mac_operations *func = &mac->ops;
28706 + e1000_mac_operations_no_const *func = &mac->ops;
28707
28708 /* Set media type */
28709 switch (adapter->pdev->device) {
28710 diff -urNp linux-2.6.39.4/drivers/net/e1000e/hw.h linux-2.6.39.4/drivers/net/e1000e/hw.h
28711 --- linux-2.6.39.4/drivers/net/e1000e/hw.h 2011-05-19 00:06:34.000000000 -0400
28712 +++ linux-2.6.39.4/drivers/net/e1000e/hw.h 2011-08-05 20:34:06.000000000 -0400
28713 @@ -775,6 +775,7 @@ struct e1000_mac_operations {
28714 void (*write_vfta)(struct e1000_hw *, u32, u32);
28715 s32 (*read_mac_addr)(struct e1000_hw *);
28716 };
28717 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28718
28719 /* Function pointers for the PHY. */
28720 struct e1000_phy_operations {
28721 @@ -798,6 +799,7 @@ struct e1000_phy_operations {
28722 void (*power_up)(struct e1000_hw *);
28723 void (*power_down)(struct e1000_hw *);
28724 };
28725 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28726
28727 /* Function pointers for the NVM. */
28728 struct e1000_nvm_operations {
28729 @@ -809,9 +811,10 @@ struct e1000_nvm_operations {
28730 s32 (*validate)(struct e1000_hw *);
28731 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
28732 };
28733 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28734
28735 struct e1000_mac_info {
28736 - struct e1000_mac_operations ops;
28737 + e1000_mac_operations_no_const ops;
28738 u8 addr[ETH_ALEN];
28739 u8 perm_addr[ETH_ALEN];
28740
28741 @@ -852,7 +855,7 @@ struct e1000_mac_info {
28742 };
28743
28744 struct e1000_phy_info {
28745 - struct e1000_phy_operations ops;
28746 + e1000_phy_operations_no_const ops;
28747
28748 enum e1000_phy_type type;
28749
28750 @@ -886,7 +889,7 @@ struct e1000_phy_info {
28751 };
28752
28753 struct e1000_nvm_info {
28754 - struct e1000_nvm_operations ops;
28755 + e1000_nvm_operations_no_const ops;
28756
28757 enum e1000_nvm_type type;
28758 enum e1000_nvm_override override;
28759 diff -urNp linux-2.6.39.4/drivers/net/hamradio/6pack.c linux-2.6.39.4/drivers/net/hamradio/6pack.c
28760 --- linux-2.6.39.4/drivers/net/hamradio/6pack.c 2011-07-09 09:18:51.000000000 -0400
28761 +++ linux-2.6.39.4/drivers/net/hamradio/6pack.c 2011-08-05 19:44:37.000000000 -0400
28762 @@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
28763 unsigned char buf[512];
28764 int count1;
28765
28766 + pax_track_stack();
28767 +
28768 if (!count)
28769 return;
28770
28771 diff -urNp linux-2.6.39.4/drivers/net/igb/e1000_hw.h linux-2.6.39.4/drivers/net/igb/e1000_hw.h
28772 --- linux-2.6.39.4/drivers/net/igb/e1000_hw.h 2011-05-19 00:06:34.000000000 -0400
28773 +++ linux-2.6.39.4/drivers/net/igb/e1000_hw.h 2011-08-05 20:34:06.000000000 -0400
28774 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
28775 s32 (*read_mac_addr)(struct e1000_hw *);
28776 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
28777 };
28778 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28779
28780 struct e1000_phy_operations {
28781 s32 (*acquire)(struct e1000_hw *);
28782 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
28783 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
28784 s32 (*write_reg)(struct e1000_hw *, u32, u16);
28785 };
28786 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28787
28788 struct e1000_nvm_operations {
28789 s32 (*acquire)(struct e1000_hw *);
28790 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
28791 s32 (*update)(struct e1000_hw *);
28792 s32 (*validate)(struct e1000_hw *);
28793 };
28794 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28795
28796 struct e1000_info {
28797 s32 (*get_invariants)(struct e1000_hw *);
28798 @@ -350,7 +353,7 @@ struct e1000_info {
28799 extern const struct e1000_info e1000_82575_info;
28800
28801 struct e1000_mac_info {
28802 - struct e1000_mac_operations ops;
28803 + e1000_mac_operations_no_const ops;
28804
28805 u8 addr[6];
28806 u8 perm_addr[6];
28807 @@ -388,7 +391,7 @@ struct e1000_mac_info {
28808 };
28809
28810 struct e1000_phy_info {
28811 - struct e1000_phy_operations ops;
28812 + e1000_phy_operations_no_const ops;
28813
28814 enum e1000_phy_type type;
28815
28816 @@ -423,7 +426,7 @@ struct e1000_phy_info {
28817 };
28818
28819 struct e1000_nvm_info {
28820 - struct e1000_nvm_operations ops;
28821 + e1000_nvm_operations_no_const ops;
28822 enum e1000_nvm_type type;
28823 enum e1000_nvm_override override;
28824
28825 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
28826 s32 (*check_for_ack)(struct e1000_hw *, u16);
28827 s32 (*check_for_rst)(struct e1000_hw *, u16);
28828 };
28829 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28830
28831 struct e1000_mbx_stats {
28832 u32 msgs_tx;
28833 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
28834 };
28835
28836 struct e1000_mbx_info {
28837 - struct e1000_mbx_operations ops;
28838 + e1000_mbx_operations_no_const ops;
28839 struct e1000_mbx_stats stats;
28840 u32 timeout;
28841 u32 usec_delay;
28842 diff -urNp linux-2.6.39.4/drivers/net/igbvf/vf.h linux-2.6.39.4/drivers/net/igbvf/vf.h
28843 --- linux-2.6.39.4/drivers/net/igbvf/vf.h 2011-05-19 00:06:34.000000000 -0400
28844 +++ linux-2.6.39.4/drivers/net/igbvf/vf.h 2011-08-05 20:34:06.000000000 -0400
28845 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
28846 s32 (*read_mac_addr)(struct e1000_hw *);
28847 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
28848 };
28849 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28850
28851 struct e1000_mac_info {
28852 - struct e1000_mac_operations ops;
28853 + e1000_mac_operations_no_const ops;
28854 u8 addr[6];
28855 u8 perm_addr[6];
28856
28857 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
28858 s32 (*check_for_ack)(struct e1000_hw *);
28859 s32 (*check_for_rst)(struct e1000_hw *);
28860 };
28861 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28862
28863 struct e1000_mbx_stats {
28864 u32 msgs_tx;
28865 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
28866 };
28867
28868 struct e1000_mbx_info {
28869 - struct e1000_mbx_operations ops;
28870 + e1000_mbx_operations_no_const ops;
28871 struct e1000_mbx_stats stats;
28872 u32 timeout;
28873 u32 usec_delay;
28874 diff -urNp linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c
28875 --- linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c 2011-05-19 00:06:34.000000000 -0400
28876 +++ linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c 2011-08-05 19:44:37.000000000 -0400
28877 @@ -1069,6 +1069,8 @@ ixgb_set_multi(struct net_device *netdev
28878 u32 rctl;
28879 int i;
28880
28881 + pax_track_stack();
28882 +
28883 /* Check for Promiscuous and All Multicast modes */
28884
28885 rctl = IXGB_READ_REG(hw, RCTL);
28886 diff -urNp linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c
28887 --- linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c 2011-05-19 00:06:34.000000000 -0400
28888 +++ linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c 2011-08-05 19:44:37.000000000 -0400
28889 @@ -261,6 +261,9 @@ void __devinit
28890 ixgb_check_options(struct ixgb_adapter *adapter)
28891 {
28892 int bd = adapter->bd_number;
28893 +
28894 + pax_track_stack();
28895 +
28896 if (bd >= IXGB_MAX_NIC) {
28897 pr_notice("Warning: no configuration for board #%i\n", bd);
28898 pr_notice("Using defaults for all values\n");
28899 diff -urNp linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h
28900 --- linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h 2011-05-19 00:06:34.000000000 -0400
28901 +++ linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h 2011-08-05 20:34:06.000000000 -0400
28902 @@ -2496,6 +2496,7 @@ struct ixgbe_eeprom_operations {
28903 s32 (*update_checksum)(struct ixgbe_hw *);
28904 u16 (*calc_checksum)(struct ixgbe_hw *);
28905 };
28906 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
28907
28908 struct ixgbe_mac_operations {
28909 s32 (*init_hw)(struct ixgbe_hw *);
28910 @@ -2551,6 +2552,7 @@ struct ixgbe_mac_operations {
28911 /* Flow Control */
28912 s32 (*fc_enable)(struct ixgbe_hw *, s32);
28913 };
28914 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28915
28916 struct ixgbe_phy_operations {
28917 s32 (*identify)(struct ixgbe_hw *);
28918 @@ -2570,9 +2572,10 @@ struct ixgbe_phy_operations {
28919 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
28920 s32 (*check_overtemp)(struct ixgbe_hw *);
28921 };
28922 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
28923
28924 struct ixgbe_eeprom_info {
28925 - struct ixgbe_eeprom_operations ops;
28926 + ixgbe_eeprom_operations_no_const ops;
28927 enum ixgbe_eeprom_type type;
28928 u32 semaphore_delay;
28929 u16 word_size;
28930 @@ -2581,7 +2584,7 @@ struct ixgbe_eeprom_info {
28931
28932 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
28933 struct ixgbe_mac_info {
28934 - struct ixgbe_mac_operations ops;
28935 + ixgbe_mac_operations_no_const ops;
28936 enum ixgbe_mac_type type;
28937 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28938 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28939 @@ -2608,7 +2611,7 @@ struct ixgbe_mac_info {
28940 };
28941
28942 struct ixgbe_phy_info {
28943 - struct ixgbe_phy_operations ops;
28944 + ixgbe_phy_operations_no_const ops;
28945 struct mdio_if_info mdio;
28946 enum ixgbe_phy_type type;
28947 u32 id;
28948 @@ -2636,6 +2639,7 @@ struct ixgbe_mbx_operations {
28949 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
28950 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
28951 };
28952 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28953
28954 struct ixgbe_mbx_stats {
28955 u32 msgs_tx;
28956 @@ -2647,7 +2651,7 @@ struct ixgbe_mbx_stats {
28957 };
28958
28959 struct ixgbe_mbx_info {
28960 - struct ixgbe_mbx_operations ops;
28961 + ixgbe_mbx_operations_no_const ops;
28962 struct ixgbe_mbx_stats stats;
28963 u32 timeout;
28964 u32 usec_delay;
28965 diff -urNp linux-2.6.39.4/drivers/net/ixgbevf/vf.h linux-2.6.39.4/drivers/net/ixgbevf/vf.h
28966 --- linux-2.6.39.4/drivers/net/ixgbevf/vf.h 2011-05-19 00:06:34.000000000 -0400
28967 +++ linux-2.6.39.4/drivers/net/ixgbevf/vf.h 2011-08-05 20:34:06.000000000 -0400
28968 @@ -69,6 +69,7 @@ struct ixgbe_mac_operations {
28969 s32 (*clear_vfta)(struct ixgbe_hw *);
28970 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
28971 };
28972 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28973
28974 enum ixgbe_mac_type {
28975 ixgbe_mac_unknown = 0,
28976 @@ -78,7 +79,7 @@ enum ixgbe_mac_type {
28977 };
28978
28979 struct ixgbe_mac_info {
28980 - struct ixgbe_mac_operations ops;
28981 + ixgbe_mac_operations_no_const ops;
28982 u8 addr[6];
28983 u8 perm_addr[6];
28984
28985 @@ -102,6 +103,7 @@ struct ixgbe_mbx_operations {
28986 s32 (*check_for_ack)(struct ixgbe_hw *);
28987 s32 (*check_for_rst)(struct ixgbe_hw *);
28988 };
28989 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28990
28991 struct ixgbe_mbx_stats {
28992 u32 msgs_tx;
28993 @@ -113,7 +115,7 @@ struct ixgbe_mbx_stats {
28994 };
28995
28996 struct ixgbe_mbx_info {
28997 - struct ixgbe_mbx_operations ops;
28998 + ixgbe_mbx_operations_no_const ops;
28999 struct ixgbe_mbx_stats stats;
29000 u32 timeout;
29001 u32 udelay;
29002 diff -urNp linux-2.6.39.4/drivers/net/ksz884x.c linux-2.6.39.4/drivers/net/ksz884x.c
29003 --- linux-2.6.39.4/drivers/net/ksz884x.c 2011-05-19 00:06:34.000000000 -0400
29004 +++ linux-2.6.39.4/drivers/net/ksz884x.c 2011-08-05 20:34:06.000000000 -0400
29005 @@ -6536,6 +6536,8 @@ static void netdev_get_ethtool_stats(str
29006 int rc;
29007 u64 counter[TOTAL_PORT_COUNTER_NUM];
29008
29009 + pax_track_stack();
29010 +
29011 mutex_lock(&hw_priv->lock);
29012 n = SWITCH_PORT_NUM;
29013 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
29014 diff -urNp linux-2.6.39.4/drivers/net/mlx4/main.c linux-2.6.39.4/drivers/net/mlx4/main.c
29015 --- linux-2.6.39.4/drivers/net/mlx4/main.c 2011-05-19 00:06:34.000000000 -0400
29016 +++ linux-2.6.39.4/drivers/net/mlx4/main.c 2011-08-05 19:44:37.000000000 -0400
29017 @@ -40,6 +40,7 @@
29018 #include <linux/dma-mapping.h>
29019 #include <linux/slab.h>
29020 #include <linux/io-mapping.h>
29021 +#include <linux/sched.h>
29022
29023 #include <linux/mlx4/device.h>
29024 #include <linux/mlx4/doorbell.h>
29025 @@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
29026 u64 icm_size;
29027 int err;
29028
29029 + pax_track_stack();
29030 +
29031 err = mlx4_QUERY_FW(dev);
29032 if (err) {
29033 if (err == -EACCES)
29034 diff -urNp linux-2.6.39.4/drivers/net/niu.c linux-2.6.39.4/drivers/net/niu.c
29035 --- linux-2.6.39.4/drivers/net/niu.c 2011-05-19 00:06:34.000000000 -0400
29036 +++ linux-2.6.39.4/drivers/net/niu.c 2011-08-05 19:44:37.000000000 -0400
29037 @@ -9067,6 +9067,8 @@ static void __devinit niu_try_msix(struc
29038 int i, num_irqs, err;
29039 u8 first_ldg;
29040
29041 + pax_track_stack();
29042 +
29043 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
29044 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
29045 ldg_num_map[i] = first_ldg + i;
29046 diff -urNp linux-2.6.39.4/drivers/net/pcnet32.c linux-2.6.39.4/drivers/net/pcnet32.c
29047 --- linux-2.6.39.4/drivers/net/pcnet32.c 2011-05-19 00:06:34.000000000 -0400
29048 +++ linux-2.6.39.4/drivers/net/pcnet32.c 2011-08-05 20:34:06.000000000 -0400
29049 @@ -82,7 +82,7 @@ static int cards_found;
29050 /*
29051 * VLB I/O addresses
29052 */
29053 -static unsigned int pcnet32_portlist[] __initdata =
29054 +static unsigned int pcnet32_portlist[] __devinitdata =
29055 { 0x300, 0x320, 0x340, 0x360, 0 };
29056
29057 static int pcnet32_debug;
29058 @@ -270,7 +270,7 @@ struct pcnet32_private {
29059 struct sk_buff **rx_skbuff;
29060 dma_addr_t *tx_dma_addr;
29061 dma_addr_t *rx_dma_addr;
29062 - struct pcnet32_access a;
29063 + struct pcnet32_access *a;
29064 spinlock_t lock; /* Guard lock */
29065 unsigned int cur_rx, cur_tx; /* The next free ring entry */
29066 unsigned int rx_ring_size; /* current rx ring size */
29067 @@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
29068 u16 val;
29069
29070 netif_wake_queue(dev);
29071 - val = lp->a.read_csr(ioaddr, CSR3);
29072 + val = lp->a->read_csr(ioaddr, CSR3);
29073 val &= 0x00ff;
29074 - lp->a.write_csr(ioaddr, CSR3, val);
29075 + lp->a->write_csr(ioaddr, CSR3, val);
29076 napi_enable(&lp->napi);
29077 }
29078
29079 @@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
29080 r = mii_link_ok(&lp->mii_if);
29081 } else if (lp->chip_version >= PCNET32_79C970A) {
29082 ulong ioaddr = dev->base_addr; /* card base I/O address */
29083 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29084 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29085 } else { /* can not detect link on really old chips */
29086 r = 1;
29087 }
29088 @@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
29089 pcnet32_netif_stop(dev);
29090
29091 spin_lock_irqsave(&lp->lock, flags);
29092 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29093 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29094
29095 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
29096
29097 @@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
29098 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
29099 {
29100 struct pcnet32_private *lp = netdev_priv(dev);
29101 - struct pcnet32_access *a = &lp->a; /* access to registers */
29102 + struct pcnet32_access *a = lp->a; /* access to registers */
29103 ulong ioaddr = dev->base_addr; /* card base I/O address */
29104 struct sk_buff *skb; /* sk buff */
29105 int x, i; /* counters */
29106 @@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
29107 pcnet32_netif_stop(dev);
29108
29109 spin_lock_irqsave(&lp->lock, flags);
29110 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29111 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29112
29113 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
29114
29115 /* Reset the PCNET32 */
29116 - lp->a.reset(ioaddr);
29117 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29118 + lp->a->reset(ioaddr);
29119 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29120
29121 /* switch pcnet32 to 32bit mode */
29122 - lp->a.write_bcr(ioaddr, 20, 2);
29123 + lp->a->write_bcr(ioaddr, 20, 2);
29124
29125 /* purge & init rings but don't actually restart */
29126 pcnet32_restart(dev, 0x0000);
29127
29128 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29129 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29130
29131 /* Initialize Transmit buffers. */
29132 size = data_len + 15;
29133 @@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
29134
29135 /* set int loopback in CSR15 */
29136 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
29137 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
29138 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
29139
29140 teststatus = cpu_to_le16(0x8000);
29141 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
29142 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
29143
29144 /* Check status of descriptors */
29145 for (x = 0; x < numbuffs; x++) {
29146 @@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
29147 }
29148 }
29149
29150 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29151 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29152 wmb();
29153 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
29154 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
29155 @@ -1015,7 +1015,7 @@ clean_up:
29156 pcnet32_restart(dev, CSR0_NORMAL);
29157 } else {
29158 pcnet32_purge_rx_ring(dev);
29159 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
29160 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
29161 }
29162 spin_unlock_irqrestore(&lp->lock, flags);
29163
29164 @@ -1025,7 +1025,7 @@ clean_up:
29165 static void pcnet32_led_blink_callback(struct net_device *dev)
29166 {
29167 struct pcnet32_private *lp = netdev_priv(dev);
29168 - struct pcnet32_access *a = &lp->a;
29169 + struct pcnet32_access *a = lp->a;
29170 ulong ioaddr = dev->base_addr;
29171 unsigned long flags;
29172 int i;
29173 @@ -1041,7 +1041,7 @@ static void pcnet32_led_blink_callback(s
29174 static int pcnet32_phys_id(struct net_device *dev, u32 data)
29175 {
29176 struct pcnet32_private *lp = netdev_priv(dev);
29177 - struct pcnet32_access *a = &lp->a;
29178 + struct pcnet32_access *a = lp->a;
29179 ulong ioaddr = dev->base_addr;
29180 unsigned long flags;
29181 int i, regs[4];
29182 @@ -1085,7 +1085,7 @@ static int pcnet32_suspend(struct net_de
29183 {
29184 int csr5;
29185 struct pcnet32_private *lp = netdev_priv(dev);
29186 - struct pcnet32_access *a = &lp->a;
29187 + struct pcnet32_access *a = lp->a;
29188 ulong ioaddr = dev->base_addr;
29189 int ticks;
29190
29191 @@ -1342,8 +1342,8 @@ static int pcnet32_poll(struct napi_stru
29192 spin_lock_irqsave(&lp->lock, flags);
29193 if (pcnet32_tx(dev)) {
29194 /* reset the chip to clear the error condition, then restart */
29195 - lp->a.reset(ioaddr);
29196 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29197 + lp->a->reset(ioaddr);
29198 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29199 pcnet32_restart(dev, CSR0_START);
29200 netif_wake_queue(dev);
29201 }
29202 @@ -1355,12 +1355,12 @@ static int pcnet32_poll(struct napi_stru
29203 __napi_complete(napi);
29204
29205 /* clear interrupt masks */
29206 - val = lp->a.read_csr(ioaddr, CSR3);
29207 + val = lp->a->read_csr(ioaddr, CSR3);
29208 val &= 0x00ff;
29209 - lp->a.write_csr(ioaddr, CSR3, val);
29210 + lp->a->write_csr(ioaddr, CSR3, val);
29211
29212 /* Set interrupt enable. */
29213 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
29214 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
29215
29216 spin_unlock_irqrestore(&lp->lock, flags);
29217 }
29218 @@ -1383,7 +1383,7 @@ static void pcnet32_get_regs(struct net_
29219 int i, csr0;
29220 u16 *buff = ptr;
29221 struct pcnet32_private *lp = netdev_priv(dev);
29222 - struct pcnet32_access *a = &lp->a;
29223 + struct pcnet32_access *a = lp->a;
29224 ulong ioaddr = dev->base_addr;
29225 unsigned long flags;
29226
29227 @@ -1419,9 +1419,9 @@ static void pcnet32_get_regs(struct net_
29228 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
29229 if (lp->phymask & (1 << j)) {
29230 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
29231 - lp->a.write_bcr(ioaddr, 33,
29232 + lp->a->write_bcr(ioaddr, 33,
29233 (j << 5) | i);
29234 - *buff++ = lp->a.read_bcr(ioaddr, 34);
29235 + *buff++ = lp->a->read_bcr(ioaddr, 34);
29236 }
29237 }
29238 }
29239 @@ -1803,7 +1803,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29240 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
29241 lp->options |= PCNET32_PORT_FD;
29242
29243 - lp->a = *a;
29244 + lp->a = a;
29245
29246 /* prior to register_netdev, dev->name is not yet correct */
29247 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
29248 @@ -1862,7 +1862,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29249 if (lp->mii) {
29250 /* lp->phycount and lp->phymask are set to 0 by memset above */
29251
29252 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29253 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29254 /* scan for PHYs */
29255 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29256 unsigned short id1, id2;
29257 @@ -1882,7 +1882,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29258 pr_info("Found PHY %04x:%04x at address %d\n",
29259 id1, id2, i);
29260 }
29261 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29262 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29263 if (lp->phycount > 1)
29264 lp->options |= PCNET32_PORT_MII;
29265 }
29266 @@ -2038,10 +2038,10 @@ static int pcnet32_open(struct net_devic
29267 }
29268
29269 /* Reset the PCNET32 */
29270 - lp->a.reset(ioaddr);
29271 + lp->a->reset(ioaddr);
29272
29273 /* switch pcnet32 to 32bit mode */
29274 - lp->a.write_bcr(ioaddr, 20, 2);
29275 + lp->a->write_bcr(ioaddr, 20, 2);
29276
29277 netif_printk(lp, ifup, KERN_DEBUG, dev,
29278 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
29279 @@ -2050,14 +2050,14 @@ static int pcnet32_open(struct net_devic
29280 (u32) (lp->init_dma_addr));
29281
29282 /* set/reset autoselect bit */
29283 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
29284 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
29285 if (lp->options & PCNET32_PORT_ASEL)
29286 val |= 2;
29287 - lp->a.write_bcr(ioaddr, 2, val);
29288 + lp->a->write_bcr(ioaddr, 2, val);
29289
29290 /* handle full duplex setting */
29291 if (lp->mii_if.full_duplex) {
29292 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
29293 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
29294 if (lp->options & PCNET32_PORT_FD) {
29295 val |= 1;
29296 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
29297 @@ -2067,14 +2067,14 @@ static int pcnet32_open(struct net_devic
29298 if (lp->chip_version == 0x2627)
29299 val |= 3;
29300 }
29301 - lp->a.write_bcr(ioaddr, 9, val);
29302 + lp->a->write_bcr(ioaddr, 9, val);
29303 }
29304
29305 /* set/reset GPSI bit in test register */
29306 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
29307 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
29308 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
29309 val |= 0x10;
29310 - lp->a.write_csr(ioaddr, 124, val);
29311 + lp->a->write_csr(ioaddr, 124, val);
29312
29313 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
29314 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
29315 @@ -2093,24 +2093,24 @@ static int pcnet32_open(struct net_devic
29316 * duplex, and/or enable auto negotiation, and clear DANAS
29317 */
29318 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
29319 - lp->a.write_bcr(ioaddr, 32,
29320 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
29321 + lp->a->write_bcr(ioaddr, 32,
29322 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
29323 /* disable Auto Negotiation, set 10Mpbs, HD */
29324 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
29325 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
29326 if (lp->options & PCNET32_PORT_FD)
29327 val |= 0x10;
29328 if (lp->options & PCNET32_PORT_100)
29329 val |= 0x08;
29330 - lp->a.write_bcr(ioaddr, 32, val);
29331 + lp->a->write_bcr(ioaddr, 32, val);
29332 } else {
29333 if (lp->options & PCNET32_PORT_ASEL) {
29334 - lp->a.write_bcr(ioaddr, 32,
29335 - lp->a.read_bcr(ioaddr,
29336 + lp->a->write_bcr(ioaddr, 32,
29337 + lp->a->read_bcr(ioaddr,
29338 32) | 0x0080);
29339 /* enable auto negotiate, setup, disable fd */
29340 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
29341 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
29342 val |= 0x20;
29343 - lp->a.write_bcr(ioaddr, 32, val);
29344 + lp->a->write_bcr(ioaddr, 32, val);
29345 }
29346 }
29347 } else {
29348 @@ -2123,10 +2123,10 @@ static int pcnet32_open(struct net_devic
29349 * There is really no good other way to handle multiple PHYs
29350 * other than turning off all automatics
29351 */
29352 - val = lp->a.read_bcr(ioaddr, 2);
29353 - lp->a.write_bcr(ioaddr, 2, val & ~2);
29354 - val = lp->a.read_bcr(ioaddr, 32);
29355 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29356 + val = lp->a->read_bcr(ioaddr, 2);
29357 + lp->a->write_bcr(ioaddr, 2, val & ~2);
29358 + val = lp->a->read_bcr(ioaddr, 32);
29359 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29360
29361 if (!(lp->options & PCNET32_PORT_ASEL)) {
29362 /* setup ecmd */
29363 @@ -2136,7 +2136,7 @@ static int pcnet32_open(struct net_devic
29364 ecmd.speed =
29365 lp->
29366 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
29367 - bcr9 = lp->a.read_bcr(ioaddr, 9);
29368 + bcr9 = lp->a->read_bcr(ioaddr, 9);
29369
29370 if (lp->options & PCNET32_PORT_FD) {
29371 ecmd.duplex = DUPLEX_FULL;
29372 @@ -2145,7 +2145,7 @@ static int pcnet32_open(struct net_devic
29373 ecmd.duplex = DUPLEX_HALF;
29374 bcr9 |= ~(1 << 0);
29375 }
29376 - lp->a.write_bcr(ioaddr, 9, bcr9);
29377 + lp->a->write_bcr(ioaddr, 9, bcr9);
29378 }
29379
29380 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29381 @@ -2176,9 +2176,9 @@ static int pcnet32_open(struct net_devic
29382
29383 #ifdef DO_DXSUFLO
29384 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
29385 - val = lp->a.read_csr(ioaddr, CSR3);
29386 + val = lp->a->read_csr(ioaddr, CSR3);
29387 val |= 0x40;
29388 - lp->a.write_csr(ioaddr, CSR3, val);
29389 + lp->a->write_csr(ioaddr, CSR3, val);
29390 }
29391 #endif
29392
29393 @@ -2194,11 +2194,11 @@ static int pcnet32_open(struct net_devic
29394 napi_enable(&lp->napi);
29395
29396 /* Re-initialize the PCNET32, and start it when done. */
29397 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29398 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29399 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29400 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29401
29402 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29403 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29404 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29405 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29406
29407 netif_start_queue(dev);
29408
29409 @@ -2210,19 +2210,19 @@ static int pcnet32_open(struct net_devic
29410
29411 i = 0;
29412 while (i++ < 100)
29413 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29414 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29415 break;
29416 /*
29417 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
29418 * reports that doing so triggers a bug in the '974.
29419 */
29420 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
29421 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
29422
29423 netif_printk(lp, ifup, KERN_DEBUG, dev,
29424 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
29425 i,
29426 (u32) (lp->init_dma_addr),
29427 - lp->a.read_csr(ioaddr, CSR0));
29428 + lp->a->read_csr(ioaddr, CSR0));
29429
29430 spin_unlock_irqrestore(&lp->lock, flags);
29431
29432 @@ -2236,7 +2236,7 @@ err_free_ring:
29433 * Switch back to 16bit mode to avoid problems with dumb
29434 * DOS packet driver after a warm reboot
29435 */
29436 - lp->a.write_bcr(ioaddr, 20, 4);
29437 + lp->a->write_bcr(ioaddr, 20, 4);
29438
29439 err_free_irq:
29440 spin_unlock_irqrestore(&lp->lock, flags);
29441 @@ -2341,7 +2341,7 @@ static void pcnet32_restart(struct net_d
29442
29443 /* wait for stop */
29444 for (i = 0; i < 100; i++)
29445 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
29446 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
29447 break;
29448
29449 if (i >= 100)
29450 @@ -2353,13 +2353,13 @@ static void pcnet32_restart(struct net_d
29451 return;
29452
29453 /* ReInit Ring */
29454 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29455 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29456 i = 0;
29457 while (i++ < 1000)
29458 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29459 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29460 break;
29461
29462 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
29463 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
29464 }
29465
29466 static void pcnet32_tx_timeout(struct net_device *dev)
29467 @@ -2371,8 +2371,8 @@ static void pcnet32_tx_timeout(struct ne
29468 /* Transmitter timeout, serious problems. */
29469 if (pcnet32_debug & NETIF_MSG_DRV)
29470 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
29471 - dev->name, lp->a.read_csr(ioaddr, CSR0));
29472 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29473 + dev->name, lp->a->read_csr(ioaddr, CSR0));
29474 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29475 dev->stats.tx_errors++;
29476 if (netif_msg_tx_err(lp)) {
29477 int i;
29478 @@ -2415,7 +2415,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29479
29480 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
29481 "%s() called, csr0 %4.4x\n",
29482 - __func__, lp->a.read_csr(ioaddr, CSR0));
29483 + __func__, lp->a->read_csr(ioaddr, CSR0));
29484
29485 /* Default status -- will not enable Successful-TxDone
29486 * interrupt when that option is available to us.
29487 @@ -2445,7 +2445,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29488 dev->stats.tx_bytes += skb->len;
29489
29490 /* Trigger an immediate send poll. */
29491 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29492 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29493
29494 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
29495 lp->tx_full = 1;
29496 @@ -2470,16 +2470,16 @@ pcnet32_interrupt(int irq, void *dev_id)
29497
29498 spin_lock(&lp->lock);
29499
29500 - csr0 = lp->a.read_csr(ioaddr, CSR0);
29501 + csr0 = lp->a->read_csr(ioaddr, CSR0);
29502 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
29503 if (csr0 == 0xffff)
29504 break; /* PCMCIA remove happened */
29505 /* Acknowledge all of the current interrupt sources ASAP. */
29506 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29507 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29508
29509 netif_printk(lp, intr, KERN_DEBUG, dev,
29510 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
29511 - csr0, lp->a.read_csr(ioaddr, CSR0));
29512 + csr0, lp->a->read_csr(ioaddr, CSR0));
29513
29514 /* Log misc errors. */
29515 if (csr0 & 0x4000)
29516 @@ -2506,19 +2506,19 @@ pcnet32_interrupt(int irq, void *dev_id)
29517 if (napi_schedule_prep(&lp->napi)) {
29518 u16 val;
29519 /* set interrupt masks */
29520 - val = lp->a.read_csr(ioaddr, CSR3);
29521 + val = lp->a->read_csr(ioaddr, CSR3);
29522 val |= 0x5f00;
29523 - lp->a.write_csr(ioaddr, CSR3, val);
29524 + lp->a->write_csr(ioaddr, CSR3, val);
29525
29526 __napi_schedule(&lp->napi);
29527 break;
29528 }
29529 - csr0 = lp->a.read_csr(ioaddr, CSR0);
29530 + csr0 = lp->a->read_csr(ioaddr, CSR0);
29531 }
29532
29533 netif_printk(lp, intr, KERN_DEBUG, dev,
29534 "exiting interrupt, csr0=%#4.4x\n",
29535 - lp->a.read_csr(ioaddr, CSR0));
29536 + lp->a->read_csr(ioaddr, CSR0));
29537
29538 spin_unlock(&lp->lock);
29539
29540 @@ -2538,20 +2538,20 @@ static int pcnet32_close(struct net_devi
29541
29542 spin_lock_irqsave(&lp->lock, flags);
29543
29544 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29545 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29546
29547 netif_printk(lp, ifdown, KERN_DEBUG, dev,
29548 "Shutting down ethercard, status was %2.2x\n",
29549 - lp->a.read_csr(ioaddr, CSR0));
29550 + lp->a->read_csr(ioaddr, CSR0));
29551
29552 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
29553 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29554 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29555
29556 /*
29557 * Switch back to 16bit mode to avoid problems with dumb
29558 * DOS packet driver after a warm reboot
29559 */
29560 - lp->a.write_bcr(ioaddr, 20, 4);
29561 + lp->a->write_bcr(ioaddr, 20, 4);
29562
29563 spin_unlock_irqrestore(&lp->lock, flags);
29564
29565 @@ -2574,7 +2574,7 @@ static struct net_device_stats *pcnet32_
29566 unsigned long flags;
29567
29568 spin_lock_irqsave(&lp->lock, flags);
29569 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29570 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29571 spin_unlock_irqrestore(&lp->lock, flags);
29572
29573 return &dev->stats;
29574 @@ -2596,10 +2596,10 @@ static void pcnet32_load_multicast(struc
29575 if (dev->flags & IFF_ALLMULTI) {
29576 ib->filter[0] = cpu_to_le32(~0U);
29577 ib->filter[1] = cpu_to_le32(~0U);
29578 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29579 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29580 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29581 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29582 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29583 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29584 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29585 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29586 return;
29587 }
29588 /* clear the multicast filter */
29589 @@ -2619,7 +2619,7 @@ static void pcnet32_load_multicast(struc
29590 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
29591 }
29592 for (i = 0; i < 4; i++)
29593 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
29594 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
29595 le16_to_cpu(mcast_table[i]));
29596 }
29597
29598 @@ -2634,28 +2634,28 @@ static void pcnet32_set_multicast_list(s
29599
29600 spin_lock_irqsave(&lp->lock, flags);
29601 suspended = pcnet32_suspend(dev, &flags, 0);
29602 - csr15 = lp->a.read_csr(ioaddr, CSR15);
29603 + csr15 = lp->a->read_csr(ioaddr, CSR15);
29604 if (dev->flags & IFF_PROMISC) {
29605 /* Log any net taps. */
29606 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
29607 lp->init_block->mode =
29608 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
29609 7);
29610 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
29611 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
29612 } else {
29613 lp->init_block->mode =
29614 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
29615 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29616 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29617 pcnet32_load_multicast(dev);
29618 }
29619
29620 if (suspended) {
29621 int csr5;
29622 /* clear SUSPEND (SPND) - CSR5 bit 0 */
29623 - csr5 = lp->a.read_csr(ioaddr, CSR5);
29624 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29625 + csr5 = lp->a->read_csr(ioaddr, CSR5);
29626 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29627 } else {
29628 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29629 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29630 pcnet32_restart(dev, CSR0_NORMAL);
29631 netif_wake_queue(dev);
29632 }
29633 @@ -2673,8 +2673,8 @@ static int mdio_read(struct net_device *
29634 if (!lp->mii)
29635 return 0;
29636
29637 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29638 - val_out = lp->a.read_bcr(ioaddr, 34);
29639 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29640 + val_out = lp->a->read_bcr(ioaddr, 34);
29641
29642 return val_out;
29643 }
29644 @@ -2688,8 +2688,8 @@ static void mdio_write(struct net_device
29645 if (!lp->mii)
29646 return;
29647
29648 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29649 - lp->a.write_bcr(ioaddr, 34, val);
29650 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29651 + lp->a->write_bcr(ioaddr, 34, val);
29652 }
29653
29654 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
29655 @@ -2766,7 +2766,7 @@ static void pcnet32_check_media(struct n
29656 curr_link = mii_link_ok(&lp->mii_if);
29657 } else {
29658 ulong ioaddr = dev->base_addr; /* card base I/O address */
29659 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29660 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29661 }
29662 if (!curr_link) {
29663 if (prev_link || verbose) {
29664 @@ -2789,13 +2789,13 @@ static void pcnet32_check_media(struct n
29665 (ecmd.duplex == DUPLEX_FULL)
29666 ? "full" : "half");
29667 }
29668 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
29669 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
29670 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
29671 if (lp->mii_if.full_duplex)
29672 bcr9 |= (1 << 0);
29673 else
29674 bcr9 &= ~(1 << 0);
29675 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
29676 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
29677 }
29678 } else {
29679 netif_info(lp, link, dev, "link up\n");
29680 diff -urNp linux-2.6.39.4/drivers/net/ppp_generic.c linux-2.6.39.4/drivers/net/ppp_generic.c
29681 --- linux-2.6.39.4/drivers/net/ppp_generic.c 2011-05-19 00:06:34.000000000 -0400
29682 +++ linux-2.6.39.4/drivers/net/ppp_generic.c 2011-08-05 19:44:37.000000000 -0400
29683 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
29684 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
29685 struct ppp_stats stats;
29686 struct ppp_comp_stats cstats;
29687 - char *vers;
29688
29689 switch (cmd) {
29690 case SIOCGPPPSTATS:
29691 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
29692 break;
29693
29694 case SIOCGPPPVER:
29695 - vers = PPP_VERSION;
29696 - if (copy_to_user(addr, vers, strlen(vers) + 1))
29697 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
29698 break;
29699 err = 0;
29700 break;
29701 diff -urNp linux-2.6.39.4/drivers/net/r8169.c linux-2.6.39.4/drivers/net/r8169.c
29702 --- linux-2.6.39.4/drivers/net/r8169.c 2011-05-19 00:06:34.000000000 -0400
29703 +++ linux-2.6.39.4/drivers/net/r8169.c 2011-08-05 20:34:06.000000000 -0400
29704 @@ -552,12 +552,12 @@ struct rtl8169_private {
29705 struct mdio_ops {
29706 void (*write)(void __iomem *, int, int);
29707 int (*read)(void __iomem *, int);
29708 - } mdio_ops;
29709 + } __no_const mdio_ops;
29710
29711 struct pll_power_ops {
29712 void (*down)(struct rtl8169_private *);
29713 void (*up)(struct rtl8169_private *);
29714 - } pll_power_ops;
29715 + } __no_const pll_power_ops;
29716
29717 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
29718 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
29719 diff -urNp linux-2.6.39.4/drivers/net/tg3.h linux-2.6.39.4/drivers/net/tg3.h
29720 --- linux-2.6.39.4/drivers/net/tg3.h 2011-05-19 00:06:34.000000000 -0400
29721 +++ linux-2.6.39.4/drivers/net/tg3.h 2011-08-05 19:44:37.000000000 -0400
29722 @@ -131,6 +131,7 @@
29723 #define CHIPREV_ID_5750_A0 0x4000
29724 #define CHIPREV_ID_5750_A1 0x4001
29725 #define CHIPREV_ID_5750_A3 0x4003
29726 +#define CHIPREV_ID_5750_C1 0x4201
29727 #define CHIPREV_ID_5750_C2 0x4202
29728 #define CHIPREV_ID_5752_A0_HW 0x5000
29729 #define CHIPREV_ID_5752_A0 0x6000
29730 diff -urNp linux-2.6.39.4/drivers/net/tokenring/abyss.c linux-2.6.39.4/drivers/net/tokenring/abyss.c
29731 --- linux-2.6.39.4/drivers/net/tokenring/abyss.c 2011-05-19 00:06:34.000000000 -0400
29732 +++ linux-2.6.39.4/drivers/net/tokenring/abyss.c 2011-08-05 20:34:06.000000000 -0400
29733 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
29734
29735 static int __init abyss_init (void)
29736 {
29737 - abyss_netdev_ops = tms380tr_netdev_ops;
29738 + pax_open_kernel();
29739 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29740
29741 - abyss_netdev_ops.ndo_open = abyss_open;
29742 - abyss_netdev_ops.ndo_stop = abyss_close;
29743 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
29744 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
29745 + pax_close_kernel();
29746
29747 return pci_register_driver(&abyss_driver);
29748 }
29749 diff -urNp linux-2.6.39.4/drivers/net/tokenring/madgemc.c linux-2.6.39.4/drivers/net/tokenring/madgemc.c
29750 --- linux-2.6.39.4/drivers/net/tokenring/madgemc.c 2011-05-19 00:06:34.000000000 -0400
29751 +++ linux-2.6.39.4/drivers/net/tokenring/madgemc.c 2011-08-05 20:34:06.000000000 -0400
29752 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
29753
29754 static int __init madgemc_init (void)
29755 {
29756 - madgemc_netdev_ops = tms380tr_netdev_ops;
29757 - madgemc_netdev_ops.ndo_open = madgemc_open;
29758 - madgemc_netdev_ops.ndo_stop = madgemc_close;
29759 + pax_open_kernel();
29760 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29761 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
29762 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
29763 + pax_close_kernel();
29764
29765 return mca_register_driver (&madgemc_driver);
29766 }
29767 diff -urNp linux-2.6.39.4/drivers/net/tokenring/proteon.c linux-2.6.39.4/drivers/net/tokenring/proteon.c
29768 --- linux-2.6.39.4/drivers/net/tokenring/proteon.c 2011-05-19 00:06:34.000000000 -0400
29769 +++ linux-2.6.39.4/drivers/net/tokenring/proteon.c 2011-08-05 20:34:06.000000000 -0400
29770 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
29771 struct platform_device *pdev;
29772 int i, num = 0, err = 0;
29773
29774 - proteon_netdev_ops = tms380tr_netdev_ops;
29775 - proteon_netdev_ops.ndo_open = proteon_open;
29776 - proteon_netdev_ops.ndo_stop = tms380tr_close;
29777 + pax_open_kernel();
29778 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29779 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
29780 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
29781 + pax_close_kernel();
29782
29783 err = platform_driver_register(&proteon_driver);
29784 if (err)
29785 diff -urNp linux-2.6.39.4/drivers/net/tokenring/skisa.c linux-2.6.39.4/drivers/net/tokenring/skisa.c
29786 --- linux-2.6.39.4/drivers/net/tokenring/skisa.c 2011-05-19 00:06:34.000000000 -0400
29787 +++ linux-2.6.39.4/drivers/net/tokenring/skisa.c 2011-08-05 20:34:06.000000000 -0400
29788 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
29789 struct platform_device *pdev;
29790 int i, num = 0, err = 0;
29791
29792 - sk_isa_netdev_ops = tms380tr_netdev_ops;
29793 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
29794 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29795 + pax_open_kernel();
29796 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29797 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
29798 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29799 + pax_close_kernel();
29800
29801 err = platform_driver_register(&sk_isa_driver);
29802 if (err)
29803 diff -urNp linux-2.6.39.4/drivers/net/tulip/de2104x.c linux-2.6.39.4/drivers/net/tulip/de2104x.c
29804 --- linux-2.6.39.4/drivers/net/tulip/de2104x.c 2011-05-19 00:06:34.000000000 -0400
29805 +++ linux-2.6.39.4/drivers/net/tulip/de2104x.c 2011-08-05 19:44:37.000000000 -0400
29806 @@ -1817,6 +1817,8 @@ static void __devinit de21041_get_srom_i
29807 struct de_srom_info_leaf *il;
29808 void *bufp;
29809
29810 + pax_track_stack();
29811 +
29812 /* download entire eeprom */
29813 for (i = 0; i < DE_EEPROM_WORDS; i++)
29814 ((__le16 *)ee_data)[i] =
29815 diff -urNp linux-2.6.39.4/drivers/net/tulip/de4x5.c linux-2.6.39.4/drivers/net/tulip/de4x5.c
29816 --- linux-2.6.39.4/drivers/net/tulip/de4x5.c 2011-05-19 00:06:34.000000000 -0400
29817 +++ linux-2.6.39.4/drivers/net/tulip/de4x5.c 2011-08-05 19:44:37.000000000 -0400
29818 @@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
29819 for (i=0; i<ETH_ALEN; i++) {
29820 tmp.addr[i] = dev->dev_addr[i];
29821 }
29822 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29823 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29824 break;
29825
29826 case DE4X5_SET_HWADDR: /* Set the hardware address */
29827 @@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
29828 spin_lock_irqsave(&lp->lock, flags);
29829 memcpy(&statbuf, &lp->pktStats, ioc->len);
29830 spin_unlock_irqrestore(&lp->lock, flags);
29831 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
29832 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
29833 return -EFAULT;
29834 break;
29835 }
29836 diff -urNp linux-2.6.39.4/drivers/net/usb/hso.c linux-2.6.39.4/drivers/net/usb/hso.c
29837 --- linux-2.6.39.4/drivers/net/usb/hso.c 2011-05-19 00:06:34.000000000 -0400
29838 +++ linux-2.6.39.4/drivers/net/usb/hso.c 2011-08-05 19:44:37.000000000 -0400
29839 @@ -71,7 +71,7 @@
29840 #include <asm/byteorder.h>
29841 #include <linux/serial_core.h>
29842 #include <linux/serial.h>
29843 -
29844 +#include <asm/local.h>
29845
29846 #define MOD_AUTHOR "Option Wireless"
29847 #define MOD_DESCRIPTION "USB High Speed Option driver"
29848 @@ -257,7 +257,7 @@ struct hso_serial {
29849
29850 /* from usb_serial_port */
29851 struct tty_struct *tty;
29852 - int open_count;
29853 + local_t open_count;
29854 spinlock_t serial_lock;
29855
29856 int (*write_data) (struct hso_serial *serial);
29857 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
29858 struct urb *urb;
29859
29860 urb = serial->rx_urb[0];
29861 - if (serial->open_count > 0) {
29862 + if (local_read(&serial->open_count) > 0) {
29863 count = put_rxbuf_data(urb, serial);
29864 if (count == -1)
29865 return;
29866 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
29867 DUMP1(urb->transfer_buffer, urb->actual_length);
29868
29869 /* Anyone listening? */
29870 - if (serial->open_count == 0)
29871 + if (local_read(&serial->open_count) == 0)
29872 return;
29873
29874 if (status == 0) {
29875 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
29876 spin_unlock_irq(&serial->serial_lock);
29877
29878 /* check for port already opened, if not set the termios */
29879 - serial->open_count++;
29880 - if (serial->open_count == 1) {
29881 + if (local_inc_return(&serial->open_count) == 1) {
29882 serial->rx_state = RX_IDLE;
29883 /* Force default termio settings */
29884 _hso_serial_set_termios(tty, NULL);
29885 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
29886 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
29887 if (result) {
29888 hso_stop_serial_device(serial->parent);
29889 - serial->open_count--;
29890 + local_dec(&serial->open_count);
29891 kref_put(&serial->parent->ref, hso_serial_ref_free);
29892 }
29893 } else {
29894 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
29895
29896 /* reset the rts and dtr */
29897 /* do the actual close */
29898 - serial->open_count--;
29899 + local_dec(&serial->open_count);
29900
29901 - if (serial->open_count <= 0) {
29902 - serial->open_count = 0;
29903 + if (local_read(&serial->open_count) <= 0) {
29904 + local_set(&serial->open_count, 0);
29905 spin_lock_irq(&serial->serial_lock);
29906 if (serial->tty == tty) {
29907 serial->tty->driver_data = NULL;
29908 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
29909
29910 /* the actual setup */
29911 spin_lock_irqsave(&serial->serial_lock, flags);
29912 - if (serial->open_count)
29913 + if (local_read(&serial->open_count))
29914 _hso_serial_set_termios(tty, old);
29915 else
29916 tty->termios = old;
29917 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
29918 D1("Pending read interrupt on port %d\n", i);
29919 spin_lock(&serial->serial_lock);
29920 if (serial->rx_state == RX_IDLE &&
29921 - serial->open_count > 0) {
29922 + local_read(&serial->open_count) > 0) {
29923 /* Setup and send a ctrl req read on
29924 * port i */
29925 if (!serial->rx_urb_filled[0]) {
29926 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
29927 /* Start all serial ports */
29928 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
29929 if (serial_table[i] && (serial_table[i]->interface == iface)) {
29930 - if (dev2ser(serial_table[i])->open_count) {
29931 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
29932 result =
29933 hso_start_serial_device(serial_table[i], GFP_NOIO);
29934 hso_kick_transmit(dev2ser(serial_table[i]));
29935 diff -urNp linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c
29936 --- linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-05-19 00:06:34.000000000 -0400
29937 +++ linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-05 19:44:37.000000000 -0400
29938 @@ -631,8 +631,7 @@ vmxnet3_set_rss_indir(struct net_device
29939 * Return with error code if any of the queue indices
29940 * is out of range
29941 */
29942 - if (p->ring_index[i] < 0 ||
29943 - p->ring_index[i] >= adapter->num_rx_queues)
29944 + if (p->ring_index[i] >= adapter->num_rx_queues)
29945 return -EINVAL;
29946 }
29947
29948 diff -urNp linux-2.6.39.4/drivers/net/vxge/vxge-config.h linux-2.6.39.4/drivers/net/vxge/vxge-config.h
29949 --- linux-2.6.39.4/drivers/net/vxge/vxge-config.h 2011-05-19 00:06:34.000000000 -0400
29950 +++ linux-2.6.39.4/drivers/net/vxge/vxge-config.h 2011-08-05 20:34:06.000000000 -0400
29951 @@ -508,7 +508,7 @@ struct vxge_hw_uld_cbs {
29952 void (*link_down)(struct __vxge_hw_device *devh);
29953 void (*crit_err)(struct __vxge_hw_device *devh,
29954 enum vxge_hw_event type, u64 ext_data);
29955 -};
29956 +} __no_const;
29957
29958 /*
29959 * struct __vxge_hw_blockpool_entry - Block private data structure
29960 diff -urNp linux-2.6.39.4/drivers/net/vxge/vxge-main.c linux-2.6.39.4/drivers/net/vxge/vxge-main.c
29961 --- linux-2.6.39.4/drivers/net/vxge/vxge-main.c 2011-05-19 00:06:34.000000000 -0400
29962 +++ linux-2.6.39.4/drivers/net/vxge/vxge-main.c 2011-08-05 19:44:37.000000000 -0400
29963 @@ -97,6 +97,8 @@ static inline void VXGE_COMPLETE_VPATH_T
29964 struct sk_buff *completed[NR_SKB_COMPLETED];
29965 int more;
29966
29967 + pax_track_stack();
29968 +
29969 do {
29970 more = 0;
29971 skb_ptr = completed;
29972 @@ -1927,6 +1929,8 @@ static enum vxge_hw_status vxge_rth_conf
29973 u8 mtable[256] = {0}; /* CPU to vpath mapping */
29974 int index;
29975
29976 + pax_track_stack();
29977 +
29978 /*
29979 * Filling
29980 * - itable with bucket numbers
29981 diff -urNp linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h
29982 --- linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h 2011-05-19 00:06:34.000000000 -0400
29983 +++ linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h 2011-08-05 20:34:06.000000000 -0400
29984 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
29985 struct vxge_hw_mempool_dma *dma_object,
29986 u32 index,
29987 u32 is_last);
29988 -};
29989 +} __no_const;
29990
29991 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
29992 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
29993 diff -urNp linux-2.6.39.4/drivers/net/wan/cycx_x25.c linux-2.6.39.4/drivers/net/wan/cycx_x25.c
29994 --- linux-2.6.39.4/drivers/net/wan/cycx_x25.c 2011-05-19 00:06:34.000000000 -0400
29995 +++ linux-2.6.39.4/drivers/net/wan/cycx_x25.c 2011-08-05 19:44:37.000000000 -0400
29996 @@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
29997 unsigned char hex[1024],
29998 * phex = hex;
29999
30000 + pax_track_stack();
30001 +
30002 if (len >= (sizeof(hex) / 2))
30003 len = (sizeof(hex) / 2) - 1;
30004
30005 diff -urNp linux-2.6.39.4/drivers/net/wan/hdlc_x25.c linux-2.6.39.4/drivers/net/wan/hdlc_x25.c
30006 --- linux-2.6.39.4/drivers/net/wan/hdlc_x25.c 2011-05-19 00:06:34.000000000 -0400
30007 +++ linux-2.6.39.4/drivers/net/wan/hdlc_x25.c 2011-08-05 20:34:06.000000000 -0400
30008 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
30009
30010 static int x25_open(struct net_device *dev)
30011 {
30012 - struct lapb_register_struct cb;
30013 + static struct lapb_register_struct cb = {
30014 + .connect_confirmation = x25_connected,
30015 + .connect_indication = x25_connected,
30016 + .disconnect_confirmation = x25_disconnected,
30017 + .disconnect_indication = x25_disconnected,
30018 + .data_indication = x25_data_indication,
30019 + .data_transmit = x25_data_transmit
30020 + };
30021 int result;
30022
30023 - cb.connect_confirmation = x25_connected;
30024 - cb.connect_indication = x25_connected;
30025 - cb.disconnect_confirmation = x25_disconnected;
30026 - cb.disconnect_indication = x25_disconnected;
30027 - cb.data_indication = x25_data_indication;
30028 - cb.data_transmit = x25_data_transmit;
30029 -
30030 result = lapb_register(dev, &cb);
30031 if (result != LAPB_OK)
30032 return result;
30033 diff -urNp linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c
30034 --- linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c 2011-05-19 00:06:34.000000000 -0400
30035 +++ linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c 2011-08-05 19:44:37.000000000 -0400
30036 @@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
30037 int do_autopm = 1;
30038 DECLARE_COMPLETION_ONSTACK(notif_completion);
30039
30040 + pax_track_stack();
30041 +
30042 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
30043 i2400m, ack, ack_size);
30044 BUG_ON(_ack == i2400m->bm_ack_buf);
30045 diff -urNp linux-2.6.39.4/drivers/net/wireless/airo.c linux-2.6.39.4/drivers/net/wireless/airo.c
30046 --- linux-2.6.39.4/drivers/net/wireless/airo.c 2011-05-19 00:06:34.000000000 -0400
30047 +++ linux-2.6.39.4/drivers/net/wireless/airo.c 2011-08-05 19:44:37.000000000 -0400
30048 @@ -3001,6 +3001,8 @@ static void airo_process_scan_results (s
30049 BSSListElement * loop_net;
30050 BSSListElement * tmp_net;
30051
30052 + pax_track_stack();
30053 +
30054 /* Blow away current list of scan results */
30055 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
30056 list_move_tail (&loop_net->list, &ai->network_free_list);
30057 @@ -3792,6 +3794,8 @@ static u16 setup_card(struct airo_info *
30058 WepKeyRid wkr;
30059 int rc;
30060
30061 + pax_track_stack();
30062 +
30063 memset( &mySsid, 0, sizeof( mySsid ) );
30064 kfree (ai->flash);
30065 ai->flash = NULL;
30066 @@ -4760,6 +4764,8 @@ static int proc_stats_rid_open( struct i
30067 __le32 *vals = stats.vals;
30068 int len;
30069
30070 + pax_track_stack();
30071 +
30072 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
30073 return -ENOMEM;
30074 data = file->private_data;
30075 @@ -5483,6 +5489,8 @@ static int proc_BSSList_open( struct ino
30076 /* If doLoseSync is not 1, we won't do a Lose Sync */
30077 int doLoseSync = -1;
30078
30079 + pax_track_stack();
30080 +
30081 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
30082 return -ENOMEM;
30083 data = file->private_data;
30084 @@ -7190,6 +7198,8 @@ static int airo_get_aplist(struct net_de
30085 int i;
30086 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
30087
30088 + pax_track_stack();
30089 +
30090 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
30091 if (!qual)
30092 return -ENOMEM;
30093 @@ -7750,6 +7760,8 @@ static void airo_read_wireless_stats(str
30094 CapabilityRid cap_rid;
30095 __le32 *vals = stats_rid.vals;
30096
30097 + pax_track_stack();
30098 +
30099 /* Get stats out of the card */
30100 clear_bit(JOB_WSTATS, &local->jobs);
30101 if (local->power.event) {
30102 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c
30103 --- linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c 2011-05-19 00:06:34.000000000 -0400
30104 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c 2011-08-05 19:44:37.000000000 -0400
30105 @@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
30106 unsigned int v;
30107 u64 tsf;
30108
30109 + pax_track_stack();
30110 +
30111 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
30112 len += snprintf(buf+len, sizeof(buf)-len,
30113 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
30114 @@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
30115 unsigned int len = 0;
30116 unsigned int i;
30117
30118 + pax_track_stack();
30119 +
30120 len += snprintf(buf+len, sizeof(buf)-len,
30121 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
30122
30123 @@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
30124 unsigned int i;
30125 unsigned int v;
30126
30127 + pax_track_stack();
30128 +
30129 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
30130 sc->ah->ah_ant_mode);
30131 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
30132 @@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
30133 unsigned int len = 0;
30134 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
30135
30136 + pax_track_stack();
30137 +
30138 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
30139 sc->bssidmask);
30140 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
30141 @@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
30142 unsigned int len = 0;
30143 int i;
30144
30145 + pax_track_stack();
30146 +
30147 len += snprintf(buf+len, sizeof(buf)-len,
30148 "RX\n---------------------\n");
30149 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
30150 @@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
30151 char buf[700];
30152 unsigned int len = 0;
30153
30154 + pax_track_stack();
30155 +
30156 len += snprintf(buf+len, sizeof(buf)-len,
30157 "HW has PHY error counters:\t%s\n",
30158 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
30159 @@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
30160 struct ath5k_buf *bf, *bf0;
30161 int i, n;
30162
30163 + pax_track_stack();
30164 +
30165 len += snprintf(buf+len, sizeof(buf)-len,
30166 "available txbuffers: %d\n", sc->txbuf_len);
30167
30168 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c
30169 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-05-19 00:06:34.000000000 -0400
30170 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-05 19:44:37.000000000 -0400
30171 @@ -734,6 +734,8 @@ static void ar9003_hw_tx_iq_cal(struct a
30172 s32 i, j, ip, im, nmeasurement;
30173 u8 nchains = get_streams(common->tx_chainmask);
30174
30175 + pax_track_stack();
30176 +
30177 for (ip = 0; ip < MPASS; ip++) {
30178 REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
30179 AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
30180 @@ -856,6 +858,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
30181 int i, ip, im, j;
30182 int nmeasurement;
30183
30184 + pax_track_stack();
30185 +
30186 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
30187 if (ah->txchainmask & (1 << i))
30188 num_chains++;
30189 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
30190 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-05-19 00:06:34.000000000 -0400
30191 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-05 19:44:37.000000000 -0400
30192 @@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
30193 int theta_low_bin = 0;
30194 int i;
30195
30196 + pax_track_stack();
30197 +
30198 /* disregard any bin that contains <= 16 samples */
30199 thresh_accum_cnt = 16;
30200 scale_factor = 5;
30201 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c
30202 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c 2011-05-19 00:06:34.000000000 -0400
30203 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c 2011-08-05 19:44:37.000000000 -0400
30204 @@ -335,6 +335,8 @@ static ssize_t read_file_interrupt(struc
30205 char buf[512];
30206 unsigned int len = 0;
30207
30208 + pax_track_stack();
30209 +
30210 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
30211 len += snprintf(buf + len, sizeof(buf) - len,
30212 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
30213 @@ -422,6 +424,8 @@ static ssize_t read_file_wiphy(struct fi
30214 u8 addr[ETH_ALEN];
30215 u32 tmp;
30216
30217 + pax_track_stack();
30218 +
30219 len += snprintf(buf + len, sizeof(buf) - len,
30220 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
30221 wiphy_name(sc->hw->wiphy),
30222 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c
30223 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c 2011-05-19 00:06:34.000000000 -0400
30224 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c 2011-08-05 20:34:06.000000000 -0400
30225 @@ -737,6 +737,8 @@ static ssize_t read_file_tgt_stats(struc
30226 unsigned int len = 0;
30227 int ret = 0;
30228
30229 + pax_track_stack();
30230 +
30231 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30232
30233 WMI_CMD(WMI_TGT_STATS_CMDID);
30234 @@ -782,6 +784,8 @@ static ssize_t read_file_xmit(struct fil
30235 char buf[512];
30236 unsigned int len = 0;
30237
30238 + pax_track_stack();
30239 +
30240 len += snprintf(buf + len, sizeof(buf) - len,
30241 "%20s : %10u\n", "Buffers queued",
30242 priv->debug.tx_stats.buf_queued);
30243 @@ -831,6 +835,8 @@ static ssize_t read_file_recv(struct fil
30244 char buf[512];
30245 unsigned int len = 0;
30246
30247 + pax_track_stack();
30248 +
30249 len += snprintf(buf + len, sizeof(buf) - len,
30250 "%20s : %10u\n", "SKBs allocated",
30251 priv->debug.rx_stats.skb_allocated);
30252 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h
30253 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h 2011-05-19 00:06:34.000000000 -0400
30254 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h 2011-08-05 20:34:06.000000000 -0400
30255 @@ -592,7 +592,7 @@ struct ath_hw_private_ops {
30256
30257 /* ANI */
30258 void (*ani_cache_ini_regs)(struct ath_hw *ah);
30259 -};
30260 +} __no_const;
30261
30262 /**
30263 * struct ath_hw_ops - callbacks used by hardware code and driver code
30264 @@ -642,7 +642,7 @@ struct ath_hw_ops {
30265 u32 burstDuration);
30266 void (*set11n_virtualmorefrag)(struct ath_hw *ah, void *ds,
30267 u32 vmf);
30268 -};
30269 +} __no_const;
30270
30271 struct ath_nf_limits {
30272 s16 max;
30273 diff -urNp linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c
30274 --- linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-19 00:06:34.000000000 -0400
30275 +++ linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-05 19:44:37.000000000 -0400
30276 @@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
30277 int err;
30278 DECLARE_SSID_BUF(ssid);
30279
30280 + pax_track_stack();
30281 +
30282 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
30283
30284 if (ssid_len)
30285 @@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
30286 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
30287 int err;
30288
30289 + pax_track_stack();
30290 +
30291 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
30292 idx, keylen, len);
30293
30294 diff -urNp linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c
30295 --- linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-19 00:06:34.000000000 -0400
30296 +++ linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-05 19:44:37.000000000 -0400
30297 @@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
30298 unsigned long flags;
30299 DECLARE_SSID_BUF(ssid);
30300
30301 + pax_track_stack();
30302 +
30303 LIBIPW_DEBUG_SCAN("'%s' (%pM"
30304 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
30305 print_ssid(ssid, info_element->data, info_element->len),
30306 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c
30307 --- linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-05-19 00:06:34.000000000 -0400
30308 +++ linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-08-05 20:34:06.000000000 -0400
30309 @@ -3958,7 +3958,9 @@ static int iwl3945_pci_probe(struct pci_
30310 */
30311 if (iwl3945_mod_params.disable_hw_scan) {
30312 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
30313 - iwl3945_hw_ops.hw_scan = NULL;
30314 + pax_open_kernel();
30315 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
30316 + pax_close_kernel();
30317 }
30318
30319 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
30320 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c
30321 --- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-06-25 12:55:22.000000000 -0400
30322 +++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-08-05 20:34:06.000000000 -0400
30323 @@ -3974,7 +3974,9 @@ static int iwl_pci_probe(struct pci_dev
30324 if (cfg->mod_params->disable_hw_scan) {
30325 dev_printk(KERN_DEBUG, &(pdev->dev),
30326 "sw scan support is deprecated\n");
30327 - iwlagn_hw_ops.hw_scan = NULL;
30328 + pax_open_kernel();
30329 + *(void **)&iwlagn_hw_ops.hw_scan = NULL;
30330 + pax_close_kernel();
30331 }
30332
30333 hw = iwl_alloc_all(cfg);
30334 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
30335 --- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-19 00:06:34.000000000 -0400
30336 +++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-05 19:44:37.000000000 -0400
30337 @@ -883,6 +883,8 @@ static void rs_tx_status(void *priv_r, s
30338 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
30339 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
30340
30341 + pax_track_stack();
30342 +
30343 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
30344
30345 /* Treat uninitialized rate scaling data same as non-existing. */
30346 @@ -2894,6 +2896,8 @@ static void rs_fill_link_cmd(struct iwl_
30347 container_of(lq_sta, struct iwl_station_priv, lq_sta);
30348 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
30349
30350 + pax_track_stack();
30351 +
30352 /* Override starting rate (index 0) if needed for debug purposes */
30353 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
30354
30355 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c
30356 --- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-19 00:06:34.000000000 -0400
30357 +++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-05 19:44:37.000000000 -0400
30358 @@ -549,6 +549,8 @@ static ssize_t iwl_dbgfs_status_read(str
30359 int pos = 0;
30360 const size_t bufsz = sizeof(buf);
30361
30362 + pax_track_stack();
30363 +
30364 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
30365 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
30366 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
30367 @@ -681,6 +683,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
30368 char buf[256 * NUM_IWL_RXON_CTX];
30369 const size_t bufsz = sizeof(buf);
30370
30371 + pax_track_stack();
30372 +
30373 for_each_context(priv, ctx) {
30374 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
30375 ctx->ctxid);
30376 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h
30377 --- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-05-19 00:06:34.000000000 -0400
30378 +++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-05 19:44:37.000000000 -0400
30379 @@ -68,8 +68,8 @@ do {
30380 } while (0)
30381
30382 #else
30383 -#define IWL_DEBUG(__priv, level, fmt, args...)
30384 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
30385 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
30386 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
30387 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
30388 const void *p, u32 len)
30389 {}
30390 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c
30391 --- linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-19 00:06:34.000000000 -0400
30392 +++ linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-05 19:44:37.000000000 -0400
30393 @@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
30394 int buf_len = 512;
30395 size_t len = 0;
30396
30397 + pax_track_stack();
30398 +
30399 if (*ppos != 0)
30400 return 0;
30401 if (count < sizeof(buf))
30402 diff -urNp linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c
30403 --- linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c 2011-05-19 00:06:34.000000000 -0400
30404 +++ linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c 2011-08-05 20:34:06.000000000 -0400
30405 @@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
30406 return -EINVAL;
30407
30408 if (fake_hw_scan) {
30409 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30410 - mac80211_hwsim_ops.sw_scan_start = NULL;
30411 - mac80211_hwsim_ops.sw_scan_complete = NULL;
30412 + pax_open_kernel();
30413 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30414 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
30415 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
30416 + pax_close_kernel();
30417 }
30418
30419 spin_lock_init(&hwsim_radio_lock);
30420 diff -urNp linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c
30421 --- linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c 2011-05-19 00:06:34.000000000 -0400
30422 +++ linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c 2011-08-05 19:44:37.000000000 -0400
30423 @@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
30424
30425 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
30426
30427 - if (rts_threshold < 0 || rts_threshold > 2347)
30428 + if (rts_threshold > 2347)
30429 rts_threshold = 2347;
30430
30431 tmp = cpu_to_le32(rts_threshold);
30432 diff -urNp linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
30433 --- linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-05-19 00:06:34.000000000 -0400
30434 +++ linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-05 19:44:37.000000000 -0400
30435 @@ -827,6 +827,8 @@ static bool _rtl92c_phy_sw_chnl_step_by_
30436 u8 rfpath;
30437 u8 num_total_rfpath = rtlphy->num_total_rfpath;
30438
30439 + pax_track_stack();
30440 +
30441 precommoncmdcnt = 0;
30442 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
30443 MAX_PRECMD_CNT,
30444 diff -urNp linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h
30445 --- linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h 2011-05-19 00:06:34.000000000 -0400
30446 +++ linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h 2011-08-05 20:34:06.000000000 -0400
30447 @@ -260,7 +260,7 @@ struct wl1251_if_operations {
30448 void (*reset)(struct wl1251 *wl);
30449 void (*enable_irq)(struct wl1251 *wl);
30450 void (*disable_irq)(struct wl1251 *wl);
30451 -};
30452 +} __no_const;
30453
30454 struct wl1251 {
30455 struct ieee80211_hw *hw;
30456 diff -urNp linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c
30457 --- linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c 2011-05-19 00:06:34.000000000 -0400
30458 +++ linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c 2011-08-05 19:44:37.000000000 -0400
30459 @@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
30460 u32 chunk_len;
30461 int i;
30462
30463 + pax_track_stack();
30464 +
30465 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
30466
30467 spi_message_init(&m);
30468 diff -urNp linux-2.6.39.4/drivers/oprofile/buffer_sync.c linux-2.6.39.4/drivers/oprofile/buffer_sync.c
30469 --- linux-2.6.39.4/drivers/oprofile/buffer_sync.c 2011-06-25 12:55:22.000000000 -0400
30470 +++ linux-2.6.39.4/drivers/oprofile/buffer_sync.c 2011-08-05 19:44:37.000000000 -0400
30471 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
30472 if (cookie == NO_COOKIE)
30473 offset = pc;
30474 if (cookie == INVALID_COOKIE) {
30475 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30476 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30477 offset = pc;
30478 }
30479 if (cookie != last_cookie) {
30480 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
30481 /* add userspace sample */
30482
30483 if (!mm) {
30484 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
30485 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
30486 return 0;
30487 }
30488
30489 cookie = lookup_dcookie(mm, s->eip, &offset);
30490
30491 if (cookie == INVALID_COOKIE) {
30492 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30493 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30494 return 0;
30495 }
30496
30497 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
30498 /* ignore backtraces if failed to add a sample */
30499 if (state == sb_bt_start) {
30500 state = sb_bt_ignore;
30501 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
30502 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
30503 }
30504 }
30505 release_mm(mm);
30506 diff -urNp linux-2.6.39.4/drivers/oprofile/event_buffer.c linux-2.6.39.4/drivers/oprofile/event_buffer.c
30507 --- linux-2.6.39.4/drivers/oprofile/event_buffer.c 2011-05-19 00:06:34.000000000 -0400
30508 +++ linux-2.6.39.4/drivers/oprofile/event_buffer.c 2011-08-05 19:44:37.000000000 -0400
30509 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
30510 }
30511
30512 if (buffer_pos == buffer_size) {
30513 - atomic_inc(&oprofile_stats.event_lost_overflow);
30514 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
30515 return;
30516 }
30517
30518 diff -urNp linux-2.6.39.4/drivers/oprofile/oprof.c linux-2.6.39.4/drivers/oprofile/oprof.c
30519 --- linux-2.6.39.4/drivers/oprofile/oprof.c 2011-05-19 00:06:34.000000000 -0400
30520 +++ linux-2.6.39.4/drivers/oprofile/oprof.c 2011-08-05 19:44:37.000000000 -0400
30521 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
30522 if (oprofile_ops.switch_events())
30523 return;
30524
30525 - atomic_inc(&oprofile_stats.multiplex_counter);
30526 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
30527 start_switch_worker();
30528 }
30529
30530 diff -urNp linux-2.6.39.4/drivers/oprofile/oprofilefs.c linux-2.6.39.4/drivers/oprofile/oprofilefs.c
30531 --- linux-2.6.39.4/drivers/oprofile/oprofilefs.c 2011-05-19 00:06:34.000000000 -0400
30532 +++ linux-2.6.39.4/drivers/oprofile/oprofilefs.c 2011-08-05 19:44:37.000000000 -0400
30533 @@ -186,7 +186,7 @@ static const struct file_operations atom
30534
30535
30536 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
30537 - char const *name, atomic_t *val)
30538 + char const *name, atomic_unchecked_t *val)
30539 {
30540 return __oprofilefs_create_file(sb, root, name,
30541 &atomic_ro_fops, 0444, val);
30542 diff -urNp linux-2.6.39.4/drivers/oprofile/oprofile_stats.c linux-2.6.39.4/drivers/oprofile/oprofile_stats.c
30543 --- linux-2.6.39.4/drivers/oprofile/oprofile_stats.c 2011-05-19 00:06:34.000000000 -0400
30544 +++ linux-2.6.39.4/drivers/oprofile/oprofile_stats.c 2011-08-05 19:44:37.000000000 -0400
30545 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
30546 cpu_buf->sample_invalid_eip = 0;
30547 }
30548
30549 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
30550 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
30551 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
30552 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
30553 - atomic_set(&oprofile_stats.multiplex_counter, 0);
30554 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
30555 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
30556 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
30557 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
30558 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
30559 }
30560
30561
30562 diff -urNp linux-2.6.39.4/drivers/oprofile/oprofile_stats.h linux-2.6.39.4/drivers/oprofile/oprofile_stats.h
30563 --- linux-2.6.39.4/drivers/oprofile/oprofile_stats.h 2011-05-19 00:06:34.000000000 -0400
30564 +++ linux-2.6.39.4/drivers/oprofile/oprofile_stats.h 2011-08-05 19:44:37.000000000 -0400
30565 @@ -13,11 +13,11 @@
30566 #include <asm/atomic.h>
30567
30568 struct oprofile_stat_struct {
30569 - atomic_t sample_lost_no_mm;
30570 - atomic_t sample_lost_no_mapping;
30571 - atomic_t bt_lost_no_mapping;
30572 - atomic_t event_lost_overflow;
30573 - atomic_t multiplex_counter;
30574 + atomic_unchecked_t sample_lost_no_mm;
30575 + atomic_unchecked_t sample_lost_no_mapping;
30576 + atomic_unchecked_t bt_lost_no_mapping;
30577 + atomic_unchecked_t event_lost_overflow;
30578 + atomic_unchecked_t multiplex_counter;
30579 };
30580
30581 extern struct oprofile_stat_struct oprofile_stats;
30582 diff -urNp linux-2.6.39.4/drivers/parport/procfs.c linux-2.6.39.4/drivers/parport/procfs.c
30583 --- linux-2.6.39.4/drivers/parport/procfs.c 2011-05-19 00:06:34.000000000 -0400
30584 +++ linux-2.6.39.4/drivers/parport/procfs.c 2011-08-05 19:44:37.000000000 -0400
30585 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
30586
30587 *ppos += len;
30588
30589 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
30590 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
30591 }
30592
30593 #ifdef CONFIG_PARPORT_1284
30594 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
30595
30596 *ppos += len;
30597
30598 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
30599 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
30600 }
30601 #endif /* IEEE1284.3 support. */
30602
30603 diff -urNp linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h
30604 --- linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h 2011-05-19 00:06:34.000000000 -0400
30605 +++ linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h 2011-08-05 20:34:06.000000000 -0400
30606 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
30607 int (*hardware_test) (struct slot* slot, u32 value);
30608 u8 (*get_power) (struct slot* slot);
30609 int (*set_power) (struct slot* slot, int value);
30610 -};
30611 +} __no_const;
30612
30613 struct cpci_hp_controller {
30614 unsigned int irq;
30615 diff -urNp linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c
30616 --- linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-05-19 00:06:34.000000000 -0400
30617 +++ linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-05 19:44:37.000000000 -0400
30618 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
30619
30620 void compaq_nvram_init (void __iomem *rom_start)
30621 {
30622 +
30623 +#ifndef CONFIG_PAX_KERNEXEC
30624 if (rom_start) {
30625 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
30626 }
30627 +#endif
30628 +
30629 dbg("int15 entry = %p\n", compaq_int15_entry_point);
30630
30631 /* initialize our int15 lock */
30632 diff -urNp linux-2.6.39.4/drivers/pci/pcie/aspm.c linux-2.6.39.4/drivers/pci/pcie/aspm.c
30633 --- linux-2.6.39.4/drivers/pci/pcie/aspm.c 2011-05-19 00:06:34.000000000 -0400
30634 +++ linux-2.6.39.4/drivers/pci/pcie/aspm.c 2011-08-05 19:44:37.000000000 -0400
30635 @@ -27,9 +27,9 @@
30636 #define MODULE_PARAM_PREFIX "pcie_aspm."
30637
30638 /* Note: those are not register definitions */
30639 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
30640 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
30641 -#define ASPM_STATE_L1 (4) /* L1 state */
30642 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
30643 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
30644 +#define ASPM_STATE_L1 (4U) /* L1 state */
30645 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
30646 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
30647
30648 diff -urNp linux-2.6.39.4/drivers/pci/probe.c linux-2.6.39.4/drivers/pci/probe.c
30649 --- linux-2.6.39.4/drivers/pci/probe.c 2011-05-19 00:06:34.000000000 -0400
30650 +++ linux-2.6.39.4/drivers/pci/probe.c 2011-08-05 20:34:06.000000000 -0400
30651 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
30652 return ret;
30653 }
30654
30655 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
30656 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
30657 struct device_attribute *attr,
30658 char *buf)
30659 {
30660 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
30661 }
30662
30663 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
30664 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
30665 struct device_attribute *attr,
30666 char *buf)
30667 {
30668 @@ -165,7 +165,7 @@ int __pci_read_base(struct pci_dev *dev,
30669 u32 l, sz, mask;
30670 u16 orig_cmd;
30671
30672 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
30673 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
30674
30675 if (!dev->mmio_always_on) {
30676 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
30677 diff -urNp linux-2.6.39.4/drivers/pci/proc.c linux-2.6.39.4/drivers/pci/proc.c
30678 --- linux-2.6.39.4/drivers/pci/proc.c 2011-05-19 00:06:34.000000000 -0400
30679 +++ linux-2.6.39.4/drivers/pci/proc.c 2011-08-05 19:44:37.000000000 -0400
30680 @@ -476,7 +476,16 @@ static const struct file_operations proc
30681 static int __init pci_proc_init(void)
30682 {
30683 struct pci_dev *dev = NULL;
30684 +
30685 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
30686 +#ifdef CONFIG_GRKERNSEC_PROC_USER
30687 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
30688 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
30689 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
30690 +#endif
30691 +#else
30692 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
30693 +#endif
30694 proc_create("devices", 0, proc_bus_pci_dir,
30695 &proc_bus_pci_dev_operations);
30696 proc_initialized = 1;
30697 diff -urNp linux-2.6.39.4/drivers/pci/xen-pcifront.c linux-2.6.39.4/drivers/pci/xen-pcifront.c
30698 --- linux-2.6.39.4/drivers/pci/xen-pcifront.c 2011-05-19 00:06:34.000000000 -0400
30699 +++ linux-2.6.39.4/drivers/pci/xen-pcifront.c 2011-08-05 20:34:06.000000000 -0400
30700 @@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
30701 struct pcifront_sd *sd = bus->sysdata;
30702 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30703
30704 + pax_track_stack();
30705 +
30706 if (verbose_request)
30707 dev_info(&pdev->xdev->dev,
30708 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
30709 @@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
30710 struct pcifront_sd *sd = bus->sysdata;
30711 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30712
30713 + pax_track_stack();
30714 +
30715 if (verbose_request)
30716 dev_info(&pdev->xdev->dev,
30717 "write dev=%04x:%02x:%02x.%01x - "
30718 @@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
30719 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30720 struct msi_desc *entry;
30721
30722 + pax_track_stack();
30723 +
30724 if (nvec > SH_INFO_MAX_VEC) {
30725 dev_err(&dev->dev, "too much vector for pci frontend: %x."
30726 " Increase SH_INFO_MAX_VEC.\n", nvec);
30727 @@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
30728 struct pcifront_sd *sd = dev->bus->sysdata;
30729 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30730
30731 + pax_track_stack();
30732 +
30733 err = do_pci_op(pdev, &op);
30734
30735 /* What should do for error ? */
30736 @@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
30737 struct pcifront_sd *sd = dev->bus->sysdata;
30738 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30739
30740 + pax_track_stack();
30741 +
30742 err = do_pci_op(pdev, &op);
30743 if (likely(!err)) {
30744 vector[0] = op.value;
30745 diff -urNp linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c
30746 --- linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c 2011-05-19 00:06:34.000000000 -0400
30747 +++ linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c 2011-08-05 20:34:06.000000000 -0400
30748 @@ -2109,7 +2109,7 @@ static int hotkey_mask_get(void)
30749 return 0;
30750 }
30751
30752 -void static hotkey_mask_warn_incomplete_mask(void)
30753 +static void hotkey_mask_warn_incomplete_mask(void)
30754 {
30755 /* log only what the user can fix... */
30756 const u32 wantedmask = hotkey_driver_mask &
30757 diff -urNp linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c
30758 --- linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c 2011-05-19 00:06:34.000000000 -0400
30759 +++ linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c 2011-08-05 19:44:37.000000000 -0400
30760 @@ -59,7 +59,7 @@ do { \
30761 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
30762 } while(0)
30763
30764 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
30765 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
30766 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
30767
30768 /*
30769 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
30770
30771 cpu = get_cpu();
30772 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
30773 +
30774 + pax_open_kernel();
30775 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
30776 + pax_close_kernel();
30777
30778 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
30779 spin_lock_irqsave(&pnp_bios_lock, flags);
30780 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
30781 :"memory");
30782 spin_unlock_irqrestore(&pnp_bios_lock, flags);
30783
30784 + pax_open_kernel();
30785 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
30786 + pax_close_kernel();
30787 +
30788 put_cpu();
30789
30790 /* If we get here and this is set then the PnP BIOS faulted on us. */
30791 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
30792 return status;
30793 }
30794
30795 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
30796 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
30797 {
30798 int i;
30799
30800 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
30801 pnp_bios_callpoint.offset = header->fields.pm16offset;
30802 pnp_bios_callpoint.segment = PNP_CS16;
30803
30804 + pax_open_kernel();
30805 +
30806 for_each_possible_cpu(i) {
30807 struct desc_struct *gdt = get_cpu_gdt_table(i);
30808 if (!gdt)
30809 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
30810 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
30811 (unsigned long)__va(header->fields.pm16dseg));
30812 }
30813 +
30814 + pax_close_kernel();
30815 }
30816 diff -urNp linux-2.6.39.4/drivers/pnp/resource.c linux-2.6.39.4/drivers/pnp/resource.c
30817 --- linux-2.6.39.4/drivers/pnp/resource.c 2011-05-19 00:06:34.000000000 -0400
30818 +++ linux-2.6.39.4/drivers/pnp/resource.c 2011-08-05 19:44:37.000000000 -0400
30819 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
30820 return 1;
30821
30822 /* check if the resource is valid */
30823 - if (*irq < 0 || *irq > 15)
30824 + if (*irq > 15)
30825 return 0;
30826
30827 /* check if the resource is reserved */
30828 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
30829 return 1;
30830
30831 /* check if the resource is valid */
30832 - if (*dma < 0 || *dma == 4 || *dma > 7)
30833 + if (*dma == 4 || *dma > 7)
30834 return 0;
30835
30836 /* check if the resource is reserved */
30837 diff -urNp linux-2.6.39.4/drivers/power/bq27x00_battery.c linux-2.6.39.4/drivers/power/bq27x00_battery.c
30838 --- linux-2.6.39.4/drivers/power/bq27x00_battery.c 2011-05-19 00:06:34.000000000 -0400
30839 +++ linux-2.6.39.4/drivers/power/bq27x00_battery.c 2011-08-05 20:34:06.000000000 -0400
30840 @@ -66,7 +66,7 @@
30841 struct bq27x00_device_info;
30842 struct bq27x00_access_methods {
30843 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
30844 -};
30845 +} __no_const;
30846
30847 enum bq27x00_chip { BQ27000, BQ27500 };
30848
30849 diff -urNp linux-2.6.39.4/drivers/regulator/max8660.c linux-2.6.39.4/drivers/regulator/max8660.c
30850 --- linux-2.6.39.4/drivers/regulator/max8660.c 2011-05-19 00:06:34.000000000 -0400
30851 +++ linux-2.6.39.4/drivers/regulator/max8660.c 2011-08-05 20:34:06.000000000 -0400
30852 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
30853 max8660->shadow_regs[MAX8660_OVER1] = 5;
30854 } else {
30855 /* Otherwise devices can be toggled via software */
30856 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
30857 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
30858 + pax_open_kernel();
30859 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
30860 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
30861 + pax_close_kernel();
30862 }
30863
30864 /*
30865 diff -urNp linux-2.6.39.4/drivers/regulator/mc13892-regulator.c linux-2.6.39.4/drivers/regulator/mc13892-regulator.c
30866 --- linux-2.6.39.4/drivers/regulator/mc13892-regulator.c 2011-05-19 00:06:34.000000000 -0400
30867 +++ linux-2.6.39.4/drivers/regulator/mc13892-regulator.c 2011-08-05 20:34:06.000000000 -0400
30868 @@ -560,10 +560,12 @@ static int __devinit mc13892_regulator_p
30869 }
30870 mc13xxx_unlock(mc13892);
30871
30872 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30873 + pax_open_kernel();
30874 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30875 = mc13892_vcam_set_mode;
30876 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30877 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30878 = mc13892_vcam_get_mode;
30879 + pax_close_kernel();
30880 for (i = 0; i < pdata->num_regulators; i++) {
30881 init_data = &pdata->regulators[i];
30882 priv->regulators[i] = regulator_register(
30883 diff -urNp linux-2.6.39.4/drivers/rtc/rtc-dev.c linux-2.6.39.4/drivers/rtc/rtc-dev.c
30884 --- linux-2.6.39.4/drivers/rtc/rtc-dev.c 2011-05-19 00:06:34.000000000 -0400
30885 +++ linux-2.6.39.4/drivers/rtc/rtc-dev.c 2011-08-05 19:44:37.000000000 -0400
30886 @@ -14,6 +14,7 @@
30887 #include <linux/module.h>
30888 #include <linux/rtc.h>
30889 #include <linux/sched.h>
30890 +#include <linux/grsecurity.h>
30891 #include "rtc-core.h"
30892
30893 static dev_t rtc_devt;
30894 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
30895 if (copy_from_user(&tm, uarg, sizeof(tm)))
30896 return -EFAULT;
30897
30898 + gr_log_timechange();
30899 +
30900 return rtc_set_time(rtc, &tm);
30901
30902 case RTC_PIE_ON:
30903 diff -urNp linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h
30904 --- linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h 2011-05-19 00:06:34.000000000 -0400
30905 +++ linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h 2011-08-05 20:34:06.000000000 -0400
30906 @@ -492,7 +492,7 @@ struct adapter_ops
30907 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
30908 /* Administrative operations */
30909 int (*adapter_comm)(struct aac_dev * dev, int comm);
30910 -};
30911 +} __no_const;
30912
30913 /*
30914 * Define which interrupt handler needs to be installed
30915 diff -urNp linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c
30916 --- linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c 2011-05-19 00:06:34.000000000 -0400
30917 +++ linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c 2011-08-05 19:44:37.000000000 -0400
30918 @@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
30919 u32 actual_fibsize64, actual_fibsize = 0;
30920 int i;
30921
30922 + pax_track_stack();
30923
30924 if (dev->in_reset) {
30925 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
30926 diff -urNp linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c
30927 --- linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c 2011-05-19 00:06:34.000000000 -0400
30928 +++ linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c 2011-08-05 19:44:37.000000000 -0400
30929 @@ -486,7 +486,7 @@ static ssize_t asd_show_update_bios(stru
30930 flash_error_table[i].reason);
30931 }
30932
30933 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
30934 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
30935 asd_show_update_bios, asd_store_update_bios);
30936
30937 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
30938 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfad.c linux-2.6.39.4/drivers/scsi/bfa/bfad.c
30939 --- linux-2.6.39.4/drivers/scsi/bfa/bfad.c 2011-05-19 00:06:34.000000000 -0400
30940 +++ linux-2.6.39.4/drivers/scsi/bfa/bfad.c 2011-08-05 19:44:37.000000000 -0400
30941 @@ -1027,6 +1027,8 @@ bfad_start_ops(struct bfad_s *bfad) {
30942 struct bfad_vport_s *vport, *vport_new;
30943 struct bfa_fcs_driver_info_s driver_info;
30944
30945 + pax_track_stack();
30946 +
30947 /* Fill the driver_info info to fcs*/
30948 memset(&driver_info, 0, sizeof(driver_info));
30949 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
30950 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c
30951 --- linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-05-19 00:06:34.000000000 -0400
30952 +++ linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-05 19:44:37.000000000 -0400
30953 @@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
30954 u16 len, count;
30955 u16 templen;
30956
30957 + pax_track_stack();
30958 +
30959 /*
30960 * get hba attributes
30961 */
30962 @@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
30963 u8 count = 0;
30964 u16 templen;
30965
30966 + pax_track_stack();
30967 +
30968 /*
30969 * get port attributes
30970 */
30971 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c
30972 --- linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-05-19 00:06:34.000000000 -0400
30973 +++ linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-05 19:44:37.000000000 -0400
30974 @@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
30975 struct fc_rpsc_speed_info_s speeds;
30976 struct bfa_port_attr_s pport_attr;
30977
30978 + pax_track_stack();
30979 +
30980 bfa_trc(port->fcs, rx_fchs->s_id);
30981 bfa_trc(port->fcs, rx_fchs->d_id);
30982
30983 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa.h linux-2.6.39.4/drivers/scsi/bfa/bfa.h
30984 --- linux-2.6.39.4/drivers/scsi/bfa/bfa.h 2011-05-19 00:06:34.000000000 -0400
30985 +++ linux-2.6.39.4/drivers/scsi/bfa/bfa.h 2011-08-05 20:34:06.000000000 -0400
30986 @@ -238,7 +238,7 @@ struct bfa_hwif_s {
30987 u32 *nvecs, u32 *maxvec);
30988 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
30989 u32 *end);
30990 -};
30991 +} __no_const;
30992 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
30993
30994 struct bfa_iocfc_s {
30995 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h
30996 --- linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h 2011-05-19 00:06:34.000000000 -0400
30997 +++ linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h 2011-08-05 20:34:06.000000000 -0400
30998 @@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
30999 bfa_ioc_disable_cbfn_t disable_cbfn;
31000 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
31001 bfa_ioc_reset_cbfn_t reset_cbfn;
31002 -};
31003 +} __no_const;
31004
31005 /*
31006 * Heartbeat failure notification queue element.
31007 @@ -267,7 +267,7 @@ struct bfa_ioc_hwif_s {
31008 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
31009 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
31010 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
31011 -};
31012 +} __no_const;
31013
31014 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
31015 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
31016 diff -urNp linux-2.6.39.4/drivers/scsi/BusLogic.c linux-2.6.39.4/drivers/scsi/BusLogic.c
31017 --- linux-2.6.39.4/drivers/scsi/BusLogic.c 2011-05-19 00:06:34.000000000 -0400
31018 +++ linux-2.6.39.4/drivers/scsi/BusLogic.c 2011-08-05 19:44:37.000000000 -0400
31019 @@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
31020 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
31021 *PrototypeHostAdapter)
31022 {
31023 + pax_track_stack();
31024 +
31025 /*
31026 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
31027 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
31028 diff -urNp linux-2.6.39.4/drivers/scsi/dpt_i2o.c linux-2.6.39.4/drivers/scsi/dpt_i2o.c
31029 --- linux-2.6.39.4/drivers/scsi/dpt_i2o.c 2011-05-19 00:06:34.000000000 -0400
31030 +++ linux-2.6.39.4/drivers/scsi/dpt_i2o.c 2011-08-05 19:44:37.000000000 -0400
31031 @@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
31032 dma_addr_t addr;
31033 ulong flags = 0;
31034
31035 + pax_track_stack();
31036 +
31037 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
31038 // get user msg size in u32s
31039 if(get_user(size, &user_msg[0])){
31040 @@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
31041 s32 rcode;
31042 dma_addr_t addr;
31043
31044 + pax_track_stack();
31045 +
31046 memset(msg, 0 , sizeof(msg));
31047 len = scsi_bufflen(cmd);
31048 direction = 0x00000000;
31049 diff -urNp linux-2.6.39.4/drivers/scsi/eata.c linux-2.6.39.4/drivers/scsi/eata.c
31050 --- linux-2.6.39.4/drivers/scsi/eata.c 2011-05-19 00:06:34.000000000 -0400
31051 +++ linux-2.6.39.4/drivers/scsi/eata.c 2011-08-05 19:44:37.000000000 -0400
31052 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
31053 struct hostdata *ha;
31054 char name[16];
31055
31056 + pax_track_stack();
31057 +
31058 sprintf(name, "%s%d", driver_name, j);
31059
31060 if (!request_region(port_base, REGION_SIZE, driver_name)) {
31061 diff -urNp linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c
31062 --- linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-05-19 00:06:34.000000000 -0400
31063 +++ linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-05 20:34:06.000000000 -0400
31064 @@ -2458,6 +2458,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
31065 } buf;
31066 int rc;
31067
31068 + pax_track_stack();
31069 +
31070 fiph = (struct fip_header *)skb->data;
31071 sub = fiph->fip_subcode;
31072
31073 diff -urNp linux-2.6.39.4/drivers/scsi/gdth.c linux-2.6.39.4/drivers/scsi/gdth.c
31074 --- linux-2.6.39.4/drivers/scsi/gdth.c 2011-05-19 00:06:34.000000000 -0400
31075 +++ linux-2.6.39.4/drivers/scsi/gdth.c 2011-08-05 19:44:37.000000000 -0400
31076 @@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
31077 unsigned long flags;
31078 gdth_ha_str *ha;
31079
31080 + pax_track_stack();
31081 +
31082 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
31083 return -EFAULT;
31084 ha = gdth_find_ha(ldrv.ionode);
31085 @@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
31086 gdth_ha_str *ha;
31087 int rval;
31088
31089 + pax_track_stack();
31090 +
31091 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
31092 res.number >= MAX_HDRIVES)
31093 return -EFAULT;
31094 @@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
31095 gdth_ha_str *ha;
31096 int rval;
31097
31098 + pax_track_stack();
31099 +
31100 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
31101 return -EFAULT;
31102 ha = gdth_find_ha(gen.ionode);
31103 @@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
31104 int i;
31105 gdth_cmd_str gdtcmd;
31106 char cmnd[MAX_COMMAND_SIZE];
31107 +
31108 + pax_track_stack();
31109 +
31110 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
31111
31112 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
31113 diff -urNp linux-2.6.39.4/drivers/scsi/gdth_proc.c linux-2.6.39.4/drivers/scsi/gdth_proc.c
31114 --- linux-2.6.39.4/drivers/scsi/gdth_proc.c 2011-05-19 00:06:34.000000000 -0400
31115 +++ linux-2.6.39.4/drivers/scsi/gdth_proc.c 2011-08-05 19:44:37.000000000 -0400
31116 @@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
31117 u64 paddr;
31118
31119 char cmnd[MAX_COMMAND_SIZE];
31120 +
31121 + pax_track_stack();
31122 +
31123 memset(cmnd, 0xff, 12);
31124 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
31125
31126 @@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
31127 gdth_hget_str *phg;
31128 char cmnd[MAX_COMMAND_SIZE];
31129
31130 + pax_track_stack();
31131 +
31132 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
31133 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
31134 if (!gdtcmd || !estr)
31135 diff -urNp linux-2.6.39.4/drivers/scsi/hosts.c linux-2.6.39.4/drivers/scsi/hosts.c
31136 --- linux-2.6.39.4/drivers/scsi/hosts.c 2011-05-19 00:06:34.000000000 -0400
31137 +++ linux-2.6.39.4/drivers/scsi/hosts.c 2011-08-05 19:44:37.000000000 -0400
31138 @@ -42,7 +42,7 @@
31139 #include "scsi_logging.h"
31140
31141
31142 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
31143 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
31144
31145
31146 static void scsi_host_cls_release(struct device *dev)
31147 @@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
31148 * subtract one because we increment first then return, but we need to
31149 * know what the next host number was before increment
31150 */
31151 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
31152 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
31153 shost->dma_channel = 0xff;
31154
31155 /* These three are default values which can be overridden */
31156 diff -urNp linux-2.6.39.4/drivers/scsi/hpsa.c linux-2.6.39.4/drivers/scsi/hpsa.c
31157 --- linux-2.6.39.4/drivers/scsi/hpsa.c 2011-05-19 00:06:34.000000000 -0400
31158 +++ linux-2.6.39.4/drivers/scsi/hpsa.c 2011-08-05 20:34:06.000000000 -0400
31159 @@ -469,7 +469,7 @@ static inline u32 next_command(struct ct
31160 u32 a;
31161
31162 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
31163 - return h->access.command_completed(h);
31164 + return h->access->command_completed(h);
31165
31166 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
31167 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
31168 @@ -2889,7 +2889,7 @@ static void start_io(struct ctlr_info *h
31169 while (!list_empty(&h->reqQ)) {
31170 c = list_entry(h->reqQ.next, struct CommandList, list);
31171 /* can't do anything if fifo is full */
31172 - if ((h->access.fifo_full(h))) {
31173 + if ((h->access->fifo_full(h))) {
31174 dev_warn(&h->pdev->dev, "fifo full\n");
31175 break;
31176 }
31177 @@ -2899,7 +2899,7 @@ static void start_io(struct ctlr_info *h
31178 h->Qdepth--;
31179
31180 /* Tell the controller execute command */
31181 - h->access.submit_command(h, c);
31182 + h->access->submit_command(h, c);
31183
31184 /* Put job onto the completed Q */
31185 addQ(&h->cmpQ, c);
31186 @@ -2908,17 +2908,17 @@ static void start_io(struct ctlr_info *h
31187
31188 static inline unsigned long get_next_completion(struct ctlr_info *h)
31189 {
31190 - return h->access.command_completed(h);
31191 + return h->access->command_completed(h);
31192 }
31193
31194 static inline bool interrupt_pending(struct ctlr_info *h)
31195 {
31196 - return h->access.intr_pending(h);
31197 + return h->access->intr_pending(h);
31198 }
31199
31200 static inline long interrupt_not_for_us(struct ctlr_info *h)
31201 {
31202 - return (h->access.intr_pending(h) == 0) ||
31203 + return (h->access->intr_pending(h) == 0) ||
31204 (h->interrupts_enabled == 0);
31205 }
31206
31207 @@ -3684,7 +3684,7 @@ static int __devinit hpsa_pci_init(struc
31208 if (prod_index < 0)
31209 return -ENODEV;
31210 h->product_name = products[prod_index].product_name;
31211 - h->access = *(products[prod_index].access);
31212 + h->access = products[prod_index].access;
31213
31214 if (hpsa_board_disabled(h->pdev)) {
31215 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
31216 @@ -3845,7 +3845,7 @@ static int __devinit hpsa_init_one(struc
31217 }
31218
31219 /* make sure the board interrupts are off */
31220 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31221 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31222
31223 if (h->msix_vector || h->msi_vector)
31224 rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_msi,
31225 @@ -3892,7 +3892,7 @@ static int __devinit hpsa_init_one(struc
31226 hpsa_scsi_setup(h);
31227
31228 /* Turn the interrupts on so we can service requests */
31229 - h->access.set_intr_mask(h, HPSA_INTR_ON);
31230 + h->access->set_intr_mask(h, HPSA_INTR_ON);
31231
31232 hpsa_put_ctlr_into_performant_mode(h);
31233 hpsa_hba_inquiry(h);
31234 @@ -3955,7 +3955,7 @@ static void hpsa_shutdown(struct pci_dev
31235 * To write all data in the battery backed cache to disks
31236 */
31237 hpsa_flush_cache(h);
31238 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31239 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31240 free_irq(h->intr[h->intr_mode], h);
31241 #ifdef CONFIG_PCI_MSI
31242 if (h->msix_vector)
31243 @@ -4118,7 +4118,7 @@ static __devinit void hpsa_enter_perform
31244 return;
31245 }
31246 /* Change the access methods to the performant access methods */
31247 - h->access = SA5_performant_access;
31248 + h->access = &SA5_performant_access;
31249 h->transMethod = CFGTBL_Trans_Performant;
31250 }
31251
31252 diff -urNp linux-2.6.39.4/drivers/scsi/hpsa.h linux-2.6.39.4/drivers/scsi/hpsa.h
31253 --- linux-2.6.39.4/drivers/scsi/hpsa.h 2011-05-19 00:06:34.000000000 -0400
31254 +++ linux-2.6.39.4/drivers/scsi/hpsa.h 2011-08-05 20:34:06.000000000 -0400
31255 @@ -73,7 +73,7 @@ struct ctlr_info {
31256 unsigned int msix_vector;
31257 unsigned int msi_vector;
31258 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
31259 - struct access_method access;
31260 + struct access_method *access;
31261
31262 /* queue and queue Info */
31263 struct list_head reqQ;
31264 diff -urNp linux-2.6.39.4/drivers/scsi/ips.h linux-2.6.39.4/drivers/scsi/ips.h
31265 --- linux-2.6.39.4/drivers/scsi/ips.h 2011-05-19 00:06:34.000000000 -0400
31266 +++ linux-2.6.39.4/drivers/scsi/ips.h 2011-08-05 20:34:06.000000000 -0400
31267 @@ -1027,7 +1027,7 @@ typedef struct {
31268 int (*intr)(struct ips_ha *);
31269 void (*enableint)(struct ips_ha *);
31270 uint32_t (*statupd)(struct ips_ha *);
31271 -} ips_hw_func_t;
31272 +} __no_const ips_hw_func_t;
31273
31274 typedef struct ips_ha {
31275 uint8_t ha_id[IPS_MAX_CHANNELS+1];
31276 diff -urNp linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c
31277 --- linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c 2011-05-19 00:06:34.000000000 -0400
31278 +++ linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c 2011-08-05 19:44:37.000000000 -0400
31279 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
31280 * all together if not used XXX
31281 */
31282 struct {
31283 - atomic_t no_free_exch;
31284 - atomic_t no_free_exch_xid;
31285 - atomic_t xid_not_found;
31286 - atomic_t xid_busy;
31287 - atomic_t seq_not_found;
31288 - atomic_t non_bls_resp;
31289 + atomic_unchecked_t no_free_exch;
31290 + atomic_unchecked_t no_free_exch_xid;
31291 + atomic_unchecked_t xid_not_found;
31292 + atomic_unchecked_t xid_busy;
31293 + atomic_unchecked_t seq_not_found;
31294 + atomic_unchecked_t non_bls_resp;
31295 } stats;
31296 };
31297
31298 @@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
31299 /* allocate memory for exchange */
31300 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
31301 if (!ep) {
31302 - atomic_inc(&mp->stats.no_free_exch);
31303 + atomic_inc_unchecked(&mp->stats.no_free_exch);
31304 goto out;
31305 }
31306 memset(ep, 0, sizeof(*ep));
31307 @@ -761,7 +761,7 @@ out:
31308 return ep;
31309 err:
31310 spin_unlock_bh(&pool->lock);
31311 - atomic_inc(&mp->stats.no_free_exch_xid);
31312 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
31313 mempool_free(ep, mp->ep_pool);
31314 return NULL;
31315 }
31316 @@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31317 xid = ntohs(fh->fh_ox_id); /* we originated exch */
31318 ep = fc_exch_find(mp, xid);
31319 if (!ep) {
31320 - atomic_inc(&mp->stats.xid_not_found);
31321 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31322 reject = FC_RJT_OX_ID;
31323 goto out;
31324 }
31325 @@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31326 ep = fc_exch_find(mp, xid);
31327 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
31328 if (ep) {
31329 - atomic_inc(&mp->stats.xid_busy);
31330 + atomic_inc_unchecked(&mp->stats.xid_busy);
31331 reject = FC_RJT_RX_ID;
31332 goto rel;
31333 }
31334 @@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31335 }
31336 xid = ep->xid; /* get our XID */
31337 } else if (!ep) {
31338 - atomic_inc(&mp->stats.xid_not_found);
31339 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31340 reject = FC_RJT_RX_ID; /* XID not found */
31341 goto out;
31342 }
31343 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31344 } else {
31345 sp = &ep->seq;
31346 if (sp->id != fh->fh_seq_id) {
31347 - atomic_inc(&mp->stats.seq_not_found);
31348 + atomic_inc_unchecked(&mp->stats.seq_not_found);
31349 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
31350 goto rel;
31351 }
31352 @@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
31353
31354 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
31355 if (!ep) {
31356 - atomic_inc(&mp->stats.xid_not_found);
31357 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31358 goto out;
31359 }
31360 if (ep->esb_stat & ESB_ST_COMPLETE) {
31361 - atomic_inc(&mp->stats.xid_not_found);
31362 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31363 goto rel;
31364 }
31365 if (ep->rxid == FC_XID_UNKNOWN)
31366 ep->rxid = ntohs(fh->fh_rx_id);
31367 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
31368 - atomic_inc(&mp->stats.xid_not_found);
31369 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31370 goto rel;
31371 }
31372 if (ep->did != ntoh24(fh->fh_s_id) &&
31373 ep->did != FC_FID_FLOGI) {
31374 - atomic_inc(&mp->stats.xid_not_found);
31375 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31376 goto rel;
31377 }
31378 sof = fr_sof(fp);
31379 @@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
31380 sp->ssb_stat |= SSB_ST_RESP;
31381 sp->id = fh->fh_seq_id;
31382 } else if (sp->id != fh->fh_seq_id) {
31383 - atomic_inc(&mp->stats.seq_not_found);
31384 + atomic_inc_unchecked(&mp->stats.seq_not_found);
31385 goto rel;
31386 }
31387
31388 @@ -1479,9 +1479,9 @@ static void fc_exch_recv_resp(struct fc_
31389 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
31390
31391 if (!sp)
31392 - atomic_inc(&mp->stats.xid_not_found);
31393 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31394 else
31395 - atomic_inc(&mp->stats.non_bls_resp);
31396 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
31397
31398 fc_frame_free(fp);
31399 }
31400 diff -urNp linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c
31401 --- linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c 2011-05-19 00:06:34.000000000 -0400
31402 +++ linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c 2011-08-05 20:34:06.000000000 -0400
31403 @@ -314,7 +314,7 @@ static struct ata_port_operations sas_sa
31404 .postreset = ata_std_postreset,
31405 .error_handler = ata_std_error_handler,
31406 .post_internal_cmd = sas_ata_post_internal,
31407 - .qc_defer = ata_std_qc_defer,
31408 + .qc_defer = ata_std_qc_defer,
31409 .qc_prep = ata_noop_qc_prep,
31410 .qc_issue = sas_ata_qc_issue,
31411 .qc_fill_rtf = sas_ata_qc_fill_rtf,
31412 diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c
31413 --- linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-19 00:06:34.000000000 -0400
31414 +++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-05 19:44:37.000000000 -0400
31415 @@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
31416
31417 #include <linux/debugfs.h>
31418
31419 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31420 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31421 static unsigned long lpfc_debugfs_start_time = 0L;
31422
31423 /* iDiag */
31424 @@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
31425 lpfc_debugfs_enable = 0;
31426
31427 len = 0;
31428 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
31429 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
31430 (lpfc_debugfs_max_disc_trc - 1);
31431 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
31432 dtp = vport->disc_trc + i;
31433 @@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
31434 lpfc_debugfs_enable = 0;
31435
31436 len = 0;
31437 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
31438 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
31439 (lpfc_debugfs_max_slow_ring_trc - 1);
31440 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
31441 dtp = phba->slow_ring_trc + i;
31442 @@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
31443 uint32_t *ptr;
31444 char buffer[1024];
31445
31446 + pax_track_stack();
31447 +
31448 off = 0;
31449 spin_lock_irq(&phba->hbalock);
31450
31451 @@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
31452 !vport || !vport->disc_trc)
31453 return;
31454
31455 - index = atomic_inc_return(&vport->disc_trc_cnt) &
31456 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
31457 (lpfc_debugfs_max_disc_trc - 1);
31458 dtp = vport->disc_trc + index;
31459 dtp->fmt = fmt;
31460 dtp->data1 = data1;
31461 dtp->data2 = data2;
31462 dtp->data3 = data3;
31463 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31464 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31465 dtp->jif = jiffies;
31466 #endif
31467 return;
31468 @@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
31469 !phba || !phba->slow_ring_trc)
31470 return;
31471
31472 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
31473 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
31474 (lpfc_debugfs_max_slow_ring_trc - 1);
31475 dtp = phba->slow_ring_trc + index;
31476 dtp->fmt = fmt;
31477 dtp->data1 = data1;
31478 dtp->data2 = data2;
31479 dtp->data3 = data3;
31480 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31481 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31482 dtp->jif = jiffies;
31483 #endif
31484 return;
31485 @@ -2145,7 +2147,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31486 "slow_ring buffer\n");
31487 goto debug_failed;
31488 }
31489 - atomic_set(&phba->slow_ring_trc_cnt, 0);
31490 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
31491 memset(phba->slow_ring_trc, 0,
31492 (sizeof(struct lpfc_debugfs_trc) *
31493 lpfc_debugfs_max_slow_ring_trc));
31494 @@ -2191,7 +2193,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31495 "buffer\n");
31496 goto debug_failed;
31497 }
31498 - atomic_set(&vport->disc_trc_cnt, 0);
31499 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
31500
31501 snprintf(name, sizeof(name), "discovery_trace");
31502 vport->debug_disc_trc =
31503 diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h
31504 --- linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h 2011-05-19 00:06:34.000000000 -0400
31505 +++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h 2011-08-05 19:44:37.000000000 -0400
31506 @@ -419,7 +419,7 @@ struct lpfc_vport {
31507 struct dentry *debug_nodelist;
31508 struct dentry *vport_debugfs_root;
31509 struct lpfc_debugfs_trc *disc_trc;
31510 - atomic_t disc_trc_cnt;
31511 + atomic_unchecked_t disc_trc_cnt;
31512 #endif
31513 uint8_t stat_data_enabled;
31514 uint8_t stat_data_blocked;
31515 @@ -785,8 +785,8 @@ struct lpfc_hba {
31516 struct timer_list fabric_block_timer;
31517 unsigned long bit_flags;
31518 #define FABRIC_COMANDS_BLOCKED 0
31519 - atomic_t num_rsrc_err;
31520 - atomic_t num_cmd_success;
31521 + atomic_unchecked_t num_rsrc_err;
31522 + atomic_unchecked_t num_cmd_success;
31523 unsigned long last_rsrc_error_time;
31524 unsigned long last_ramp_down_time;
31525 unsigned long last_ramp_up_time;
31526 @@ -800,7 +800,7 @@ struct lpfc_hba {
31527 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
31528 struct dentry *debug_slow_ring_trc;
31529 struct lpfc_debugfs_trc *slow_ring_trc;
31530 - atomic_t slow_ring_trc_cnt;
31531 + atomic_unchecked_t slow_ring_trc_cnt;
31532 /* iDiag debugfs sub-directory */
31533 struct dentry *idiag_root;
31534 struct dentry *idiag_pci_cfg;
31535 diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c
31536 --- linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c 2011-05-19 00:06:34.000000000 -0400
31537 +++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c 2011-08-05 20:34:06.000000000 -0400
31538 @@ -9535,8 +9535,10 @@ lpfc_init(void)
31539 printk(LPFC_COPYRIGHT "\n");
31540
31541 if (lpfc_enable_npiv) {
31542 - lpfc_transport_functions.vport_create = lpfc_vport_create;
31543 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31544 + pax_open_kernel();
31545 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
31546 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31547 + pax_close_kernel();
31548 }
31549 lpfc_transport_template =
31550 fc_attach_transport(&lpfc_transport_functions);
31551 diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c
31552 --- linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-19 00:06:34.000000000 -0400
31553 +++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-08-05 19:44:37.000000000 -0400
31554 @@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
31555 uint32_t evt_posted;
31556
31557 spin_lock_irqsave(&phba->hbalock, flags);
31558 - atomic_inc(&phba->num_rsrc_err);
31559 + atomic_inc_unchecked(&phba->num_rsrc_err);
31560 phba->last_rsrc_error_time = jiffies;
31561
31562 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
31563 @@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
31564 unsigned long flags;
31565 struct lpfc_hba *phba = vport->phba;
31566 uint32_t evt_posted;
31567 - atomic_inc(&phba->num_cmd_success);
31568 + atomic_inc_unchecked(&phba->num_cmd_success);
31569
31570 if (vport->cfg_lun_queue_depth <= queue_depth)
31571 return;
31572 @@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31573 unsigned long num_rsrc_err, num_cmd_success;
31574 int i;
31575
31576 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
31577 - num_cmd_success = atomic_read(&phba->num_cmd_success);
31578 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
31579 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
31580
31581 vports = lpfc_create_vport_work_array(phba);
31582 if (vports != NULL)
31583 @@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31584 }
31585 }
31586 lpfc_destroy_vport_work_array(phba, vports);
31587 - atomic_set(&phba->num_rsrc_err, 0);
31588 - atomic_set(&phba->num_cmd_success, 0);
31589 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
31590 + atomic_set_unchecked(&phba->num_cmd_success, 0);
31591 }
31592
31593 /**
31594 @@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
31595 }
31596 }
31597 lpfc_destroy_vport_work_array(phba, vports);
31598 - atomic_set(&phba->num_rsrc_err, 0);
31599 - atomic_set(&phba->num_cmd_success, 0);
31600 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
31601 + atomic_set_unchecked(&phba->num_cmd_success, 0);
31602 }
31603
31604 /**
31605 diff -urNp linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c
31606 --- linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-19 00:06:34.000000000 -0400
31607 +++ linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-05 19:44:37.000000000 -0400
31608 @@ -3510,6 +3510,8 @@ megaraid_cmm_register(adapter_t *adapter
31609 int rval;
31610 int i;
31611
31612 + pax_track_stack();
31613 +
31614 // Allocate memory for the base list of scb for management module.
31615 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
31616
31617 diff -urNp linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c
31618 --- linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c 2011-05-19 00:06:34.000000000 -0400
31619 +++ linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c 2011-08-05 19:44:37.000000000 -0400
31620 @@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
31621 int nelem = ARRAY_SIZE(get_attrs), a = 0;
31622 int ret;
31623
31624 + pax_track_stack();
31625 +
31626 or = osd_start_request(od, GFP_KERNEL);
31627 if (!or)
31628 return -ENOMEM;
31629 diff -urNp linux-2.6.39.4/drivers/scsi/pmcraid.c linux-2.6.39.4/drivers/scsi/pmcraid.c
31630 --- linux-2.6.39.4/drivers/scsi/pmcraid.c 2011-05-19 00:06:34.000000000 -0400
31631 +++ linux-2.6.39.4/drivers/scsi/pmcraid.c 2011-08-05 19:44:37.000000000 -0400
31632 @@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
31633 res->scsi_dev = scsi_dev;
31634 scsi_dev->hostdata = res;
31635 res->change_detected = 0;
31636 - atomic_set(&res->read_failures, 0);
31637 - atomic_set(&res->write_failures, 0);
31638 + atomic_set_unchecked(&res->read_failures, 0);
31639 + atomic_set_unchecked(&res->write_failures, 0);
31640 rc = 0;
31641 }
31642 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
31643 @@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
31644
31645 /* If this was a SCSI read/write command keep count of errors */
31646 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
31647 - atomic_inc(&res->read_failures);
31648 + atomic_inc_unchecked(&res->read_failures);
31649 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
31650 - atomic_inc(&res->write_failures);
31651 + atomic_inc_unchecked(&res->write_failures);
31652
31653 if (!RES_IS_GSCSI(res->cfg_entry) &&
31654 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
31655 @@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
31656 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31657 * hrrq_id assigned here in queuecommand
31658 */
31659 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31660 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31661 pinstance->num_hrrq;
31662 cmd->cmd_done = pmcraid_io_done;
31663
31664 @@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
31665 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31666 * hrrq_id assigned here in queuecommand
31667 */
31668 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31669 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31670 pinstance->num_hrrq;
31671
31672 if (request_size) {
31673 @@ -4495,7 +4495,7 @@ static void pmcraid_worker_function(stru
31674
31675 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
31676 /* add resources only after host is added into system */
31677 - if (!atomic_read(&pinstance->expose_resources))
31678 + if (!atomic_read_unchecked(&pinstance->expose_resources))
31679 return;
31680
31681 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
31682 @@ -5329,8 +5329,8 @@ static int __devinit pmcraid_init_instan
31683 init_waitqueue_head(&pinstance->reset_wait_q);
31684
31685 atomic_set(&pinstance->outstanding_cmds, 0);
31686 - atomic_set(&pinstance->last_message_id, 0);
31687 - atomic_set(&pinstance->expose_resources, 0);
31688 + atomic_set_unchecked(&pinstance->last_message_id, 0);
31689 + atomic_set_unchecked(&pinstance->expose_resources, 0);
31690
31691 INIT_LIST_HEAD(&pinstance->free_res_q);
31692 INIT_LIST_HEAD(&pinstance->used_res_q);
31693 @@ -6045,7 +6045,7 @@ static int __devinit pmcraid_probe(
31694 /* Schedule worker thread to handle CCN and take care of adding and
31695 * removing devices to OS
31696 */
31697 - atomic_set(&pinstance->expose_resources, 1);
31698 + atomic_set_unchecked(&pinstance->expose_resources, 1);
31699 schedule_work(&pinstance->worker_q);
31700 return rc;
31701
31702 diff -urNp linux-2.6.39.4/drivers/scsi/pmcraid.h linux-2.6.39.4/drivers/scsi/pmcraid.h
31703 --- linux-2.6.39.4/drivers/scsi/pmcraid.h 2011-05-19 00:06:34.000000000 -0400
31704 +++ linux-2.6.39.4/drivers/scsi/pmcraid.h 2011-08-05 19:44:37.000000000 -0400
31705 @@ -750,7 +750,7 @@ struct pmcraid_instance {
31706 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
31707
31708 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
31709 - atomic_t last_message_id;
31710 + atomic_unchecked_t last_message_id;
31711
31712 /* configuration table */
31713 struct pmcraid_config_table *cfg_table;
31714 @@ -779,7 +779,7 @@ struct pmcraid_instance {
31715 atomic_t outstanding_cmds;
31716
31717 /* should add/delete resources to mid-layer now ?*/
31718 - atomic_t expose_resources;
31719 + atomic_unchecked_t expose_resources;
31720
31721
31722
31723 @@ -815,8 +815,8 @@ struct pmcraid_resource_entry {
31724 struct pmcraid_config_table_entry_ext cfg_entry_ext;
31725 };
31726 struct scsi_device *scsi_dev; /* Link scsi_device structure */
31727 - atomic_t read_failures; /* count of failed READ commands */
31728 - atomic_t write_failures; /* count of failed WRITE commands */
31729 + atomic_unchecked_t read_failures; /* count of failed READ commands */
31730 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
31731
31732 /* To indicate add/delete/modify during CCN */
31733 u8 change_detected;
31734 diff -urNp linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h
31735 --- linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h 2011-05-19 00:06:34.000000000 -0400
31736 +++ linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h 2011-08-05 20:34:06.000000000 -0400
31737 @@ -2236,7 +2236,7 @@ struct isp_operations {
31738 int (*get_flash_version) (struct scsi_qla_host *, void *);
31739 int (*start_scsi) (srb_t *);
31740 int (*abort_isp) (struct scsi_qla_host *);
31741 -};
31742 +} __no_const;
31743
31744 /* MSI-X Support *************************************************************/
31745
31746 diff -urNp linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h
31747 --- linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h 2011-05-19 00:06:34.000000000 -0400
31748 +++ linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h 2011-08-05 19:44:37.000000000 -0400
31749 @@ -256,7 +256,7 @@ struct ddb_entry {
31750 atomic_t retry_relogin_timer; /* Min Time between relogins
31751 * (4000 only) */
31752 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
31753 - atomic_t relogin_retry_count; /* Num of times relogin has been
31754 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
31755 * retried */
31756
31757 uint16_t port;
31758 diff -urNp linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c
31759 --- linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c 2011-05-19 00:06:34.000000000 -0400
31760 +++ linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c 2011-08-05 19:44:37.000000000 -0400
31761 @@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
31762 ddb_entry->fw_ddb_index = fw_ddb_index;
31763 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
31764 atomic_set(&ddb_entry->relogin_timer, 0);
31765 - atomic_set(&ddb_entry->relogin_retry_count, 0);
31766 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31767 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31768 list_add_tail(&ddb_entry->list, &ha->ddb_list);
31769 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
31770 @@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
31771 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
31772 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
31773 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31774 - atomic_set(&ddb_entry->relogin_retry_count, 0);
31775 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31776 atomic_set(&ddb_entry->relogin_timer, 0);
31777 clear_bit(DF_RELOGIN, &ddb_entry->flags);
31778 iscsi_unblock_session(ddb_entry->sess);
31779 diff -urNp linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c
31780 --- linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c 2011-05-19 00:06:34.000000000 -0400
31781 +++ linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c 2011-08-05 19:44:37.000000000 -0400
31782 @@ -802,13 +802,13 @@ static void qla4xxx_timer(struct scsi_ql
31783 ddb_entry->fw_ddb_device_state ==
31784 DDB_DS_SESSION_FAILED) {
31785 /* Reset retry relogin timer */
31786 - atomic_inc(&ddb_entry->relogin_retry_count);
31787 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
31788 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
31789 " timed out-retrying"
31790 " relogin (%d)\n",
31791 ha->host_no,
31792 ddb_entry->fw_ddb_index,
31793 - atomic_read(&ddb_entry->
31794 + atomic_read_unchecked(&ddb_entry->
31795 relogin_retry_count))
31796 );
31797 start_dpc++;
31798 diff -urNp linux-2.6.39.4/drivers/scsi/scsi.c linux-2.6.39.4/drivers/scsi/scsi.c
31799 --- linux-2.6.39.4/drivers/scsi/scsi.c 2011-05-19 00:06:34.000000000 -0400
31800 +++ linux-2.6.39.4/drivers/scsi/scsi.c 2011-08-05 19:44:37.000000000 -0400
31801 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
31802 unsigned long timeout;
31803 int rtn = 0;
31804
31805 - atomic_inc(&cmd->device->iorequest_cnt);
31806 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31807
31808 /* check if the device is still usable */
31809 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
31810 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_debug.c linux-2.6.39.4/drivers/scsi/scsi_debug.c
31811 --- linux-2.6.39.4/drivers/scsi/scsi_debug.c 2011-05-19 00:06:34.000000000 -0400
31812 +++ linux-2.6.39.4/drivers/scsi/scsi_debug.c 2011-08-05 19:44:37.000000000 -0400
31813 @@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
31814 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
31815 unsigned char *cmd = (unsigned char *)scp->cmnd;
31816
31817 + pax_track_stack();
31818 +
31819 if ((errsts = check_readiness(scp, 1, devip)))
31820 return errsts;
31821 memset(arr, 0, sizeof(arr));
31822 @@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
31823 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
31824 unsigned char *cmd = (unsigned char *)scp->cmnd;
31825
31826 + pax_track_stack();
31827 +
31828 if ((errsts = check_readiness(scp, 1, devip)))
31829 return errsts;
31830 memset(arr, 0, sizeof(arr));
31831 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_lib.c linux-2.6.39.4/drivers/scsi/scsi_lib.c
31832 --- linux-2.6.39.4/drivers/scsi/scsi_lib.c 2011-05-19 00:06:34.000000000 -0400
31833 +++ linux-2.6.39.4/drivers/scsi/scsi_lib.c 2011-08-05 19:44:37.000000000 -0400
31834 @@ -1410,7 +1410,7 @@ static void scsi_kill_request(struct req
31835 shost = sdev->host;
31836 scsi_init_cmd_errh(cmd);
31837 cmd->result = DID_NO_CONNECT << 16;
31838 - atomic_inc(&cmd->device->iorequest_cnt);
31839 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31840
31841 /*
31842 * SCSI request completion path will do scsi_device_unbusy(),
31843 @@ -1436,9 +1436,9 @@ static void scsi_softirq_done(struct req
31844
31845 INIT_LIST_HEAD(&cmd->eh_entry);
31846
31847 - atomic_inc(&cmd->device->iodone_cnt);
31848 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
31849 if (cmd->result)
31850 - atomic_inc(&cmd->device->ioerr_cnt);
31851 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
31852
31853 disposition = scsi_decide_disposition(cmd);
31854 if (disposition != SUCCESS &&
31855 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_sysfs.c linux-2.6.39.4/drivers/scsi/scsi_sysfs.c
31856 --- linux-2.6.39.4/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:22.000000000 -0400
31857 +++ linux-2.6.39.4/drivers/scsi/scsi_sysfs.c 2011-08-05 19:44:37.000000000 -0400
31858 @@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
31859 char *buf) \
31860 { \
31861 struct scsi_device *sdev = to_scsi_device(dev); \
31862 - unsigned long long count = atomic_read(&sdev->field); \
31863 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
31864 return snprintf(buf, 20, "0x%llx\n", count); \
31865 } \
31866 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
31867 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c
31868 --- linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c 2011-05-19 00:06:34.000000000 -0400
31869 +++ linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c 2011-08-05 19:44:37.000000000 -0400
31870 @@ -485,7 +485,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
31871 * Netlink Infrastructure
31872 */
31873
31874 -static atomic_t fc_event_seq;
31875 +static atomic_unchecked_t fc_event_seq;
31876
31877 /**
31878 * fc_get_event_number - Obtain the next sequential FC event number
31879 @@ -498,7 +498,7 @@ static atomic_t fc_event_seq;
31880 u32
31881 fc_get_event_number(void)
31882 {
31883 - return atomic_add_return(1, &fc_event_seq);
31884 + return atomic_add_return_unchecked(1, &fc_event_seq);
31885 }
31886 EXPORT_SYMBOL(fc_get_event_number);
31887
31888 @@ -646,7 +646,7 @@ static __init int fc_transport_init(void
31889 {
31890 int error;
31891
31892 - atomic_set(&fc_event_seq, 0);
31893 + atomic_set_unchecked(&fc_event_seq, 0);
31894
31895 error = transport_class_register(&fc_host_class);
31896 if (error)
31897 @@ -836,7 +836,7 @@ static int fc_str_to_dev_loss(const char
31898 char *cp;
31899
31900 *val = simple_strtoul(buf, &cp, 0);
31901 - if ((*cp && (*cp != '\n')) || (*val < 0))
31902 + if (*cp && (*cp != '\n'))
31903 return -EINVAL;
31904 /*
31905 * Check for overflow; dev_loss_tmo is u32
31906 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c
31907 --- linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c 2011-05-19 00:06:34.000000000 -0400
31908 +++ linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c 2011-08-05 19:44:37.000000000 -0400
31909 @@ -83,7 +83,7 @@ struct iscsi_internal {
31910 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
31911 };
31912
31913 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
31914 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
31915 static struct workqueue_struct *iscsi_eh_timer_workq;
31916
31917 /*
31918 @@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
31919 int err;
31920
31921 ihost = shost->shost_data;
31922 - session->sid = atomic_add_return(1, &iscsi_session_nr);
31923 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
31924
31925 if (id == ISCSI_MAX_TARGET) {
31926 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
31927 @@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
31928 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
31929 ISCSI_TRANSPORT_VERSION);
31930
31931 - atomic_set(&iscsi_session_nr, 0);
31932 + atomic_set_unchecked(&iscsi_session_nr, 0);
31933
31934 err = class_register(&iscsi_transport_class);
31935 if (err)
31936 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c
31937 --- linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c 2011-05-19 00:06:34.000000000 -0400
31938 +++ linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c 2011-08-05 19:44:37.000000000 -0400
31939 @@ -33,7 +33,7 @@
31940 #include "scsi_transport_srp_internal.h"
31941
31942 struct srp_host_attrs {
31943 - atomic_t next_port_id;
31944 + atomic_unchecked_t next_port_id;
31945 };
31946 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
31947
31948 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
31949 struct Scsi_Host *shost = dev_to_shost(dev);
31950 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
31951
31952 - atomic_set(&srp_host->next_port_id, 0);
31953 + atomic_set_unchecked(&srp_host->next_port_id, 0);
31954 return 0;
31955 }
31956
31957 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
31958 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
31959 rport->roles = ids->roles;
31960
31961 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
31962 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
31963 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
31964
31965 transport_setup_device(&rport->dev);
31966 diff -urNp linux-2.6.39.4/drivers/scsi/sg.c linux-2.6.39.4/drivers/scsi/sg.c
31967 --- linux-2.6.39.4/drivers/scsi/sg.c 2011-05-19 00:06:34.000000000 -0400
31968 +++ linux-2.6.39.4/drivers/scsi/sg.c 2011-08-05 19:44:37.000000000 -0400
31969 @@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
31970 const struct file_operations * fops;
31971 };
31972
31973 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
31974 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
31975 {"allow_dio", &adio_fops},
31976 {"debug", &debug_fops},
31977 {"def_reserved_size", &dressz_fops},
31978 @@ -2325,7 +2325,7 @@ sg_proc_init(void)
31979 {
31980 int k, mask;
31981 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
31982 - struct sg_proc_leaf * leaf;
31983 + const struct sg_proc_leaf * leaf;
31984
31985 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
31986 if (!sg_proc_sgp)
31987 diff -urNp linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c
31988 --- linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-19 00:06:34.000000000 -0400
31989 +++ linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-05 19:44:37.000000000 -0400
31990 @@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
31991 int do_iounmap = 0;
31992 int do_disable_device = 1;
31993
31994 + pax_track_stack();
31995 +
31996 memset(&sym_dev, 0, sizeof(sym_dev));
31997 memset(&nvram, 0, sizeof(nvram));
31998 sym_dev.pdev = pdev;
31999 diff -urNp linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c
32000 --- linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c 2011-05-19 00:06:34.000000000 -0400
32001 +++ linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c 2011-08-05 19:44:37.000000000 -0400
32002 @@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
32003 dma_addr_t base;
32004 unsigned i;
32005
32006 + pax_track_stack();
32007 +
32008 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
32009 cmd.reqRingNumPages = adapter->req_pages;
32010 cmd.cmpRingNumPages = adapter->cmp_pages;
32011 diff -urNp linux-2.6.39.4/drivers/spi/spi.c linux-2.6.39.4/drivers/spi/spi.c
32012 --- linux-2.6.39.4/drivers/spi/spi.c 2011-05-19 00:06:34.000000000 -0400
32013 +++ linux-2.6.39.4/drivers/spi/spi.c 2011-08-05 19:44:37.000000000 -0400
32014 @@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
32015 EXPORT_SYMBOL_GPL(spi_bus_unlock);
32016
32017 /* portable code must never pass more than 32 bytes */
32018 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
32019 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
32020
32021 static u8 *buf;
32022
32023 diff -urNp linux-2.6.39.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-2.6.39.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c
32024 --- linux-2.6.39.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-05-19 00:06:34.000000000 -0400
32025 +++ linux-2.6.39.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-14 12:12:59.000000000 -0400
32026 @@ -384,7 +384,7 @@ static struct ar_cookie s_ar_cookie_mem[
32027 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
32028
32029
32030 -static struct net_device_ops ar6000_netdev_ops = {
32031 +static net_device_ops_no_const ar6000_netdev_ops = {
32032 .ndo_init = NULL,
32033 .ndo_open = ar6000_open,
32034 .ndo_stop = ar6000_close,
32035 diff -urNp linux-2.6.39.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-2.6.39.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
32036 --- linux-2.6.39.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-05-19 00:06:34.000000000 -0400
32037 +++ linux-2.6.39.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-08-14 09:32:05.000000000 -0400
32038 @@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
32039 typedef struct ar6k_pal_config_s
32040 {
32041 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
32042 -}ar6k_pal_config_t;
32043 +} __no_const ar6k_pal_config_t;
32044
32045 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
32046 #endif /* _AR6K_PAL_H_ */
32047 diff -urNp linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
32048 --- linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-05-19 00:06:34.000000000 -0400
32049 +++ linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-05 20:34:06.000000000 -0400
32050 @@ -857,14 +857,14 @@ static void dhd_op_if(dhd_if_t *ifp)
32051 free_netdev(ifp->net);
32052 }
32053 /* Allocate etherdev, including space for private structure */
32054 - ifp->net = alloc_etherdev(sizeof(dhd));
32055 + ifp->net = alloc_etherdev(sizeof(*dhd));
32056 if (!ifp->net) {
32057 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
32058 ret = -ENOMEM;
32059 }
32060 if (ret == 0) {
32061 strcpy(ifp->net->name, ifp->name);
32062 - memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
32063 + memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
32064 err = dhd_net_attach(&dhd->pub, ifp->idx);
32065 if (err != 0) {
32066 DHD_ERROR(("%s: dhd_net_attach failed, "
32067 @@ -1923,7 +1923,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32068 strcpy(nv_path, nvram_path);
32069
32070 /* Allocate etherdev, including space for private structure */
32071 - net = alloc_etherdev(sizeof(dhd));
32072 + net = alloc_etherdev(sizeof(*dhd));
32073 if (!net) {
32074 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
32075 goto fail;
32076 @@ -1939,7 +1939,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32077 /*
32078 * Save the dhd_info into the priv
32079 */
32080 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
32081 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
32082
32083 /* Set network interface name if it was provided as module parameter */
32084 if (iface_name[0]) {
32085 @@ -2056,7 +2056,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32086 /*
32087 * Save the dhd_info into the priv
32088 */
32089 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
32090 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
32091
32092 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
32093 g_bus = bus;
32094 diff -urNp linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c
32095 --- linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c 2011-05-19 00:06:34.000000000 -0400
32096 +++ linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c 2011-08-05 19:44:37.000000000 -0400
32097 @@ -495,7 +495,7 @@ wl_iw_get_range(struct net_device *dev,
32098 list = (wl_u32_list_t *) channels;
32099
32100 dwrq->length = sizeof(struct iw_range);
32101 - memset(range, 0, sizeof(range));
32102 + memset(range, 0, sizeof(*range));
32103
32104 range->min_nwid = range->max_nwid = 0;
32105
32106 diff -urNp linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c
32107 --- linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c 2011-05-19 00:06:34.000000000 -0400
32108 +++ linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c 2011-08-05 19:44:37.000000000 -0400
32109 @@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
32110 struct net_device_stats *stats = &etdev->net_stats;
32111
32112 if (tcb->flags & fMP_DEST_BROAD)
32113 - atomic_inc(&etdev->Stats.brdcstxmt);
32114 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
32115 else if (tcb->flags & fMP_DEST_MULTI)
32116 - atomic_inc(&etdev->Stats.multixmt);
32117 + atomic_inc_unchecked(&etdev->Stats.multixmt);
32118 else
32119 - atomic_inc(&etdev->Stats.unixmt);
32120 + atomic_inc_unchecked(&etdev->Stats.unixmt);
32121
32122 if (tcb->skb) {
32123 stats->tx_bytes += tcb->skb->len;
32124 diff -urNp linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h
32125 --- linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h 2011-05-19 00:06:34.000000000 -0400
32126 +++ linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h 2011-08-05 19:44:37.000000000 -0400
32127 @@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
32128 * operations
32129 */
32130 u32 unircv; /* # multicast packets received */
32131 - atomic_t unixmt; /* # multicast packets for Tx */
32132 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
32133 u32 multircv; /* # multicast packets received */
32134 - atomic_t multixmt; /* # multicast packets for Tx */
32135 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
32136 u32 brdcstrcv; /* # broadcast packets received */
32137 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
32138 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
32139 u32 norcvbuf; /* # Rx packets discarded */
32140 u32 noxmtbuf; /* # Tx packets discarded */
32141
32142 diff -urNp linux-2.6.39.4/drivers/staging/gma500/psb_ttm_glue.c linux-2.6.39.4/drivers/staging/gma500/psb_ttm_glue.c
32143 --- linux-2.6.39.4/drivers/staging/gma500/psb_ttm_glue.c 2011-05-19 00:06:34.000000000 -0400
32144 +++ linux-2.6.39.4/drivers/staging/gma500/psb_ttm_glue.c 2011-08-14 12:25:25.000000000 -0400
32145 @@ -230,8 +230,10 @@ int psb_mmap(struct file *filp, struct v
32146 if (unlikely(dev_priv->ttm_vm_ops == NULL)) {
32147 dev_priv->ttm_vm_ops = (struct vm_operations_struct *)
32148 vma->vm_ops;
32149 - psb_ttm_vm_ops = *vma->vm_ops;
32150 - psb_ttm_vm_ops.fault = &psb_ttm_fault;
32151 + pax_open_kernel();
32152 + memcpy((void *)&psb_ttm_vm_ops, vma->vm_ops, sizeof(psb_ttm_vm_ops));
32153 + *(void **)&psb_ttm_vm_ops.fault = &psb_ttm_fault;
32154 + pax_close_kernel();
32155 }
32156
32157 vma->vm_ops = &psb_ttm_vm_ops;
32158 diff -urNp linux-2.6.39.4/drivers/staging/hv/channel.c linux-2.6.39.4/drivers/staging/hv/channel.c
32159 --- linux-2.6.39.4/drivers/staging/hv/channel.c 2011-05-19 00:06:34.000000000 -0400
32160 +++ linux-2.6.39.4/drivers/staging/hv/channel.c 2011-08-05 19:44:37.000000000 -0400
32161 @@ -509,8 +509,8 @@ int vmbus_establish_gpadl(struct vmbus_c
32162 unsigned long flags;
32163 int ret = 0;
32164
32165 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
32166 - atomic_inc(&vmbus_connection.next_gpadl_handle);
32167 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
32168 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
32169
32170 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
32171 if (ret)
32172 diff -urNp linux-2.6.39.4/drivers/staging/hv/hv.c linux-2.6.39.4/drivers/staging/hv/hv.c
32173 --- linux-2.6.39.4/drivers/staging/hv/hv.c 2011-05-19 00:06:34.000000000 -0400
32174 +++ linux-2.6.39.4/drivers/staging/hv/hv.c 2011-08-05 19:44:37.000000000 -0400
32175 @@ -163,7 +163,7 @@ static u64 do_hypercall(u64 control, voi
32176 u64 output_address = (output) ? virt_to_phys(output) : 0;
32177 u32 output_address_hi = output_address >> 32;
32178 u32 output_address_lo = output_address & 0xFFFFFFFF;
32179 - volatile void *hypercall_page = hv_context.hypercall_page;
32180 + volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
32181
32182 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
32183 control, input, output);
32184 diff -urNp linux-2.6.39.4/drivers/staging/hv/hv_mouse.c linux-2.6.39.4/drivers/staging/hv/hv_mouse.c
32185 --- linux-2.6.39.4/drivers/staging/hv/hv_mouse.c 2011-05-19 00:06:34.000000000 -0400
32186 +++ linux-2.6.39.4/drivers/staging/hv/hv_mouse.c 2011-08-13 20:26:10.000000000 -0400
32187 @@ -898,8 +898,10 @@ static void reportdesc_callback(struct h
32188 if (hid_dev) {
32189 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
32190
32191 - hid_dev->ll_driver->open = mousevsc_hid_open;
32192 - hid_dev->ll_driver->close = mousevsc_hid_close;
32193 + pax_open_kernel();
32194 + *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
32195 + *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
32196 + pax_close_kernel();
32197
32198 hid_dev->bus = BUS_VIRTUAL;
32199 hid_dev->vendor = input_device_ctx->device_info.vendor;
32200 diff -urNp linux-2.6.39.4/drivers/staging/hv/rndis_filter.c linux-2.6.39.4/drivers/staging/hv/rndis_filter.c
32201 --- linux-2.6.39.4/drivers/staging/hv/rndis_filter.c 2011-05-19 00:06:34.000000000 -0400
32202 +++ linux-2.6.39.4/drivers/staging/hv/rndis_filter.c 2011-08-05 19:44:37.000000000 -0400
32203 @@ -49,7 +49,7 @@ struct rndis_device {
32204
32205 enum rndis_device_state state;
32206 u32 link_stat;
32207 - atomic_t new_req_id;
32208 + atomic_unchecked_t new_req_id;
32209
32210 spinlock_t request_lock;
32211 struct list_head req_list;
32212 @@ -144,7 +144,7 @@ static struct rndis_request *get_rndis_r
32213 * template
32214 */
32215 set = &rndis_msg->msg.set_req;
32216 - set->req_id = atomic_inc_return(&dev->new_req_id);
32217 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32218
32219 /* Add to the request list */
32220 spin_lock_irqsave(&dev->request_lock, flags);
32221 @@ -709,7 +709,7 @@ static void rndis_filter_halt_device(str
32222
32223 /* Setup the rndis set */
32224 halt = &request->request_msg.msg.halt_req;
32225 - halt->req_id = atomic_inc_return(&dev->new_req_id);
32226 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32227
32228 /* Ignore return since this msg is optional. */
32229 rndis_filter_send_request(dev, request);
32230 diff -urNp linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c
32231 --- linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c 2011-05-19 00:06:34.000000000 -0400
32232 +++ linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c 2011-08-05 19:44:37.000000000 -0400
32233 @@ -661,14 +661,14 @@ int vmbus_child_device_register(struct h
32234 {
32235 int ret = 0;
32236
32237 - static atomic_t device_num = ATOMIC_INIT(0);
32238 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
32239
32240 DPRINT_DBG(VMBUS_DRV, "child device (%p) registering",
32241 child_device_obj);
32242
32243 /* Set the device name. Otherwise, device_register() will fail. */
32244 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
32245 - atomic_inc_return(&device_num));
32246 + atomic_inc_return_unchecked(&device_num));
32247
32248 /* The new device belongs to this bus */
32249 child_device_obj->device.bus = &vmbus_drv.bus; /* device->dev.bus; */
32250 diff -urNp linux-2.6.39.4/drivers/staging/hv/vmbus_private.h linux-2.6.39.4/drivers/staging/hv/vmbus_private.h
32251 --- linux-2.6.39.4/drivers/staging/hv/vmbus_private.h 2011-05-19 00:06:34.000000000 -0400
32252 +++ linux-2.6.39.4/drivers/staging/hv/vmbus_private.h 2011-08-05 19:44:37.000000000 -0400
32253 @@ -58,7 +58,7 @@ enum vmbus_connect_state {
32254 struct vmbus_connection {
32255 enum vmbus_connect_state conn_state;
32256
32257 - atomic_t next_gpadl_handle;
32258 + atomic_unchecked_t next_gpadl_handle;
32259
32260 /*
32261 * Represents channel interrupts. Each bit position represents a
32262 diff -urNp linux-2.6.39.4/drivers/staging/iio/ring_generic.h linux-2.6.39.4/drivers/staging/iio/ring_generic.h
32263 --- linux-2.6.39.4/drivers/staging/iio/ring_generic.h 2011-05-19 00:06:34.000000000 -0400
32264 +++ linux-2.6.39.4/drivers/staging/iio/ring_generic.h 2011-08-13 20:14:25.000000000 -0400
32265 @@ -86,7 +86,7 @@ struct iio_ring_access_funcs {
32266
32267 int (*is_enabled)(struct iio_ring_buffer *ring);
32268 int (*enable)(struct iio_ring_buffer *ring);
32269 -};
32270 +} __no_const;
32271
32272 /**
32273 * struct iio_ring_buffer - general ring buffer structure
32274 @@ -134,7 +134,7 @@ struct iio_ring_buffer {
32275 struct iio_handler access_handler;
32276 struct iio_event_interface ev_int;
32277 struct iio_shared_ev_pointer shared_ev_pointer;
32278 - struct iio_ring_access_funcs access;
32279 + struct iio_ring_access_funcs access;
32280 int (*preenable)(struct iio_dev *);
32281 int (*postenable)(struct iio_dev *);
32282 int (*predisable)(struct iio_dev *);
32283 diff -urNp linux-2.6.39.4/drivers/staging/octeon/ethernet.c linux-2.6.39.4/drivers/staging/octeon/ethernet.c
32284 --- linux-2.6.39.4/drivers/staging/octeon/ethernet.c 2011-05-19 00:06:34.000000000 -0400
32285 +++ linux-2.6.39.4/drivers/staging/octeon/ethernet.c 2011-08-05 19:44:37.000000000 -0400
32286 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
32287 * since the RX tasklet also increments it.
32288 */
32289 #ifdef CONFIG_64BIT
32290 - atomic64_add(rx_status.dropped_packets,
32291 - (atomic64_t *)&priv->stats.rx_dropped);
32292 + atomic64_add_unchecked(rx_status.dropped_packets,
32293 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32294 #else
32295 - atomic_add(rx_status.dropped_packets,
32296 - (atomic_t *)&priv->stats.rx_dropped);
32297 + atomic_add_unchecked(rx_status.dropped_packets,
32298 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
32299 #endif
32300 }
32301
32302 diff -urNp linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c
32303 --- linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c 2011-05-19 00:06:34.000000000 -0400
32304 +++ linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c 2011-08-05 19:44:37.000000000 -0400
32305 @@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
32306 /* Increment RX stats for virtual ports */
32307 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
32308 #ifdef CONFIG_64BIT
32309 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
32310 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
32311 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
32312 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
32313 #else
32314 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
32315 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
32316 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
32317 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
32318 #endif
32319 }
32320 netif_receive_skb(skb);
32321 @@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
32322 dev->name);
32323 */
32324 #ifdef CONFIG_64BIT
32325 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
32326 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32327 #else
32328 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
32329 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
32330 #endif
32331 dev_kfree_skb_irq(skb);
32332 }
32333 diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/inode.c linux-2.6.39.4/drivers/staging/pohmelfs/inode.c
32334 --- linux-2.6.39.4/drivers/staging/pohmelfs/inode.c 2011-05-19 00:06:34.000000000 -0400
32335 +++ linux-2.6.39.4/drivers/staging/pohmelfs/inode.c 2011-08-05 19:44:37.000000000 -0400
32336 @@ -1855,7 +1855,7 @@ static int pohmelfs_fill_super(struct su
32337 mutex_init(&psb->mcache_lock);
32338 psb->mcache_root = RB_ROOT;
32339 psb->mcache_timeout = msecs_to_jiffies(5000);
32340 - atomic_long_set(&psb->mcache_gen, 0);
32341 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
32342
32343 psb->trans_max_pages = 100;
32344
32345 @@ -1870,7 +1870,7 @@ static int pohmelfs_fill_super(struct su
32346 INIT_LIST_HEAD(&psb->crypto_ready_list);
32347 INIT_LIST_HEAD(&psb->crypto_active_list);
32348
32349 - atomic_set(&psb->trans_gen, 1);
32350 + atomic_set_unchecked(&psb->trans_gen, 1);
32351 atomic_long_set(&psb->total_inodes, 0);
32352
32353 mutex_init(&psb->state_lock);
32354 diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c
32355 --- linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c 2011-05-19 00:06:34.000000000 -0400
32356 +++ linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c 2011-08-05 19:44:37.000000000 -0400
32357 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
32358 m->data = data;
32359 m->start = start;
32360 m->size = size;
32361 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
32362 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
32363
32364 mutex_lock(&psb->mcache_lock);
32365 err = pohmelfs_mcache_insert(psb, m);
32366 diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h
32367 --- linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h 2011-05-19 00:06:34.000000000 -0400
32368 +++ linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h 2011-08-05 19:44:37.000000000 -0400
32369 @@ -571,14 +571,14 @@ struct pohmelfs_config;
32370 struct pohmelfs_sb {
32371 struct rb_root mcache_root;
32372 struct mutex mcache_lock;
32373 - atomic_long_t mcache_gen;
32374 + atomic_long_unchecked_t mcache_gen;
32375 unsigned long mcache_timeout;
32376
32377 unsigned int idx;
32378
32379 unsigned int trans_retries;
32380
32381 - atomic_t trans_gen;
32382 + atomic_unchecked_t trans_gen;
32383
32384 unsigned int crypto_attached_size;
32385 unsigned int crypto_align_size;
32386 diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/trans.c linux-2.6.39.4/drivers/staging/pohmelfs/trans.c
32387 --- linux-2.6.39.4/drivers/staging/pohmelfs/trans.c 2011-05-19 00:06:34.000000000 -0400
32388 +++ linux-2.6.39.4/drivers/staging/pohmelfs/trans.c 2011-08-05 19:44:37.000000000 -0400
32389 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
32390 int err;
32391 struct netfs_cmd *cmd = t->iovec.iov_base;
32392
32393 - t->gen = atomic_inc_return(&psb->trans_gen);
32394 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
32395
32396 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
32397 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
32398 diff -urNp linux-2.6.39.4/drivers/staging/rtl8712/rtl871x_io.h linux-2.6.39.4/drivers/staging/rtl8712/rtl871x_io.h
32399 --- linux-2.6.39.4/drivers/staging/rtl8712/rtl871x_io.h 2011-05-19 00:06:34.000000000 -0400
32400 +++ linux-2.6.39.4/drivers/staging/rtl8712/rtl871x_io.h 2011-08-13 20:31:57.000000000 -0400
32401 @@ -83,7 +83,7 @@ struct _io_ops {
32402 u8 *pmem);
32403 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
32404 u8 *pmem);
32405 -};
32406 +} __no_const;
32407
32408 struct io_req {
32409 struct list_head list;
32410 diff -urNp linux-2.6.39.4/drivers/staging/sbe-2t3e3/netdev.c linux-2.6.39.4/drivers/staging/sbe-2t3e3/netdev.c
32411 --- linux-2.6.39.4/drivers/staging/sbe-2t3e3/netdev.c 2011-05-19 00:06:34.000000000 -0400
32412 +++ linux-2.6.39.4/drivers/staging/sbe-2t3e3/netdev.c 2011-08-14 12:29:10.000000000 -0400
32413 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
32414 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
32415
32416 if (rlen)
32417 - if (copy_to_user(data, &resp, rlen))
32418 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
32419 return -EFAULT;
32420
32421 return 0;
32422 diff -urNp linux-2.6.39.4/drivers/staging/tty/istallion.c linux-2.6.39.4/drivers/staging/tty/istallion.c
32423 --- linux-2.6.39.4/drivers/staging/tty/istallion.c 2011-05-19 00:06:34.000000000 -0400
32424 +++ linux-2.6.39.4/drivers/staging/tty/istallion.c 2011-08-05 19:44:37.000000000 -0400
32425 @@ -186,7 +186,6 @@ static struct ktermios stli_deftermios
32426 * re-used for each stats call.
32427 */
32428 static comstats_t stli_comstats;
32429 -static combrd_t stli_brdstats;
32430 static struct asystats stli_cdkstats;
32431
32432 /*****************************************************************************/
32433 @@ -4003,6 +4002,7 @@ out:
32434
32435 static int stli_getbrdstats(combrd_t __user *bp)
32436 {
32437 + combrd_t stli_brdstats;
32438 struct stlibrd *brdp;
32439 unsigned int i;
32440
32441 @@ -4226,6 +4226,8 @@ static int stli_getportstruct(struct stl
32442 struct stliport stli_dummyport;
32443 struct stliport *portp;
32444
32445 + pax_track_stack();
32446 +
32447 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
32448 return -EFAULT;
32449 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
32450 @@ -4248,6 +4250,8 @@ static int stli_getbrdstruct(struct stli
32451 struct stlibrd stli_dummybrd;
32452 struct stlibrd *brdp;
32453
32454 + pax_track_stack();
32455 +
32456 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
32457 return -EFAULT;
32458 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
32459 diff -urNp linux-2.6.39.4/drivers/staging/tty/stallion.c linux-2.6.39.4/drivers/staging/tty/stallion.c
32460 --- linux-2.6.39.4/drivers/staging/tty/stallion.c 2011-05-19 00:06:34.000000000 -0400
32461 +++ linux-2.6.39.4/drivers/staging/tty/stallion.c 2011-08-05 19:44:37.000000000 -0400
32462 @@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
32463 struct stlport stl_dummyport;
32464 struct stlport *portp;
32465
32466 + pax_track_stack();
32467 +
32468 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
32469 return -EFAULT;
32470 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
32471 diff -urNp linux-2.6.39.4/drivers/staging/usbip/usbip_common.h linux-2.6.39.4/drivers/staging/usbip/usbip_common.h
32472 --- linux-2.6.39.4/drivers/staging/usbip/usbip_common.h 2011-05-19 00:06:34.000000000 -0400
32473 +++ linux-2.6.39.4/drivers/staging/usbip/usbip_common.h 2011-08-18 23:21:09.000000000 -0400
32474 @@ -367,7 +367,7 @@ struct usbip_device {
32475 void (*shutdown)(struct usbip_device *);
32476 void (*reset)(struct usbip_device *);
32477 void (*unusable)(struct usbip_device *);
32478 - } eh_ops;
32479 + } __no_const eh_ops;
32480 };
32481
32482
32483 diff -urNp linux-2.6.39.4/drivers/staging/usbip/vhci.h linux-2.6.39.4/drivers/staging/usbip/vhci.h
32484 --- linux-2.6.39.4/drivers/staging/usbip/vhci.h 2011-05-19 00:06:34.000000000 -0400
32485 +++ linux-2.6.39.4/drivers/staging/usbip/vhci.h 2011-08-05 19:44:37.000000000 -0400
32486 @@ -92,7 +92,7 @@ struct vhci_hcd {
32487 unsigned resuming:1;
32488 unsigned long re_timeout;
32489
32490 - atomic_t seqnum;
32491 + atomic_unchecked_t seqnum;
32492
32493 /*
32494 * NOTE:
32495 diff -urNp linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c
32496 --- linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c 2011-05-19 00:06:34.000000000 -0400
32497 +++ linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c 2011-08-18 23:22:51.000000000 -0400
32498 @@ -536,7 +536,7 @@ static void vhci_tx_urb(struct urb *urb)
32499 return;
32500 }
32501
32502 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
32503 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32504 if (priv->seqnum == 0xffff)
32505 usbip_uinfo("seqnum max\n");
32506
32507 @@ -795,7 +795,7 @@ static int vhci_urb_dequeue(struct usb_h
32508 return -ENOMEM;
32509 }
32510
32511 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
32512 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32513 if (unlink->seqnum == 0xffff)
32514 usbip_uinfo("seqnum max\n");
32515
32516 @@ -992,7 +992,7 @@ static int vhci_start(struct usb_hcd *hc
32517 vdev->rhport = rhport;
32518 }
32519
32520 - atomic_set(&vhci->seqnum, 0);
32521 + atomic_set_unchecked(&vhci->seqnum, 0);
32522 spin_lock_init(&vhci->lock);
32523
32524
32525 diff -urNp linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c
32526 --- linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c 2011-05-19 00:06:34.000000000 -0400
32527 +++ linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c 2011-08-05 19:44:37.000000000 -0400
32528 @@ -81,7 +81,7 @@ static void vhci_recv_ret_submit(struct
32529 usbip_uerr("cannot find a urb of seqnum %u\n",
32530 pdu->base.seqnum);
32531 usbip_uinfo("max seqnum %d\n",
32532 - atomic_read(&the_controller->seqnum));
32533 + atomic_read_unchecked(&the_controller->seqnum));
32534 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
32535 return;
32536 }
32537 diff -urNp linux-2.6.39.4/drivers/staging/wlan-ng/hfa384x_usb.c linux-2.6.39.4/drivers/staging/wlan-ng/hfa384x_usb.c
32538 --- linux-2.6.39.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-05-19 00:06:34.000000000 -0400
32539 +++ linux-2.6.39.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-13 20:36:25.000000000 -0400
32540 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
32541
32542 struct usbctlx_completor {
32543 int (*complete) (struct usbctlx_completor *);
32544 -};
32545 +} __no_const;
32546
32547 static int
32548 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
32549 diff -urNp linux-2.6.39.4/drivers/target/target_core_alua.c linux-2.6.39.4/drivers/target/target_core_alua.c
32550 --- linux-2.6.39.4/drivers/target/target_core_alua.c 2011-05-19 00:06:34.000000000 -0400
32551 +++ linux-2.6.39.4/drivers/target/target_core_alua.c 2011-08-05 19:44:37.000000000 -0400
32552 @@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
32553 char path[ALUA_METADATA_PATH_LEN];
32554 int len;
32555
32556 + pax_track_stack();
32557 +
32558 memset(path, 0, ALUA_METADATA_PATH_LEN);
32559
32560 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
32561 @@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
32562 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
32563 int len;
32564
32565 + pax_track_stack();
32566 +
32567 memset(path, 0, ALUA_METADATA_PATH_LEN);
32568 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
32569
32570 diff -urNp linux-2.6.39.4/drivers/target/target_core_cdb.c linux-2.6.39.4/drivers/target/target_core_cdb.c
32571 --- linux-2.6.39.4/drivers/target/target_core_cdb.c 2011-05-19 00:06:34.000000000 -0400
32572 +++ linux-2.6.39.4/drivers/target/target_core_cdb.c 2011-08-05 19:44:37.000000000 -0400
32573 @@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
32574 int length = 0;
32575 unsigned char buf[SE_MODE_PAGE_BUF];
32576
32577 + pax_track_stack();
32578 +
32579 memset(buf, 0, SE_MODE_PAGE_BUF);
32580
32581 switch (cdb[2] & 0x3f) {
32582 diff -urNp linux-2.6.39.4/drivers/target/target_core_configfs.c linux-2.6.39.4/drivers/target/target_core_configfs.c
32583 --- linux-2.6.39.4/drivers/target/target_core_configfs.c 2011-05-19 00:06:34.000000000 -0400
32584 +++ linux-2.6.39.4/drivers/target/target_core_configfs.c 2011-08-05 20:34:06.000000000 -0400
32585 @@ -1280,6 +1280,8 @@ static ssize_t target_core_dev_pr_show_a
32586 ssize_t len = 0;
32587 int reg_count = 0, prf_isid;
32588
32589 + pax_track_stack();
32590 +
32591 if (!(su_dev->se_dev_ptr))
32592 return -ENODEV;
32593
32594 diff -urNp linux-2.6.39.4/drivers/target/target_core_pr.c linux-2.6.39.4/drivers/target/target_core_pr.c
32595 --- linux-2.6.39.4/drivers/target/target_core_pr.c 2011-05-19 00:06:34.000000000 -0400
32596 +++ linux-2.6.39.4/drivers/target/target_core_pr.c 2011-08-05 19:44:37.000000000 -0400
32597 @@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
32598 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
32599 u16 tpgt;
32600
32601 + pax_track_stack();
32602 +
32603 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
32604 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
32605 /*
32606 @@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
32607 ssize_t len = 0;
32608 int reg_count = 0;
32609
32610 + pax_track_stack();
32611 +
32612 memset(buf, 0, pr_aptpl_buf_len);
32613 /*
32614 * Called to clear metadata once APTPL has been deactivated.
32615 @@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
32616 char path[512];
32617 int ret;
32618
32619 + pax_track_stack();
32620 +
32621 memset(iov, 0, sizeof(struct iovec));
32622 memset(path, 0, 512);
32623
32624 diff -urNp linux-2.6.39.4/drivers/target/target_core_tmr.c linux-2.6.39.4/drivers/target/target_core_tmr.c
32625 --- linux-2.6.39.4/drivers/target/target_core_tmr.c 2011-06-03 00:04:14.000000000 -0400
32626 +++ linux-2.6.39.4/drivers/target/target_core_tmr.c 2011-08-05 19:44:37.000000000 -0400
32627 @@ -263,7 +263,7 @@ int core_tmr_lun_reset(
32628 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
32629 T_TASK(cmd)->t_task_cdbs,
32630 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32631 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32632 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32633 atomic_read(&T_TASK(cmd)->t_transport_active),
32634 atomic_read(&T_TASK(cmd)->t_transport_stop),
32635 atomic_read(&T_TASK(cmd)->t_transport_sent));
32636 @@ -305,7 +305,7 @@ int core_tmr_lun_reset(
32637 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
32638 " task: %p, t_fe_count: %d dev: %p\n", task,
32639 fe_count, dev);
32640 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32641 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32642 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
32643 flags);
32644 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32645 @@ -315,7 +315,7 @@ int core_tmr_lun_reset(
32646 }
32647 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
32648 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
32649 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32650 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32651 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
32652 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32653
32654 diff -urNp linux-2.6.39.4/drivers/target/target_core_transport.c linux-2.6.39.4/drivers/target/target_core_transport.c
32655 --- linux-2.6.39.4/drivers/target/target_core_transport.c 2011-06-03 00:04:14.000000000 -0400
32656 +++ linux-2.6.39.4/drivers/target/target_core_transport.c 2011-08-05 19:44:37.000000000 -0400
32657 @@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
32658
32659 dev->queue_depth = dev_limits->queue_depth;
32660 atomic_set(&dev->depth_left, dev->queue_depth);
32661 - atomic_set(&dev->dev_ordered_id, 0);
32662 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
32663
32664 se_dev_set_default_attribs(dev, dev_limits);
32665
32666 @@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
32667 * Used to determine when ORDERED commands should go from
32668 * Dormant to Active status.
32669 */
32670 - cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
32671 + cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
32672 smp_mb__after_atomic_inc();
32673 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
32674 cmd->se_ordered_id, cmd->sam_task_attr,
32675 @@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
32676 " t_transport_active: %d t_transport_stop: %d"
32677 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
32678 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32679 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32680 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32681 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
32682 atomic_read(&T_TASK(cmd)->t_transport_active),
32683 atomic_read(&T_TASK(cmd)->t_transport_stop),
32684 @@ -2673,9 +2673,9 @@ check_depth:
32685 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
32686 atomic_set(&task->task_active, 1);
32687 atomic_set(&task->task_sent, 1);
32688 - atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
32689 + atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
32690
32691 - if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
32692 + if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
32693 T_TASK(cmd)->t_task_cdbs)
32694 atomic_set(&cmd->transport_sent, 1);
32695
32696 @@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
32697 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
32698 }
32699 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
32700 - atomic_read(&T_TASK(cmd)->t_transport_aborted))
32701 + atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
32702 goto remove;
32703
32704 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
32705 @@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
32706 {
32707 int ret = 0;
32708
32709 - if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
32710 + if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
32711 if (!(send_status) ||
32712 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
32713 return 1;
32714 @@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
32715 */
32716 if (cmd->data_direction == DMA_TO_DEVICE) {
32717 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
32718 - atomic_inc(&T_TASK(cmd)->t_transport_aborted);
32719 + atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
32720 smp_mb__after_atomic_inc();
32721 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
32722 transport_new_cmd_failure(cmd);
32723 @@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
32724 CMD_TFO(cmd)->get_task_tag(cmd),
32725 T_TASK(cmd)->t_task_cdbs,
32726 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32727 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32728 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32729 atomic_read(&T_TASK(cmd)->t_transport_active),
32730 atomic_read(&T_TASK(cmd)->t_transport_stop),
32731 atomic_read(&T_TASK(cmd)->t_transport_sent));
32732 diff -urNp linux-2.6.39.4/drivers/telephony/ixj.c linux-2.6.39.4/drivers/telephony/ixj.c
32733 --- linux-2.6.39.4/drivers/telephony/ixj.c 2011-05-19 00:06:34.000000000 -0400
32734 +++ linux-2.6.39.4/drivers/telephony/ixj.c 2011-08-05 19:44:37.000000000 -0400
32735 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
32736 bool mContinue;
32737 char *pIn, *pOut;
32738
32739 + pax_track_stack();
32740 +
32741 if (!SCI_Prepare(j))
32742 return 0;
32743
32744 diff -urNp linux-2.6.39.4/drivers/tty/hvc/hvcs.c linux-2.6.39.4/drivers/tty/hvc/hvcs.c
32745 --- linux-2.6.39.4/drivers/tty/hvc/hvcs.c 2011-05-19 00:06:34.000000000 -0400
32746 +++ linux-2.6.39.4/drivers/tty/hvc/hvcs.c 2011-08-05 19:44:37.000000000 -0400
32747 @@ -83,6 +83,7 @@
32748 #include <asm/hvcserver.h>
32749 #include <asm/uaccess.h>
32750 #include <asm/vio.h>
32751 +#include <asm/local.h>
32752
32753 /*
32754 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
32755 @@ -270,7 +271,7 @@ struct hvcs_struct {
32756 unsigned int index;
32757
32758 struct tty_struct *tty;
32759 - int open_count;
32760 + local_t open_count;
32761
32762 /*
32763 * Used to tell the driver kernel_thread what operations need to take
32764 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
32765
32766 spin_lock_irqsave(&hvcsd->lock, flags);
32767
32768 - if (hvcsd->open_count > 0) {
32769 + if (local_read(&hvcsd->open_count) > 0) {
32770 spin_unlock_irqrestore(&hvcsd->lock, flags);
32771 printk(KERN_INFO "HVCS: vterm state unchanged. "
32772 "The hvcs device node is still in use.\n");
32773 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
32774 if ((retval = hvcs_partner_connect(hvcsd)))
32775 goto error_release;
32776
32777 - hvcsd->open_count = 1;
32778 + local_set(&hvcsd->open_count, 1);
32779 hvcsd->tty = tty;
32780 tty->driver_data = hvcsd;
32781
32782 @@ -1179,7 +1180,7 @@ fast_open:
32783
32784 spin_lock_irqsave(&hvcsd->lock, flags);
32785 kref_get(&hvcsd->kref);
32786 - hvcsd->open_count++;
32787 + local_inc(&hvcsd->open_count);
32788 hvcsd->todo_mask |= HVCS_SCHED_READ;
32789 spin_unlock_irqrestore(&hvcsd->lock, flags);
32790
32791 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
32792 hvcsd = tty->driver_data;
32793
32794 spin_lock_irqsave(&hvcsd->lock, flags);
32795 - if (--hvcsd->open_count == 0) {
32796 + if (local_dec_and_test(&hvcsd->open_count)) {
32797
32798 vio_disable_interrupts(hvcsd->vdev);
32799
32800 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
32801 free_irq(irq, hvcsd);
32802 kref_put(&hvcsd->kref, destroy_hvcs_struct);
32803 return;
32804 - } else if (hvcsd->open_count < 0) {
32805 + } else if (local_read(&hvcsd->open_count) < 0) {
32806 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
32807 " is missmanaged.\n",
32808 - hvcsd->vdev->unit_address, hvcsd->open_count);
32809 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
32810 }
32811
32812 spin_unlock_irqrestore(&hvcsd->lock, flags);
32813 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
32814
32815 spin_lock_irqsave(&hvcsd->lock, flags);
32816 /* Preserve this so that we know how many kref refs to put */
32817 - temp_open_count = hvcsd->open_count;
32818 + temp_open_count = local_read(&hvcsd->open_count);
32819
32820 /*
32821 * Don't kref put inside the spinlock because the destruction
32822 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
32823 hvcsd->tty->driver_data = NULL;
32824 hvcsd->tty = NULL;
32825
32826 - hvcsd->open_count = 0;
32827 + local_set(&hvcsd->open_count, 0);
32828
32829 /* This will drop any buffered data on the floor which is OK in a hangup
32830 * scenario. */
32831 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
32832 * the middle of a write operation? This is a crummy place to do this
32833 * but we want to keep it all in the spinlock.
32834 */
32835 - if (hvcsd->open_count <= 0) {
32836 + if (local_read(&hvcsd->open_count) <= 0) {
32837 spin_unlock_irqrestore(&hvcsd->lock, flags);
32838 return -ENODEV;
32839 }
32840 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
32841 {
32842 struct hvcs_struct *hvcsd = tty->driver_data;
32843
32844 - if (!hvcsd || hvcsd->open_count <= 0)
32845 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
32846 return 0;
32847
32848 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
32849 diff -urNp linux-2.6.39.4/drivers/tty/ipwireless/tty.c linux-2.6.39.4/drivers/tty/ipwireless/tty.c
32850 --- linux-2.6.39.4/drivers/tty/ipwireless/tty.c 2011-05-19 00:06:34.000000000 -0400
32851 +++ linux-2.6.39.4/drivers/tty/ipwireless/tty.c 2011-08-05 19:44:37.000000000 -0400
32852 @@ -29,6 +29,7 @@
32853 #include <linux/tty_driver.h>
32854 #include <linux/tty_flip.h>
32855 #include <linux/uaccess.h>
32856 +#include <asm/local.h>
32857
32858 #include "tty.h"
32859 #include "network.h"
32860 @@ -51,7 +52,7 @@ struct ipw_tty {
32861 int tty_type;
32862 struct ipw_network *network;
32863 struct tty_struct *linux_tty;
32864 - int open_count;
32865 + local_t open_count;
32866 unsigned int control_lines;
32867 struct mutex ipw_tty_mutex;
32868 int tx_bytes_queued;
32869 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
32870 mutex_unlock(&tty->ipw_tty_mutex);
32871 return -ENODEV;
32872 }
32873 - if (tty->open_count == 0)
32874 + if (local_read(&tty->open_count) == 0)
32875 tty->tx_bytes_queued = 0;
32876
32877 - tty->open_count++;
32878 + local_inc(&tty->open_count);
32879
32880 tty->linux_tty = linux_tty;
32881 linux_tty->driver_data = tty;
32882 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
32883
32884 static void do_ipw_close(struct ipw_tty *tty)
32885 {
32886 - tty->open_count--;
32887 -
32888 - if (tty->open_count == 0) {
32889 + if (local_dec_return(&tty->open_count) == 0) {
32890 struct tty_struct *linux_tty = tty->linux_tty;
32891
32892 if (linux_tty != NULL) {
32893 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
32894 return;
32895
32896 mutex_lock(&tty->ipw_tty_mutex);
32897 - if (tty->open_count == 0) {
32898 + if (local_read(&tty->open_count) == 0) {
32899 mutex_unlock(&tty->ipw_tty_mutex);
32900 return;
32901 }
32902 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
32903 return;
32904 }
32905
32906 - if (!tty->open_count) {
32907 + if (!local_read(&tty->open_count)) {
32908 mutex_unlock(&tty->ipw_tty_mutex);
32909 return;
32910 }
32911 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
32912 return -ENODEV;
32913
32914 mutex_lock(&tty->ipw_tty_mutex);
32915 - if (!tty->open_count) {
32916 + if (!local_read(&tty->open_count)) {
32917 mutex_unlock(&tty->ipw_tty_mutex);
32918 return -EINVAL;
32919 }
32920 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
32921 if (!tty)
32922 return -ENODEV;
32923
32924 - if (!tty->open_count)
32925 + if (!local_read(&tty->open_count))
32926 return -EINVAL;
32927
32928 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
32929 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
32930 if (!tty)
32931 return 0;
32932
32933 - if (!tty->open_count)
32934 + if (!local_read(&tty->open_count))
32935 return 0;
32936
32937 return tty->tx_bytes_queued;
32938 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
32939 if (!tty)
32940 return -ENODEV;
32941
32942 - if (!tty->open_count)
32943 + if (!local_read(&tty->open_count))
32944 return -EINVAL;
32945
32946 return get_control_lines(tty);
32947 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
32948 if (!tty)
32949 return -ENODEV;
32950
32951 - if (!tty->open_count)
32952 + if (!local_read(&tty->open_count))
32953 return -EINVAL;
32954
32955 return set_control_lines(tty, set, clear);
32956 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
32957 if (!tty)
32958 return -ENODEV;
32959
32960 - if (!tty->open_count)
32961 + if (!local_read(&tty->open_count))
32962 return -EINVAL;
32963
32964 /* FIXME: Exactly how is the tty object locked here .. */
32965 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
32966 against a parallel ioctl etc */
32967 mutex_lock(&ttyj->ipw_tty_mutex);
32968 }
32969 - while (ttyj->open_count)
32970 + while (local_read(&ttyj->open_count))
32971 do_ipw_close(ttyj);
32972 ipwireless_disassociate_network_ttys(network,
32973 ttyj->channel_idx);
32974 diff -urNp linux-2.6.39.4/drivers/tty/n_gsm.c linux-2.6.39.4/drivers/tty/n_gsm.c
32975 --- linux-2.6.39.4/drivers/tty/n_gsm.c 2011-05-19 00:06:34.000000000 -0400
32976 +++ linux-2.6.39.4/drivers/tty/n_gsm.c 2011-08-05 19:44:37.000000000 -0400
32977 @@ -1588,7 +1588,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
32978 return NULL;
32979 spin_lock_init(&dlci->lock);
32980 dlci->fifo = &dlci->_fifo;
32981 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
32982 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
32983 kfree(dlci);
32984 return NULL;
32985 }
32986 diff -urNp linux-2.6.39.4/drivers/tty/n_tty.c linux-2.6.39.4/drivers/tty/n_tty.c
32987 --- linux-2.6.39.4/drivers/tty/n_tty.c 2011-05-19 00:06:34.000000000 -0400
32988 +++ linux-2.6.39.4/drivers/tty/n_tty.c 2011-08-05 19:44:37.000000000 -0400
32989 @@ -2122,6 +2122,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
32990 {
32991 *ops = tty_ldisc_N_TTY;
32992 ops->owner = NULL;
32993 - ops->refcount = ops->flags = 0;
32994 + atomic_set(&ops->refcount, 0);
32995 + ops->flags = 0;
32996 }
32997 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
32998 diff -urNp linux-2.6.39.4/drivers/tty/pty.c linux-2.6.39.4/drivers/tty/pty.c
32999 --- linux-2.6.39.4/drivers/tty/pty.c 2011-05-19 00:06:34.000000000 -0400
33000 +++ linux-2.6.39.4/drivers/tty/pty.c 2011-08-05 20:34:06.000000000 -0400
33001 @@ -753,8 +753,10 @@ static void __init unix98_pty_init(void)
33002 register_sysctl_table(pty_root_table);
33003
33004 /* Now create the /dev/ptmx special device */
33005 + pax_open_kernel();
33006 tty_default_fops(&ptmx_fops);
33007 - ptmx_fops.open = ptmx_open;
33008 + *(void **)&ptmx_fops.open = ptmx_open;
33009 + pax_close_kernel();
33010
33011 cdev_init(&ptmx_cdev, &ptmx_fops);
33012 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
33013 diff -urNp linux-2.6.39.4/drivers/tty/rocket.c linux-2.6.39.4/drivers/tty/rocket.c
33014 --- linux-2.6.39.4/drivers/tty/rocket.c 2011-05-19 00:06:34.000000000 -0400
33015 +++ linux-2.6.39.4/drivers/tty/rocket.c 2011-08-05 19:44:37.000000000 -0400
33016 @@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
33017 struct rocket_ports tmp;
33018 int board;
33019
33020 + pax_track_stack();
33021 +
33022 if (!retports)
33023 return -EFAULT;
33024 memset(&tmp, 0, sizeof (tmp));
33025 diff -urNp linux-2.6.39.4/drivers/tty/serial/kgdboc.c linux-2.6.39.4/drivers/tty/serial/kgdboc.c
33026 --- linux-2.6.39.4/drivers/tty/serial/kgdboc.c 2011-05-19 00:06:34.000000000 -0400
33027 +++ linux-2.6.39.4/drivers/tty/serial/kgdboc.c 2011-08-05 20:34:06.000000000 -0400
33028 @@ -23,8 +23,9 @@
33029 #define MAX_CONFIG_LEN 40
33030
33031 static struct kgdb_io kgdboc_io_ops;
33032 +static struct kgdb_io kgdboc_io_ops_console;
33033
33034 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
33035 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
33036 static int configured = -1;
33037
33038 static char config[MAX_CONFIG_LEN];
33039 @@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
33040 kgdboc_unregister_kbd();
33041 if (configured == 1)
33042 kgdb_unregister_io_module(&kgdboc_io_ops);
33043 + else if (configured == 2)
33044 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
33045 }
33046
33047 static int configure_kgdboc(void)
33048 @@ -156,13 +159,13 @@ static int configure_kgdboc(void)
33049 int err;
33050 char *cptr = config;
33051 struct console *cons;
33052 + int is_console = 0;
33053
33054 err = kgdboc_option_setup(config);
33055 if (err || !strlen(config) || isspace(config[0]))
33056 goto noconfig;
33057
33058 err = -ENODEV;
33059 - kgdboc_io_ops.is_console = 0;
33060 kgdb_tty_driver = NULL;
33061
33062 kgdboc_use_kms = 0;
33063 @@ -183,7 +186,7 @@ static int configure_kgdboc(void)
33064 int idx;
33065 if (cons->device && cons->device(cons, &idx) == p &&
33066 idx == tty_line) {
33067 - kgdboc_io_ops.is_console = 1;
33068 + is_console = 1;
33069 break;
33070 }
33071 cons = cons->next;
33072 @@ -193,12 +196,16 @@ static int configure_kgdboc(void)
33073 kgdb_tty_line = tty_line;
33074
33075 do_register:
33076 - err = kgdb_register_io_module(&kgdboc_io_ops);
33077 + if (is_console) {
33078 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
33079 + configured = 2;
33080 + } else {
33081 + err = kgdb_register_io_module(&kgdboc_io_ops);
33082 + configured = 1;
33083 + }
33084 if (err)
33085 goto noconfig;
33086
33087 - configured = 1;
33088 -
33089 return 0;
33090
33091 noconfig:
33092 @@ -212,7 +219,7 @@ noconfig:
33093 static int __init init_kgdboc(void)
33094 {
33095 /* Already configured? */
33096 - if (configured == 1)
33097 + if (configured >= 1)
33098 return 0;
33099
33100 return configure_kgdboc();
33101 @@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
33102 if (config[len - 1] == '\n')
33103 config[len - 1] = '\0';
33104
33105 - if (configured == 1)
33106 + if (configured >= 1)
33107 cleanup_kgdboc();
33108
33109 /* Go and configure with the new params. */
33110 @@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
33111 .post_exception = kgdboc_post_exp_handler,
33112 };
33113
33114 +static struct kgdb_io kgdboc_io_ops_console = {
33115 + .name = "kgdboc",
33116 + .read_char = kgdboc_get_char,
33117 + .write_char = kgdboc_put_char,
33118 + .pre_exception = kgdboc_pre_exp_handler,
33119 + .post_exception = kgdboc_post_exp_handler,
33120 + .is_console = 1
33121 +};
33122 +
33123 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
33124 /* This is only available if kgdboc is a built in for early debugging */
33125 static int __init kgdboc_early_init(char *opt)
33126 diff -urNp linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c
33127 --- linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c 2011-05-19 00:06:34.000000000 -0400
33128 +++ linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c 2011-08-05 20:34:06.000000000 -0400
33129 @@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
33130 int loop = 1, num, total = 0;
33131 u8 recv_buf[512], *pbuf;
33132
33133 + pax_track_stack();
33134 +
33135 pbuf = recv_buf;
33136 do {
33137 num = max3110_read_multi(max, pbuf);
33138 diff -urNp linux-2.6.39.4/drivers/tty/tty_io.c linux-2.6.39.4/drivers/tty/tty_io.c
33139 --- linux-2.6.39.4/drivers/tty/tty_io.c 2011-05-19 00:06:34.000000000 -0400
33140 +++ linux-2.6.39.4/drivers/tty/tty_io.c 2011-08-05 20:34:06.000000000 -0400
33141 @@ -3200,7 +3200,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
33142
33143 void tty_default_fops(struct file_operations *fops)
33144 {
33145 - *fops = tty_fops;
33146 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
33147 }
33148
33149 /*
33150 diff -urNp linux-2.6.39.4/drivers/tty/tty_ldisc.c linux-2.6.39.4/drivers/tty/tty_ldisc.c
33151 --- linux-2.6.39.4/drivers/tty/tty_ldisc.c 2011-07-09 09:18:51.000000000 -0400
33152 +++ linux-2.6.39.4/drivers/tty/tty_ldisc.c 2011-08-05 19:44:37.000000000 -0400
33153 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
33154 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
33155 struct tty_ldisc_ops *ldo = ld->ops;
33156
33157 - ldo->refcount--;
33158 + atomic_dec(&ldo->refcount);
33159 module_put(ldo->owner);
33160 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33161
33162 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
33163 spin_lock_irqsave(&tty_ldisc_lock, flags);
33164 tty_ldiscs[disc] = new_ldisc;
33165 new_ldisc->num = disc;
33166 - new_ldisc->refcount = 0;
33167 + atomic_set(&new_ldisc->refcount, 0);
33168 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33169
33170 return ret;
33171 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
33172 return -EINVAL;
33173
33174 spin_lock_irqsave(&tty_ldisc_lock, flags);
33175 - if (tty_ldiscs[disc]->refcount)
33176 + if (atomic_read(&tty_ldiscs[disc]->refcount))
33177 ret = -EBUSY;
33178 else
33179 tty_ldiscs[disc] = NULL;
33180 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
33181 if (ldops) {
33182 ret = ERR_PTR(-EAGAIN);
33183 if (try_module_get(ldops->owner)) {
33184 - ldops->refcount++;
33185 + atomic_inc(&ldops->refcount);
33186 ret = ldops;
33187 }
33188 }
33189 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
33190 unsigned long flags;
33191
33192 spin_lock_irqsave(&tty_ldisc_lock, flags);
33193 - ldops->refcount--;
33194 + atomic_dec(&ldops->refcount);
33195 module_put(ldops->owner);
33196 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33197 }
33198 diff -urNp linux-2.6.39.4/drivers/tty/vt/keyboard.c linux-2.6.39.4/drivers/tty/vt/keyboard.c
33199 --- linux-2.6.39.4/drivers/tty/vt/keyboard.c 2011-05-19 00:06:34.000000000 -0400
33200 +++ linux-2.6.39.4/drivers/tty/vt/keyboard.c 2011-08-05 19:44:37.000000000 -0400
33201 @@ -658,6 +658,16 @@ static void k_spec(struct vc_data *vc, u
33202 kbd->kbdmode == VC_OFF) &&
33203 value != KVAL(K_SAK))
33204 return; /* SAK is allowed even in raw mode */
33205 +
33206 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
33207 + {
33208 + void *func = fn_handler[value];
33209 + if (func == fn_show_state || func == fn_show_ptregs ||
33210 + func == fn_show_mem)
33211 + return;
33212 + }
33213 +#endif
33214 +
33215 fn_handler[value](vc);
33216 }
33217
33218 diff -urNp linux-2.6.39.4/drivers/tty/vt/vt.c linux-2.6.39.4/drivers/tty/vt/vt.c
33219 --- linux-2.6.39.4/drivers/tty/vt/vt.c 2011-05-19 00:06:34.000000000 -0400
33220 +++ linux-2.6.39.4/drivers/tty/vt/vt.c 2011-08-05 19:44:37.000000000 -0400
33221 @@ -261,7 +261,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
33222
33223 static void notify_write(struct vc_data *vc, unsigned int unicode)
33224 {
33225 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
33226 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
33227 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
33228 }
33229
33230 diff -urNp linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c
33231 --- linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c 2011-05-19 00:06:34.000000000 -0400
33232 +++ linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c 2011-08-05 19:44:37.000000000 -0400
33233 @@ -209,9 +209,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33234 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
33235 return -EFAULT;
33236
33237 - if (!capable(CAP_SYS_TTY_CONFIG))
33238 - perm = 0;
33239 -
33240 switch (cmd) {
33241 case KDGKBENT:
33242 key_map = key_maps[s];
33243 @@ -223,6 +220,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33244 val = (i ? K_HOLE : K_NOSUCHMAP);
33245 return put_user(val, &user_kbe->kb_value);
33246 case KDSKBENT:
33247 + if (!capable(CAP_SYS_TTY_CONFIG))
33248 + perm = 0;
33249 +
33250 if (!perm)
33251 return -EPERM;
33252 if (!i && v == K_NOSUCHMAP) {
33253 @@ -324,9 +324,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33254 int i, j, k;
33255 int ret;
33256
33257 - if (!capable(CAP_SYS_TTY_CONFIG))
33258 - perm = 0;
33259 -
33260 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
33261 if (!kbs) {
33262 ret = -ENOMEM;
33263 @@ -360,6 +357,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33264 kfree(kbs);
33265 return ((p && *p) ? -EOVERFLOW : 0);
33266 case KDSKBSENT:
33267 + if (!capable(CAP_SYS_TTY_CONFIG))
33268 + perm = 0;
33269 +
33270 if (!perm) {
33271 ret = -EPERM;
33272 goto reterr;
33273 diff -urNp linux-2.6.39.4/drivers/uio/uio.c linux-2.6.39.4/drivers/uio/uio.c
33274 --- linux-2.6.39.4/drivers/uio/uio.c 2011-05-19 00:06:34.000000000 -0400
33275 +++ linux-2.6.39.4/drivers/uio/uio.c 2011-08-05 19:44:37.000000000 -0400
33276 @@ -25,6 +25,7 @@
33277 #include <linux/kobject.h>
33278 #include <linux/cdev.h>
33279 #include <linux/uio_driver.h>
33280 +#include <asm/local.h>
33281
33282 #define UIO_MAX_DEVICES (1U << MINORBITS)
33283
33284 @@ -32,10 +33,10 @@ struct uio_device {
33285 struct module *owner;
33286 struct device *dev;
33287 int minor;
33288 - atomic_t event;
33289 + atomic_unchecked_t event;
33290 struct fasync_struct *async_queue;
33291 wait_queue_head_t wait;
33292 - int vma_count;
33293 + local_t vma_count;
33294 struct uio_info *info;
33295 struct kobject *map_dir;
33296 struct kobject *portio_dir;
33297 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device
33298 struct device_attribute *attr, char *buf)
33299 {
33300 struct uio_device *idev = dev_get_drvdata(dev);
33301 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
33302 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
33303 }
33304
33305 static struct device_attribute uio_class_attributes[] = {
33306 @@ -402,7 +403,7 @@ void uio_event_notify(struct uio_info *i
33307 {
33308 struct uio_device *idev = info->uio_dev;
33309
33310 - atomic_inc(&idev->event);
33311 + atomic_inc_unchecked(&idev->event);
33312 wake_up_interruptible(&idev->wait);
33313 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
33314 }
33315 @@ -455,7 +456,7 @@ static int uio_open(struct inode *inode,
33316 }
33317
33318 listener->dev = idev;
33319 - listener->event_count = atomic_read(&idev->event);
33320 + listener->event_count = atomic_read_unchecked(&idev->event);
33321 filep->private_data = listener;
33322
33323 if (idev->info->open) {
33324 @@ -506,7 +507,7 @@ static unsigned int uio_poll(struct file
33325 return -EIO;
33326
33327 poll_wait(filep, &idev->wait, wait);
33328 - if (listener->event_count != atomic_read(&idev->event))
33329 + if (listener->event_count != atomic_read_unchecked(&idev->event))
33330 return POLLIN | POLLRDNORM;
33331 return 0;
33332 }
33333 @@ -531,7 +532,7 @@ static ssize_t uio_read(struct file *fil
33334 do {
33335 set_current_state(TASK_INTERRUPTIBLE);
33336
33337 - event_count = atomic_read(&idev->event);
33338 + event_count = atomic_read_unchecked(&idev->event);
33339 if (event_count != listener->event_count) {
33340 if (copy_to_user(buf, &event_count, count))
33341 retval = -EFAULT;
33342 @@ -602,13 +603,13 @@ static int uio_find_mem_index(struct vm_
33343 static void uio_vma_open(struct vm_area_struct *vma)
33344 {
33345 struct uio_device *idev = vma->vm_private_data;
33346 - idev->vma_count++;
33347 + local_inc(&idev->vma_count);
33348 }
33349
33350 static void uio_vma_close(struct vm_area_struct *vma)
33351 {
33352 struct uio_device *idev = vma->vm_private_data;
33353 - idev->vma_count--;
33354 + local_dec(&idev->vma_count);
33355 }
33356
33357 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33358 @@ -819,7 +820,7 @@ int __uio_register_device(struct module
33359 idev->owner = owner;
33360 idev->info = info;
33361 init_waitqueue_head(&idev->wait);
33362 - atomic_set(&idev->event, 0);
33363 + atomic_set_unchecked(&idev->event, 0);
33364
33365 ret = uio_get_minor(idev);
33366 if (ret)
33367 diff -urNp linux-2.6.39.4/drivers/usb/atm/cxacru.c linux-2.6.39.4/drivers/usb/atm/cxacru.c
33368 --- linux-2.6.39.4/drivers/usb/atm/cxacru.c 2011-05-19 00:06:34.000000000 -0400
33369 +++ linux-2.6.39.4/drivers/usb/atm/cxacru.c 2011-08-05 19:44:37.000000000 -0400
33370 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
33371 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
33372 if (ret < 2)
33373 return -EINVAL;
33374 - if (index < 0 || index > 0x7f)
33375 + if (index > 0x7f)
33376 return -EINVAL;
33377 pos += tmp;
33378
33379 diff -urNp linux-2.6.39.4/drivers/usb/atm/usbatm.c linux-2.6.39.4/drivers/usb/atm/usbatm.c
33380 --- linux-2.6.39.4/drivers/usb/atm/usbatm.c 2011-05-19 00:06:34.000000000 -0400
33381 +++ linux-2.6.39.4/drivers/usb/atm/usbatm.c 2011-08-05 19:44:37.000000000 -0400
33382 @@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
33383 if (printk_ratelimit())
33384 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
33385 __func__, vpi, vci);
33386 - atomic_inc(&vcc->stats->rx_err);
33387 + atomic_inc_unchecked(&vcc->stats->rx_err);
33388 return;
33389 }
33390
33391 @@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
33392 if (length > ATM_MAX_AAL5_PDU) {
33393 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
33394 __func__, length, vcc);
33395 - atomic_inc(&vcc->stats->rx_err);
33396 + atomic_inc_unchecked(&vcc->stats->rx_err);
33397 goto out;
33398 }
33399
33400 @@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
33401 if (sarb->len < pdu_length) {
33402 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
33403 __func__, pdu_length, sarb->len, vcc);
33404 - atomic_inc(&vcc->stats->rx_err);
33405 + atomic_inc_unchecked(&vcc->stats->rx_err);
33406 goto out;
33407 }
33408
33409 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
33410 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
33411 __func__, vcc);
33412 - atomic_inc(&vcc->stats->rx_err);
33413 + atomic_inc_unchecked(&vcc->stats->rx_err);
33414 goto out;
33415 }
33416
33417 @@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
33418 if (printk_ratelimit())
33419 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
33420 __func__, length);
33421 - atomic_inc(&vcc->stats->rx_drop);
33422 + atomic_inc_unchecked(&vcc->stats->rx_drop);
33423 goto out;
33424 }
33425
33426 @@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
33427
33428 vcc->push(vcc, skb);
33429
33430 - atomic_inc(&vcc->stats->rx);
33431 + atomic_inc_unchecked(&vcc->stats->rx);
33432 out:
33433 skb_trim(sarb, 0);
33434 }
33435 @@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
33436 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
33437
33438 usbatm_pop(vcc, skb);
33439 - atomic_inc(&vcc->stats->tx);
33440 + atomic_inc_unchecked(&vcc->stats->tx);
33441
33442 skb = skb_dequeue(&instance->sndqueue);
33443 }
33444 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
33445 if (!left--)
33446 return sprintf(page,
33447 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
33448 - atomic_read(&atm_dev->stats.aal5.tx),
33449 - atomic_read(&atm_dev->stats.aal5.tx_err),
33450 - atomic_read(&atm_dev->stats.aal5.rx),
33451 - atomic_read(&atm_dev->stats.aal5.rx_err),
33452 - atomic_read(&atm_dev->stats.aal5.rx_drop));
33453 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
33454 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
33455 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
33456 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
33457 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
33458
33459 if (!left--) {
33460 if (instance->disconnected)
33461 diff -urNp linux-2.6.39.4/drivers/usb/core/devices.c linux-2.6.39.4/drivers/usb/core/devices.c
33462 --- linux-2.6.39.4/drivers/usb/core/devices.c 2011-05-19 00:06:34.000000000 -0400
33463 +++ linux-2.6.39.4/drivers/usb/core/devices.c 2011-08-05 19:44:37.000000000 -0400
33464 @@ -126,7 +126,7 @@ static const char *format_endpt =
33465 * time it gets called.
33466 */
33467 static struct device_connect_event {
33468 - atomic_t count;
33469 + atomic_unchecked_t count;
33470 wait_queue_head_t wait;
33471 } device_event = {
33472 .count = ATOMIC_INIT(1),
33473 @@ -164,7 +164,7 @@ static const struct class_info clas_info
33474
33475 void usbfs_conn_disc_event(void)
33476 {
33477 - atomic_add(2, &device_event.count);
33478 + atomic_add_unchecked(2, &device_event.count);
33479 wake_up(&device_event.wait);
33480 }
33481
33482 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
33483
33484 poll_wait(file, &device_event.wait, wait);
33485
33486 - event_count = atomic_read(&device_event.count);
33487 + event_count = atomic_read_unchecked(&device_event.count);
33488 if (file->f_version != event_count) {
33489 file->f_version = event_count;
33490 return POLLIN | POLLRDNORM;
33491 diff -urNp linux-2.6.39.4/drivers/usb/core/message.c linux-2.6.39.4/drivers/usb/core/message.c
33492 --- linux-2.6.39.4/drivers/usb/core/message.c 2011-07-09 09:18:51.000000000 -0400
33493 +++ linux-2.6.39.4/drivers/usb/core/message.c 2011-08-05 19:44:37.000000000 -0400
33494 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
33495 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
33496 if (buf) {
33497 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
33498 - if (len > 0) {
33499 - smallbuf = kmalloc(++len, GFP_NOIO);
33500 + if (len++ > 0) {
33501 + smallbuf = kmalloc(len, GFP_NOIO);
33502 if (!smallbuf)
33503 return buf;
33504 memcpy(smallbuf, buf, len);
33505 diff -urNp linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c
33506 --- linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c 2011-05-19 00:06:34.000000000 -0400
33507 +++ linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c 2011-08-05 20:34:06.000000000 -0400
33508 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
33509
33510 #ifdef CONFIG_KGDB
33511 static struct kgdb_io kgdbdbgp_io_ops;
33512 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
33513 +static struct kgdb_io kgdbdbgp_io_ops_console;
33514 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
33515 #else
33516 #define dbgp_kgdb_mode (0)
33517 #endif
33518 @@ -1032,6 +1033,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
33519 .write_char = kgdbdbgp_write_char,
33520 };
33521
33522 +static struct kgdb_io kgdbdbgp_io_ops_console = {
33523 + .name = "kgdbdbgp",
33524 + .read_char = kgdbdbgp_read_char,
33525 + .write_char = kgdbdbgp_write_char,
33526 + .is_console = 1
33527 +};
33528 +
33529 static int kgdbdbgp_wait_time;
33530
33531 static int __init kgdbdbgp_parse_config(char *str)
33532 @@ -1047,8 +1055,10 @@ static int __init kgdbdbgp_parse_config(
33533 ptr++;
33534 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
33535 }
33536 - kgdb_register_io_module(&kgdbdbgp_io_ops);
33537 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
33538 + if (early_dbgp_console.index != -1)
33539 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
33540 + else
33541 + kgdb_register_io_module(&kgdbdbgp_io_ops);
33542
33543 return 0;
33544 }
33545 diff -urNp linux-2.6.39.4/drivers/usb/host/xhci-mem.c linux-2.6.39.4/drivers/usb/host/xhci-mem.c
33546 --- linux-2.6.39.4/drivers/usb/host/xhci-mem.c 2011-06-25 12:55:23.000000000 -0400
33547 +++ linux-2.6.39.4/drivers/usb/host/xhci-mem.c 2011-08-05 19:44:37.000000000 -0400
33548 @@ -1680,6 +1680,8 @@ static int xhci_check_trb_in_td_math(str
33549 unsigned int num_tests;
33550 int i, ret;
33551
33552 + pax_track_stack();
33553 +
33554 num_tests = ARRAY_SIZE(simple_test_vector);
33555 for (i = 0; i < num_tests; i++) {
33556 ret = xhci_test_trb_in_td(xhci,
33557 diff -urNp linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h
33558 --- linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h 2011-05-19 00:06:34.000000000 -0400
33559 +++ linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h 2011-08-05 19:44:37.000000000 -0400
33560 @@ -192,7 +192,7 @@ struct wahc {
33561 struct list_head xfer_delayed_list;
33562 spinlock_t xfer_list_lock;
33563 struct work_struct xfer_work;
33564 - atomic_t xfer_id_count;
33565 + atomic_unchecked_t xfer_id_count;
33566 };
33567
33568
33569 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
33570 INIT_LIST_HEAD(&wa->xfer_delayed_list);
33571 spin_lock_init(&wa->xfer_list_lock);
33572 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
33573 - atomic_set(&wa->xfer_id_count, 1);
33574 + atomic_set_unchecked(&wa->xfer_id_count, 1);
33575 }
33576
33577 /**
33578 diff -urNp linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c
33579 --- linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c 2011-05-19 00:06:34.000000000 -0400
33580 +++ linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c 2011-08-05 19:44:37.000000000 -0400
33581 @@ -294,7 +294,7 @@ out:
33582 */
33583 static void wa_xfer_id_init(struct wa_xfer *xfer)
33584 {
33585 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
33586 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
33587 }
33588
33589 /*
33590 diff -urNp linux-2.6.39.4/drivers/vhost/vhost.c linux-2.6.39.4/drivers/vhost/vhost.c
33591 --- linux-2.6.39.4/drivers/vhost/vhost.c 2011-05-19 00:06:34.000000000 -0400
33592 +++ linux-2.6.39.4/drivers/vhost/vhost.c 2011-08-05 19:44:37.000000000 -0400
33593 @@ -580,7 +580,7 @@ static int init_used(struct vhost_virtqu
33594 return get_user(vq->last_used_idx, &used->idx);
33595 }
33596
33597 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
33598 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
33599 {
33600 struct file *eventfp, *filep = NULL,
33601 *pollstart = NULL, *pollstop = NULL;
33602 diff -urNp linux-2.6.39.4/drivers/video/fbcmap.c linux-2.6.39.4/drivers/video/fbcmap.c
33603 --- linux-2.6.39.4/drivers/video/fbcmap.c 2011-05-19 00:06:34.000000000 -0400
33604 +++ linux-2.6.39.4/drivers/video/fbcmap.c 2011-08-05 19:44:37.000000000 -0400
33605 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
33606 rc = -ENODEV;
33607 goto out;
33608 }
33609 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
33610 - !info->fbops->fb_setcmap)) {
33611 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
33612 rc = -EINVAL;
33613 goto out1;
33614 }
33615 diff -urNp linux-2.6.39.4/drivers/video/fbmem.c linux-2.6.39.4/drivers/video/fbmem.c
33616 --- linux-2.6.39.4/drivers/video/fbmem.c 2011-05-19 00:06:34.000000000 -0400
33617 +++ linux-2.6.39.4/drivers/video/fbmem.c 2011-08-05 19:44:37.000000000 -0400
33618 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
33619 image->dx += image->width + 8;
33620 }
33621 } else if (rotate == FB_ROTATE_UD) {
33622 - for (x = 0; x < num && image->dx >= 0; x++) {
33623 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
33624 info->fbops->fb_imageblit(info, image);
33625 image->dx -= image->width + 8;
33626 }
33627 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
33628 image->dy += image->height + 8;
33629 }
33630 } else if (rotate == FB_ROTATE_CCW) {
33631 - for (x = 0; x < num && image->dy >= 0; x++) {
33632 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
33633 info->fbops->fb_imageblit(info, image);
33634 image->dy -= image->height + 8;
33635 }
33636 @@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
33637 int flags = info->flags;
33638 int ret = 0;
33639
33640 + pax_track_stack();
33641 +
33642 if (var->activate & FB_ACTIVATE_INV_MODE) {
33643 struct fb_videomode mode1, mode2;
33644
33645 @@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
33646 void __user *argp = (void __user *)arg;
33647 long ret = 0;
33648
33649 + pax_track_stack();
33650 +
33651 switch (cmd) {
33652 case FBIOGET_VSCREENINFO:
33653 if (!lock_fb_info(info))
33654 @@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
33655 return -EFAULT;
33656 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
33657 return -EINVAL;
33658 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
33659 + if (con2fb.framebuffer >= FB_MAX)
33660 return -EINVAL;
33661 if (!registered_fb[con2fb.framebuffer])
33662 request_module("fb%d", con2fb.framebuffer);
33663 diff -urNp linux-2.6.39.4/drivers/video/i810/i810_accel.c linux-2.6.39.4/drivers/video/i810/i810_accel.c
33664 --- linux-2.6.39.4/drivers/video/i810/i810_accel.c 2011-05-19 00:06:34.000000000 -0400
33665 +++ linux-2.6.39.4/drivers/video/i810/i810_accel.c 2011-08-05 19:44:37.000000000 -0400
33666 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
33667 }
33668 }
33669 printk("ringbuffer lockup!!!\n");
33670 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
33671 i810_report_error(mmio);
33672 par->dev_flags |= LOCKUP;
33673 info->pixmap.scan_align = 1;
33674 diff -urNp linux-2.6.39.4/drivers/video/udlfb.c linux-2.6.39.4/drivers/video/udlfb.c
33675 --- linux-2.6.39.4/drivers/video/udlfb.c 2011-05-19 00:06:34.000000000 -0400
33676 +++ linux-2.6.39.4/drivers/video/udlfb.c 2011-08-05 19:44:37.000000000 -0400
33677 @@ -584,11 +584,11 @@ int dlfb_handle_damage(struct dlfb_data
33678 dlfb_urb_completion(urb);
33679
33680 error:
33681 - atomic_add(bytes_sent, &dev->bytes_sent);
33682 - atomic_add(bytes_identical, &dev->bytes_identical);
33683 - atomic_add(width*height*2, &dev->bytes_rendered);
33684 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33685 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33686 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
33687 end_cycles = get_cycles();
33688 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
33689 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33690 >> 10)), /* Kcycles */
33691 &dev->cpu_kcycles_used);
33692
33693 @@ -709,11 +709,11 @@ static void dlfb_dpy_deferred_io(struct
33694 dlfb_urb_completion(urb);
33695
33696 error:
33697 - atomic_add(bytes_sent, &dev->bytes_sent);
33698 - atomic_add(bytes_identical, &dev->bytes_identical);
33699 - atomic_add(bytes_rendered, &dev->bytes_rendered);
33700 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33701 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33702 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
33703 end_cycles = get_cycles();
33704 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
33705 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33706 >> 10)), /* Kcycles */
33707 &dev->cpu_kcycles_used);
33708 }
33709 @@ -1301,7 +1301,7 @@ static ssize_t metrics_bytes_rendered_sh
33710 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33711 struct dlfb_data *dev = fb_info->par;
33712 return snprintf(buf, PAGE_SIZE, "%u\n",
33713 - atomic_read(&dev->bytes_rendered));
33714 + atomic_read_unchecked(&dev->bytes_rendered));
33715 }
33716
33717 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
33718 @@ -1309,7 +1309,7 @@ static ssize_t metrics_bytes_identical_s
33719 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33720 struct dlfb_data *dev = fb_info->par;
33721 return snprintf(buf, PAGE_SIZE, "%u\n",
33722 - atomic_read(&dev->bytes_identical));
33723 + atomic_read_unchecked(&dev->bytes_identical));
33724 }
33725
33726 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
33727 @@ -1317,7 +1317,7 @@ static ssize_t metrics_bytes_sent_show(s
33728 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33729 struct dlfb_data *dev = fb_info->par;
33730 return snprintf(buf, PAGE_SIZE, "%u\n",
33731 - atomic_read(&dev->bytes_sent));
33732 + atomic_read_unchecked(&dev->bytes_sent));
33733 }
33734
33735 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
33736 @@ -1325,7 +1325,7 @@ static ssize_t metrics_cpu_kcycles_used_
33737 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33738 struct dlfb_data *dev = fb_info->par;
33739 return snprintf(buf, PAGE_SIZE, "%u\n",
33740 - atomic_read(&dev->cpu_kcycles_used));
33741 + atomic_read_unchecked(&dev->cpu_kcycles_used));
33742 }
33743
33744 static ssize_t edid_show(
33745 @@ -1382,10 +1382,10 @@ static ssize_t metrics_reset_store(struc
33746 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33747 struct dlfb_data *dev = fb_info->par;
33748
33749 - atomic_set(&dev->bytes_rendered, 0);
33750 - atomic_set(&dev->bytes_identical, 0);
33751 - atomic_set(&dev->bytes_sent, 0);
33752 - atomic_set(&dev->cpu_kcycles_used, 0);
33753 + atomic_set_unchecked(&dev->bytes_rendered, 0);
33754 + atomic_set_unchecked(&dev->bytes_identical, 0);
33755 + atomic_set_unchecked(&dev->bytes_sent, 0);
33756 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
33757
33758 return count;
33759 }
33760 diff -urNp linux-2.6.39.4/drivers/video/uvesafb.c linux-2.6.39.4/drivers/video/uvesafb.c
33761 --- linux-2.6.39.4/drivers/video/uvesafb.c 2011-05-19 00:06:34.000000000 -0400
33762 +++ linux-2.6.39.4/drivers/video/uvesafb.c 2011-08-05 20:34:06.000000000 -0400
33763 @@ -19,6 +19,7 @@
33764 #include <linux/io.h>
33765 #include <linux/mutex.h>
33766 #include <linux/slab.h>
33767 +#include <linux/moduleloader.h>
33768 #include <video/edid.h>
33769 #include <video/uvesafb.h>
33770 #ifdef CONFIG_X86
33771 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
33772 NULL,
33773 };
33774
33775 - return call_usermodehelper(v86d_path, argv, envp, 1);
33776 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
33777 }
33778
33779 /*
33780 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
33781 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
33782 par->pmi_setpal = par->ypan = 0;
33783 } else {
33784 +
33785 +#ifdef CONFIG_PAX_KERNEXEC
33786 +#ifdef CONFIG_MODULES
33787 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
33788 +#endif
33789 + if (!par->pmi_code) {
33790 + par->pmi_setpal = par->ypan = 0;
33791 + return 0;
33792 + }
33793 +#endif
33794 +
33795 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
33796 + task->t.regs.edi);
33797 +
33798 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33799 + pax_open_kernel();
33800 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
33801 + pax_close_kernel();
33802 +
33803 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
33804 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
33805 +#else
33806 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
33807 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
33808 +#endif
33809 +
33810 printk(KERN_INFO "uvesafb: protected mode interface info at "
33811 "%04x:%04x\n",
33812 (u16)task->t.regs.es, (u16)task->t.regs.edi);
33813 @@ -1821,6 +1844,11 @@ out:
33814 if (par->vbe_modes)
33815 kfree(par->vbe_modes);
33816
33817 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33818 + if (par->pmi_code)
33819 + module_free_exec(NULL, par->pmi_code);
33820 +#endif
33821 +
33822 framebuffer_release(info);
33823 return err;
33824 }
33825 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
33826 kfree(par->vbe_state_orig);
33827 if (par->vbe_state_saved)
33828 kfree(par->vbe_state_saved);
33829 +
33830 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33831 + if (par->pmi_code)
33832 + module_free_exec(NULL, par->pmi_code);
33833 +#endif
33834 +
33835 }
33836
33837 framebuffer_release(info);
33838 diff -urNp linux-2.6.39.4/drivers/video/vesafb.c linux-2.6.39.4/drivers/video/vesafb.c
33839 --- linux-2.6.39.4/drivers/video/vesafb.c 2011-05-19 00:06:34.000000000 -0400
33840 +++ linux-2.6.39.4/drivers/video/vesafb.c 2011-08-05 20:34:06.000000000 -0400
33841 @@ -9,6 +9,7 @@
33842 */
33843
33844 #include <linux/module.h>
33845 +#include <linux/moduleloader.h>
33846 #include <linux/kernel.h>
33847 #include <linux/errno.h>
33848 #include <linux/string.h>
33849 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
33850 static int vram_total __initdata; /* Set total amount of memory */
33851 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
33852 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
33853 -static void (*pmi_start)(void) __read_mostly;
33854 -static void (*pmi_pal) (void) __read_mostly;
33855 +static void (*pmi_start)(void) __read_only;
33856 +static void (*pmi_pal) (void) __read_only;
33857 static int depth __read_mostly;
33858 static int vga_compat __read_mostly;
33859 /* --------------------------------------------------------------------- */
33860 @@ -232,6 +233,7 @@ static int __init vesafb_probe(struct pl
33861 unsigned int size_vmode;
33862 unsigned int size_remap;
33863 unsigned int size_total;
33864 + void *pmi_code = NULL;
33865
33866 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
33867 return -ENODEV;
33868 @@ -274,10 +276,6 @@ static int __init vesafb_probe(struct pl
33869 size_remap = size_total;
33870 vesafb_fix.smem_len = size_remap;
33871
33872 -#ifndef __i386__
33873 - screen_info.vesapm_seg = 0;
33874 -#endif
33875 -
33876 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
33877 printk(KERN_WARNING
33878 "vesafb: cannot reserve video memory at 0x%lx\n",
33879 @@ -306,9 +304,21 @@ static int __init vesafb_probe(struct pl
33880 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
33881 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
33882
33883 +#ifdef __i386__
33884 +
33885 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33886 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
33887 + if (!pmi_code)
33888 +#elif !defined(CONFIG_PAX_KERNEXEC)
33889 + if (0)
33890 +#endif
33891 +
33892 +#endif
33893 + screen_info.vesapm_seg = 0;
33894 +
33895 if (screen_info.vesapm_seg) {
33896 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
33897 - screen_info.vesapm_seg,screen_info.vesapm_off);
33898 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
33899 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
33900 }
33901
33902 if (screen_info.vesapm_seg < 0xc000)
33903 @@ -316,9 +326,25 @@ static int __init vesafb_probe(struct pl
33904
33905 if (ypan || pmi_setpal) {
33906 unsigned short *pmi_base;
33907 +
33908 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
33909 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
33910 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
33911 +
33912 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33913 + pax_open_kernel();
33914 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
33915 +#else
33916 + pmi_code = pmi_base;
33917 +#endif
33918 +
33919 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
33920 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
33921 +
33922 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33923 + pmi_start = ktva_ktla(pmi_start);
33924 + pmi_pal = ktva_ktla(pmi_pal);
33925 + pax_close_kernel();
33926 +#endif
33927 +
33928 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
33929 if (pmi_base[3]) {
33930 printk(KERN_INFO "vesafb: pmi: ports = ");
33931 @@ -487,6 +513,11 @@ static int __init vesafb_probe(struct pl
33932 info->node, info->fix.id);
33933 return 0;
33934 err:
33935 +
33936 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33937 + module_free_exec(NULL, pmi_code);
33938 +#endif
33939 +
33940 if (info->screen_base)
33941 iounmap(info->screen_base);
33942 framebuffer_release(info);
33943 diff -urNp linux-2.6.39.4/drivers/virtio/virtio_balloon.c linux-2.6.39.4/drivers/virtio/virtio_balloon.c
33944 --- linux-2.6.39.4/drivers/virtio/virtio_balloon.c 2011-05-19 00:06:34.000000000 -0400
33945 +++ linux-2.6.39.4/drivers/virtio/virtio_balloon.c 2011-08-05 19:44:37.000000000 -0400
33946 @@ -176,6 +176,8 @@ static void update_balloon_stats(struct
33947 struct sysinfo i;
33948 int idx = 0;
33949
33950 + pax_track_stack();
33951 +
33952 all_vm_events(events);
33953 si_meminfo(&i);
33954
33955 diff -urNp linux-2.6.39.4/fs/9p/vfs_inode.c linux-2.6.39.4/fs/9p/vfs_inode.c
33956 --- linux-2.6.39.4/fs/9p/vfs_inode.c 2011-05-19 00:06:34.000000000 -0400
33957 +++ linux-2.6.39.4/fs/9p/vfs_inode.c 2011-08-05 19:44:37.000000000 -0400
33958 @@ -1210,7 +1210,7 @@ static void *v9fs_vfs_follow_link(struct
33959 void
33960 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
33961 {
33962 - char *s = nd_get_link(nd);
33963 + const char *s = nd_get_link(nd);
33964
33965 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
33966 IS_ERR(s) ? "<error>" : s);
33967 diff -urNp linux-2.6.39.4/fs/aio.c linux-2.6.39.4/fs/aio.c
33968 --- linux-2.6.39.4/fs/aio.c 2011-05-19 00:06:34.000000000 -0400
33969 +++ linux-2.6.39.4/fs/aio.c 2011-08-05 19:44:37.000000000 -0400
33970 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
33971 size += sizeof(struct io_event) * nr_events;
33972 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
33973
33974 - if (nr_pages < 0)
33975 + if (nr_pages <= 0)
33976 return -EINVAL;
33977
33978 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
33979 @@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
33980 struct aio_timeout to;
33981 int retry = 0;
33982
33983 + pax_track_stack();
33984 +
33985 /* needed to zero any padding within an entry (there shouldn't be
33986 * any, but C is fun!
33987 */
33988 @@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
33989 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
33990 {
33991 ssize_t ret;
33992 + struct iovec iovstack;
33993
33994 #ifdef CONFIG_COMPAT
33995 if (compat)
33996 ret = compat_rw_copy_check_uvector(type,
33997 (struct compat_iovec __user *)kiocb->ki_buf,
33998 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
33999 + kiocb->ki_nbytes, 1, &iovstack,
34000 &kiocb->ki_iovec);
34001 else
34002 #endif
34003 ret = rw_copy_check_uvector(type,
34004 (struct iovec __user *)kiocb->ki_buf,
34005 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
34006 + kiocb->ki_nbytes, 1, &iovstack,
34007 &kiocb->ki_iovec);
34008 if (ret < 0)
34009 goto out;
34010
34011 + if (kiocb->ki_iovec == &iovstack) {
34012 + kiocb->ki_inline_vec = iovstack;
34013 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
34014 + }
34015 kiocb->ki_nr_segs = kiocb->ki_nbytes;
34016 kiocb->ki_cur_seg = 0;
34017 /* ki_nbytes/left now reflect bytes instead of segs */
34018 diff -urNp linux-2.6.39.4/fs/attr.c linux-2.6.39.4/fs/attr.c
34019 --- linux-2.6.39.4/fs/attr.c 2011-05-19 00:06:34.000000000 -0400
34020 +++ linux-2.6.39.4/fs/attr.c 2011-08-05 19:44:37.000000000 -0400
34021 @@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
34022 unsigned long limit;
34023
34024 limit = rlimit(RLIMIT_FSIZE);
34025 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
34026 if (limit != RLIM_INFINITY && offset > limit)
34027 goto out_sig;
34028 if (offset > inode->i_sb->s_maxbytes)
34029 diff -urNp linux-2.6.39.4/fs/befs/linuxvfs.c linux-2.6.39.4/fs/befs/linuxvfs.c
34030 --- linux-2.6.39.4/fs/befs/linuxvfs.c 2011-05-19 00:06:34.000000000 -0400
34031 +++ linux-2.6.39.4/fs/befs/linuxvfs.c 2011-08-05 19:44:37.000000000 -0400
34032 @@ -498,7 +498,7 @@ static void befs_put_link(struct dentry
34033 {
34034 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
34035 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
34036 - char *link = nd_get_link(nd);
34037 + const char *link = nd_get_link(nd);
34038 if (!IS_ERR(link))
34039 kfree(link);
34040 }
34041 diff -urNp linux-2.6.39.4/fs/binfmt_aout.c linux-2.6.39.4/fs/binfmt_aout.c
34042 --- linux-2.6.39.4/fs/binfmt_aout.c 2011-05-19 00:06:34.000000000 -0400
34043 +++ linux-2.6.39.4/fs/binfmt_aout.c 2011-08-05 19:44:37.000000000 -0400
34044 @@ -16,6 +16,7 @@
34045 #include <linux/string.h>
34046 #include <linux/fs.h>
34047 #include <linux/file.h>
34048 +#include <linux/security.h>
34049 #include <linux/stat.h>
34050 #include <linux/fcntl.h>
34051 #include <linux/ptrace.h>
34052 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
34053 #endif
34054 # define START_STACK(u) ((void __user *)u.start_stack)
34055
34056 + memset(&dump, 0, sizeof(dump));
34057 +
34058 fs = get_fs();
34059 set_fs(KERNEL_DS);
34060 has_dumped = 1;
34061 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
34062
34063 /* If the size of the dump file exceeds the rlimit, then see what would happen
34064 if we wrote the stack, but not the data area. */
34065 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
34066 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
34067 dump.u_dsize = 0;
34068
34069 /* Make sure we have enough room to write the stack and data areas. */
34070 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
34071 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
34072 dump.u_ssize = 0;
34073
34074 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
34075 rlim = rlimit(RLIMIT_DATA);
34076 if (rlim >= RLIM_INFINITY)
34077 rlim = ~0;
34078 +
34079 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
34080 if (ex.a_data + ex.a_bss > rlim)
34081 return -ENOMEM;
34082
34083 @@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
34084 install_exec_creds(bprm);
34085 current->flags &= ~PF_FORKNOEXEC;
34086
34087 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
34088 + current->mm->pax_flags = 0UL;
34089 +#endif
34090 +
34091 +#ifdef CONFIG_PAX_PAGEEXEC
34092 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
34093 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
34094 +
34095 +#ifdef CONFIG_PAX_EMUTRAMP
34096 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
34097 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
34098 +#endif
34099 +
34100 +#ifdef CONFIG_PAX_MPROTECT
34101 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
34102 + current->mm->pax_flags |= MF_PAX_MPROTECT;
34103 +#endif
34104 +
34105 + }
34106 +#endif
34107 +
34108 if (N_MAGIC(ex) == OMAGIC) {
34109 unsigned long text_addr, map_size;
34110 loff_t pos;
34111 @@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
34112
34113 down_write(&current->mm->mmap_sem);
34114 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
34115 - PROT_READ | PROT_WRITE | PROT_EXEC,
34116 + PROT_READ | PROT_WRITE,
34117 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
34118 fd_offset + ex.a_text);
34119 up_write(&current->mm->mmap_sem);
34120 diff -urNp linux-2.6.39.4/fs/binfmt_elf.c linux-2.6.39.4/fs/binfmt_elf.c
34121 --- linux-2.6.39.4/fs/binfmt_elf.c 2011-05-19 00:06:34.000000000 -0400
34122 +++ linux-2.6.39.4/fs/binfmt_elf.c 2011-08-05 19:44:37.000000000 -0400
34123 @@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
34124 #define elf_core_dump NULL
34125 #endif
34126
34127 +#ifdef CONFIG_PAX_MPROTECT
34128 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
34129 +#endif
34130 +
34131 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
34132 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
34133 #else
34134 @@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
34135 .load_binary = load_elf_binary,
34136 .load_shlib = load_elf_library,
34137 .core_dump = elf_core_dump,
34138 +
34139 +#ifdef CONFIG_PAX_MPROTECT
34140 + .handle_mprotect= elf_handle_mprotect,
34141 +#endif
34142 +
34143 .min_coredump = ELF_EXEC_PAGESIZE,
34144 };
34145
34146 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
34147
34148 static int set_brk(unsigned long start, unsigned long end)
34149 {
34150 + unsigned long e = end;
34151 +
34152 start = ELF_PAGEALIGN(start);
34153 end = ELF_PAGEALIGN(end);
34154 if (end > start) {
34155 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
34156 if (BAD_ADDR(addr))
34157 return addr;
34158 }
34159 - current->mm->start_brk = current->mm->brk = end;
34160 + current->mm->start_brk = current->mm->brk = e;
34161 return 0;
34162 }
34163
34164 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
34165 elf_addr_t __user *u_rand_bytes;
34166 const char *k_platform = ELF_PLATFORM;
34167 const char *k_base_platform = ELF_BASE_PLATFORM;
34168 - unsigned char k_rand_bytes[16];
34169 + u32 k_rand_bytes[4];
34170 int items;
34171 elf_addr_t *elf_info;
34172 int ei_index = 0;
34173 const struct cred *cred = current_cred();
34174 struct vm_area_struct *vma;
34175 + unsigned long saved_auxv[AT_VECTOR_SIZE];
34176 +
34177 + pax_track_stack();
34178
34179 /*
34180 * In some cases (e.g. Hyper-Threading), we want to avoid L1
34181 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
34182 * Generate 16 random bytes for userspace PRNG seeding.
34183 */
34184 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
34185 - u_rand_bytes = (elf_addr_t __user *)
34186 - STACK_ALLOC(p, sizeof(k_rand_bytes));
34187 + srandom32(k_rand_bytes[0] ^ random32());
34188 + srandom32(k_rand_bytes[1] ^ random32());
34189 + srandom32(k_rand_bytes[2] ^ random32());
34190 + srandom32(k_rand_bytes[3] ^ random32());
34191 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
34192 + u_rand_bytes = (elf_addr_t __user *) p;
34193 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
34194 return -EFAULT;
34195
34196 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
34197 return -EFAULT;
34198 current->mm->env_end = p;
34199
34200 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
34201 +
34202 /* Put the elf_info on the stack in the right place. */
34203 sp = (elf_addr_t __user *)envp + 1;
34204 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
34205 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
34206 return -EFAULT;
34207 return 0;
34208 }
34209 @@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
34210 {
34211 struct elf_phdr *elf_phdata;
34212 struct elf_phdr *eppnt;
34213 - unsigned long load_addr = 0;
34214 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
34215 int load_addr_set = 0;
34216 unsigned long last_bss = 0, elf_bss = 0;
34217 - unsigned long error = ~0UL;
34218 + unsigned long error = -EINVAL;
34219 unsigned long total_size;
34220 int retval, i, size;
34221
34222 @@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
34223 goto out_close;
34224 }
34225
34226 +#ifdef CONFIG_PAX_SEGMEXEC
34227 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
34228 + pax_task_size = SEGMEXEC_TASK_SIZE;
34229 +#endif
34230 +
34231 eppnt = elf_phdata;
34232 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
34233 if (eppnt->p_type == PT_LOAD) {
34234 @@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
34235 k = load_addr + eppnt->p_vaddr;
34236 if (BAD_ADDR(k) ||
34237 eppnt->p_filesz > eppnt->p_memsz ||
34238 - eppnt->p_memsz > TASK_SIZE ||
34239 - TASK_SIZE - eppnt->p_memsz < k) {
34240 + eppnt->p_memsz > pax_task_size ||
34241 + pax_task_size - eppnt->p_memsz < k) {
34242 error = -ENOMEM;
34243 goto out_close;
34244 }
34245 @@ -528,6 +553,193 @@ out:
34246 return error;
34247 }
34248
34249 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
34250 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
34251 +{
34252 + unsigned long pax_flags = 0UL;
34253 +
34254 +#ifdef CONFIG_PAX_PAGEEXEC
34255 + if (elf_phdata->p_flags & PF_PAGEEXEC)
34256 + pax_flags |= MF_PAX_PAGEEXEC;
34257 +#endif
34258 +
34259 +#ifdef CONFIG_PAX_SEGMEXEC
34260 + if (elf_phdata->p_flags & PF_SEGMEXEC)
34261 + pax_flags |= MF_PAX_SEGMEXEC;
34262 +#endif
34263 +
34264 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34265 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34266 + if ((__supported_pte_mask & _PAGE_NX))
34267 + pax_flags &= ~MF_PAX_SEGMEXEC;
34268 + else
34269 + pax_flags &= ~MF_PAX_PAGEEXEC;
34270 + }
34271 +#endif
34272 +
34273 +#ifdef CONFIG_PAX_EMUTRAMP
34274 + if (elf_phdata->p_flags & PF_EMUTRAMP)
34275 + pax_flags |= MF_PAX_EMUTRAMP;
34276 +#endif
34277 +
34278 +#ifdef CONFIG_PAX_MPROTECT
34279 + if (elf_phdata->p_flags & PF_MPROTECT)
34280 + pax_flags |= MF_PAX_MPROTECT;
34281 +#endif
34282 +
34283 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34284 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
34285 + pax_flags |= MF_PAX_RANDMMAP;
34286 +#endif
34287 +
34288 + return pax_flags;
34289 +}
34290 +#endif
34291 +
34292 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34293 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
34294 +{
34295 + unsigned long pax_flags = 0UL;
34296 +
34297 +#ifdef CONFIG_PAX_PAGEEXEC
34298 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
34299 + pax_flags |= MF_PAX_PAGEEXEC;
34300 +#endif
34301 +
34302 +#ifdef CONFIG_PAX_SEGMEXEC
34303 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
34304 + pax_flags |= MF_PAX_SEGMEXEC;
34305 +#endif
34306 +
34307 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34308 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34309 + if ((__supported_pte_mask & _PAGE_NX))
34310 + pax_flags &= ~MF_PAX_SEGMEXEC;
34311 + else
34312 + pax_flags &= ~MF_PAX_PAGEEXEC;
34313 + }
34314 +#endif
34315 +
34316 +#ifdef CONFIG_PAX_EMUTRAMP
34317 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
34318 + pax_flags |= MF_PAX_EMUTRAMP;
34319 +#endif
34320 +
34321 +#ifdef CONFIG_PAX_MPROTECT
34322 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
34323 + pax_flags |= MF_PAX_MPROTECT;
34324 +#endif
34325 +
34326 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34327 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
34328 + pax_flags |= MF_PAX_RANDMMAP;
34329 +#endif
34330 +
34331 + return pax_flags;
34332 +}
34333 +#endif
34334 +
34335 +#ifdef CONFIG_PAX_EI_PAX
34336 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
34337 +{
34338 + unsigned long pax_flags = 0UL;
34339 +
34340 +#ifdef CONFIG_PAX_PAGEEXEC
34341 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
34342 + pax_flags |= MF_PAX_PAGEEXEC;
34343 +#endif
34344 +
34345 +#ifdef CONFIG_PAX_SEGMEXEC
34346 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
34347 + pax_flags |= MF_PAX_SEGMEXEC;
34348 +#endif
34349 +
34350 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34351 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34352 + if ((__supported_pte_mask & _PAGE_NX))
34353 + pax_flags &= ~MF_PAX_SEGMEXEC;
34354 + else
34355 + pax_flags &= ~MF_PAX_PAGEEXEC;
34356 + }
34357 +#endif
34358 +
34359 +#ifdef CONFIG_PAX_EMUTRAMP
34360 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
34361 + pax_flags |= MF_PAX_EMUTRAMP;
34362 +#endif
34363 +
34364 +#ifdef CONFIG_PAX_MPROTECT
34365 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
34366 + pax_flags |= MF_PAX_MPROTECT;
34367 +#endif
34368 +
34369 +#ifdef CONFIG_PAX_ASLR
34370 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
34371 + pax_flags |= MF_PAX_RANDMMAP;
34372 +#endif
34373 +
34374 + return pax_flags;
34375 +}
34376 +#endif
34377 +
34378 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34379 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
34380 +{
34381 + unsigned long pax_flags = 0UL;
34382 +
34383 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34384 + unsigned long i;
34385 + int found_flags = 0;
34386 +#endif
34387 +
34388 +#ifdef CONFIG_PAX_EI_PAX
34389 + pax_flags = pax_parse_ei_pax(elf_ex);
34390 +#endif
34391 +
34392 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34393 + for (i = 0UL; i < elf_ex->e_phnum; i++)
34394 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
34395 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
34396 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
34397 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
34398 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
34399 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
34400 + return -EINVAL;
34401 +
34402 +#ifdef CONFIG_PAX_SOFTMODE
34403 + if (pax_softmode)
34404 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
34405 + else
34406 +#endif
34407 +
34408 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
34409 + found_flags = 1;
34410 + break;
34411 + }
34412 +#endif
34413 +
34414 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
34415 + if (found_flags == 0) {
34416 + struct elf_phdr phdr;
34417 + memset(&phdr, 0, sizeof(phdr));
34418 + phdr.p_flags = PF_NOEMUTRAMP;
34419 +#ifdef CONFIG_PAX_SOFTMODE
34420 + if (pax_softmode)
34421 + pax_flags = pax_parse_softmode(&phdr);
34422 + else
34423 +#endif
34424 + pax_flags = pax_parse_hardmode(&phdr);
34425 + }
34426 +#endif
34427 +
34428 + if (0 > pax_check_flags(&pax_flags))
34429 + return -EINVAL;
34430 +
34431 + current->mm->pax_flags = pax_flags;
34432 + return 0;
34433 +}
34434 +#endif
34435 +
34436 /*
34437 * These are the functions used to load ELF style executables and shared
34438 * libraries. There is no binary dependent code anywhere else.
34439 @@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
34440 {
34441 unsigned int random_variable = 0;
34442
34443 +#ifdef CONFIG_PAX_RANDUSTACK
34444 + if (randomize_va_space)
34445 + return stack_top - current->mm->delta_stack;
34446 +#endif
34447 +
34448 if ((current->flags & PF_RANDOMIZE) &&
34449 !(current->personality & ADDR_NO_RANDOMIZE)) {
34450 random_variable = get_random_int() & STACK_RND_MASK;
34451 @@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
34452 unsigned long load_addr = 0, load_bias = 0;
34453 int load_addr_set = 0;
34454 char * elf_interpreter = NULL;
34455 - unsigned long error;
34456 + unsigned long error = 0;
34457 struct elf_phdr *elf_ppnt, *elf_phdata;
34458 unsigned long elf_bss, elf_brk;
34459 int retval, i;
34460 @@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
34461 unsigned long start_code, end_code, start_data, end_data;
34462 unsigned long reloc_func_desc __maybe_unused = 0;
34463 int executable_stack = EXSTACK_DEFAULT;
34464 - unsigned long def_flags = 0;
34465 struct {
34466 struct elfhdr elf_ex;
34467 struct elfhdr interp_elf_ex;
34468 } *loc;
34469 + unsigned long pax_task_size = TASK_SIZE;
34470
34471 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
34472 if (!loc) {
34473 @@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
34474
34475 /* OK, This is the point of no return */
34476 current->flags &= ~PF_FORKNOEXEC;
34477 - current->mm->def_flags = def_flags;
34478 +
34479 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
34480 + current->mm->pax_flags = 0UL;
34481 +#endif
34482 +
34483 +#ifdef CONFIG_PAX_DLRESOLVE
34484 + current->mm->call_dl_resolve = 0UL;
34485 +#endif
34486 +
34487 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
34488 + current->mm->call_syscall = 0UL;
34489 +#endif
34490 +
34491 +#ifdef CONFIG_PAX_ASLR
34492 + current->mm->delta_mmap = 0UL;
34493 + current->mm->delta_stack = 0UL;
34494 +#endif
34495 +
34496 + current->mm->def_flags = 0;
34497 +
34498 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34499 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
34500 + send_sig(SIGKILL, current, 0);
34501 + goto out_free_dentry;
34502 + }
34503 +#endif
34504 +
34505 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
34506 + pax_set_initial_flags(bprm);
34507 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
34508 + if (pax_set_initial_flags_func)
34509 + (pax_set_initial_flags_func)(bprm);
34510 +#endif
34511 +
34512 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
34513 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
34514 + current->mm->context.user_cs_limit = PAGE_SIZE;
34515 + current->mm->def_flags |= VM_PAGEEXEC;
34516 + }
34517 +#endif
34518 +
34519 +#ifdef CONFIG_PAX_SEGMEXEC
34520 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
34521 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
34522 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
34523 + pax_task_size = SEGMEXEC_TASK_SIZE;
34524 + current->mm->def_flags |= VM_NOHUGEPAGE;
34525 + }
34526 +#endif
34527 +
34528 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
34529 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34530 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
34531 + put_cpu();
34532 + }
34533 +#endif
34534
34535 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
34536 may depend on the personality. */
34537 SET_PERSONALITY(loc->elf_ex);
34538 +
34539 +#ifdef CONFIG_PAX_ASLR
34540 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
34541 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
34542 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
34543 + }
34544 +#endif
34545 +
34546 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
34547 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34548 + executable_stack = EXSTACK_DISABLE_X;
34549 + current->personality &= ~READ_IMPLIES_EXEC;
34550 + } else
34551 +#endif
34552 +
34553 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
34554 current->personality |= READ_IMPLIES_EXEC;
34555
34556 @@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
34557 #else
34558 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
34559 #endif
34560 +
34561 +#ifdef CONFIG_PAX_RANDMMAP
34562 + /* PaX: randomize base address at the default exe base if requested */
34563 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
34564 +#ifdef CONFIG_SPARC64
34565 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
34566 +#else
34567 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
34568 +#endif
34569 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
34570 + elf_flags |= MAP_FIXED;
34571 + }
34572 +#endif
34573 +
34574 }
34575
34576 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
34577 @@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
34578 * allowed task size. Note that p_filesz must always be
34579 * <= p_memsz so it is only necessary to check p_memsz.
34580 */
34581 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34582 - elf_ppnt->p_memsz > TASK_SIZE ||
34583 - TASK_SIZE - elf_ppnt->p_memsz < k) {
34584 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34585 + elf_ppnt->p_memsz > pax_task_size ||
34586 + pax_task_size - elf_ppnt->p_memsz < k) {
34587 /* set_brk can never work. Avoid overflows. */
34588 send_sig(SIGKILL, current, 0);
34589 retval = -EINVAL;
34590 @@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
34591 start_data += load_bias;
34592 end_data += load_bias;
34593
34594 +#ifdef CONFIG_PAX_RANDMMAP
34595 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
34596 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
34597 +#endif
34598 +
34599 /* Calling set_brk effectively mmaps the pages that we need
34600 * for the bss and break sections. We must do this before
34601 * mapping in the interpreter, to make sure it doesn't wind
34602 @@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
34603 goto out_free_dentry;
34604 }
34605 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
34606 - send_sig(SIGSEGV, current, 0);
34607 - retval = -EFAULT; /* Nobody gets to see this, but.. */
34608 - goto out_free_dentry;
34609 + /*
34610 + * This bss-zeroing can fail if the ELF
34611 + * file specifies odd protections. So
34612 + * we don't check the return value
34613 + */
34614 }
34615
34616 if (elf_interpreter) {
34617 @@ -1090,7 +1398,7 @@ out:
34618 * Decide what to dump of a segment, part, all or none.
34619 */
34620 static unsigned long vma_dump_size(struct vm_area_struct *vma,
34621 - unsigned long mm_flags)
34622 + unsigned long mm_flags, long signr)
34623 {
34624 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
34625
34626 @@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
34627 if (vma->vm_file == NULL)
34628 return 0;
34629
34630 - if (FILTER(MAPPED_PRIVATE))
34631 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
34632 goto whole;
34633
34634 /*
34635 @@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
34636 {
34637 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
34638 int i = 0;
34639 - do
34640 + do {
34641 i += 2;
34642 - while (auxv[i - 2] != AT_NULL);
34643 + } while (auxv[i - 2] != AT_NULL);
34644 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
34645 }
34646
34647 @@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
34648 }
34649
34650 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
34651 - unsigned long mm_flags)
34652 + struct coredump_params *cprm)
34653 {
34654 struct vm_area_struct *vma;
34655 size_t size = 0;
34656
34657 for (vma = first_vma(current, gate_vma); vma != NULL;
34658 vma = next_vma(vma, gate_vma))
34659 - size += vma_dump_size(vma, mm_flags);
34660 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34661 return size;
34662 }
34663
34664 @@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
34665
34666 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
34667
34668 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
34669 + offset += elf_core_vma_data_size(gate_vma, cprm);
34670 offset += elf_core_extra_data_size();
34671 e_shoff = offset;
34672
34673 @@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
34674 offset = dataoff;
34675
34676 size += sizeof(*elf);
34677 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34678 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
34679 goto end_coredump;
34680
34681 size += sizeof(*phdr4note);
34682 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34683 if (size > cprm->limit
34684 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
34685 goto end_coredump;
34686 @@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
34687 phdr.p_offset = offset;
34688 phdr.p_vaddr = vma->vm_start;
34689 phdr.p_paddr = 0;
34690 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
34691 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34692 phdr.p_memsz = vma->vm_end - vma->vm_start;
34693 offset += phdr.p_filesz;
34694 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
34695 @@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
34696 phdr.p_align = ELF_EXEC_PAGESIZE;
34697
34698 size += sizeof(phdr);
34699 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34700 if (size > cprm->limit
34701 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
34702 goto end_coredump;
34703 @@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
34704 unsigned long addr;
34705 unsigned long end;
34706
34707 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
34708 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34709
34710 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
34711 struct page *page;
34712 @@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
34713 page = get_dump_page(addr);
34714 if (page) {
34715 void *kaddr = kmap(page);
34716 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
34717 stop = ((size += PAGE_SIZE) > cprm->limit) ||
34718 !dump_write(cprm->file, kaddr,
34719 PAGE_SIZE);
34720 @@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
34721
34722 if (e_phnum == PN_XNUM) {
34723 size += sizeof(*shdr4extnum);
34724 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34725 if (size > cprm->limit
34726 || !dump_write(cprm->file, shdr4extnum,
34727 sizeof(*shdr4extnum)))
34728 @@ -2067,6 +2380,97 @@ out:
34729
34730 #endif /* CONFIG_ELF_CORE */
34731
34732 +#ifdef CONFIG_PAX_MPROTECT
34733 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
34734 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
34735 + * we'll remove VM_MAYWRITE for good on RELRO segments.
34736 + *
34737 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
34738 + * basis because we want to allow the common case and not the special ones.
34739 + */
34740 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
34741 +{
34742 + struct elfhdr elf_h;
34743 + struct elf_phdr elf_p;
34744 + unsigned long i;
34745 + unsigned long oldflags;
34746 + bool is_textrel_rw, is_textrel_rx, is_relro;
34747 +
34748 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
34749 + return;
34750 +
34751 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
34752 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
34753 +
34754 +#ifdef CONFIG_PAX_ELFRELOCS
34755 + /* possible TEXTREL */
34756 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
34757 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
34758 +#else
34759 + is_textrel_rw = false;
34760 + is_textrel_rx = false;
34761 +#endif
34762 +
34763 + /* possible RELRO */
34764 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
34765 +
34766 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
34767 + return;
34768 +
34769 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
34770 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
34771 +
34772 +#ifdef CONFIG_PAX_ETEXECRELOCS
34773 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34774 +#else
34775 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
34776 +#endif
34777 +
34778 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34779 + !elf_check_arch(&elf_h) ||
34780 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
34781 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
34782 + return;
34783 +
34784 + for (i = 0UL; i < elf_h.e_phnum; i++) {
34785 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
34786 + return;
34787 + switch (elf_p.p_type) {
34788 + case PT_DYNAMIC:
34789 + if (!is_textrel_rw && !is_textrel_rx)
34790 + continue;
34791 + i = 0UL;
34792 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
34793 + elf_dyn dyn;
34794 +
34795 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
34796 + return;
34797 + if (dyn.d_tag == DT_NULL)
34798 + return;
34799 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
34800 + gr_log_textrel(vma);
34801 + if (is_textrel_rw)
34802 + vma->vm_flags |= VM_MAYWRITE;
34803 + else
34804 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
34805 + vma->vm_flags &= ~VM_MAYWRITE;
34806 + return;
34807 + }
34808 + i++;
34809 + }
34810 + return;
34811 +
34812 + case PT_GNU_RELRO:
34813 + if (!is_relro)
34814 + continue;
34815 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
34816 + vma->vm_flags &= ~VM_MAYWRITE;
34817 + return;
34818 + }
34819 + }
34820 +}
34821 +#endif
34822 +
34823 static int __init init_elf_binfmt(void)
34824 {
34825 return register_binfmt(&elf_format);
34826 diff -urNp linux-2.6.39.4/fs/binfmt_flat.c linux-2.6.39.4/fs/binfmt_flat.c
34827 --- linux-2.6.39.4/fs/binfmt_flat.c 2011-05-19 00:06:34.000000000 -0400
34828 +++ linux-2.6.39.4/fs/binfmt_flat.c 2011-08-05 19:44:37.000000000 -0400
34829 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
34830 realdatastart = (unsigned long) -ENOMEM;
34831 printk("Unable to allocate RAM for process data, errno %d\n",
34832 (int)-realdatastart);
34833 + down_write(&current->mm->mmap_sem);
34834 do_munmap(current->mm, textpos, text_len);
34835 + up_write(&current->mm->mmap_sem);
34836 ret = realdatastart;
34837 goto err;
34838 }
34839 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
34840 }
34841 if (IS_ERR_VALUE(result)) {
34842 printk("Unable to read data+bss, errno %d\n", (int)-result);
34843 + down_write(&current->mm->mmap_sem);
34844 do_munmap(current->mm, textpos, text_len);
34845 do_munmap(current->mm, realdatastart, len);
34846 + up_write(&current->mm->mmap_sem);
34847 ret = result;
34848 goto err;
34849 }
34850 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
34851 }
34852 if (IS_ERR_VALUE(result)) {
34853 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
34854 + down_write(&current->mm->mmap_sem);
34855 do_munmap(current->mm, textpos, text_len + data_len + extra +
34856 MAX_SHARED_LIBS * sizeof(unsigned long));
34857 + up_write(&current->mm->mmap_sem);
34858 ret = result;
34859 goto err;
34860 }
34861 diff -urNp linux-2.6.39.4/fs/bio.c linux-2.6.39.4/fs/bio.c
34862 --- linux-2.6.39.4/fs/bio.c 2011-05-19 00:06:34.000000000 -0400
34863 +++ linux-2.6.39.4/fs/bio.c 2011-08-05 19:44:37.000000000 -0400
34864 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
34865 const int read = bio_data_dir(bio) == READ;
34866 struct bio_map_data *bmd = bio->bi_private;
34867 int i;
34868 - char *p = bmd->sgvecs[0].iov_base;
34869 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
34870
34871 __bio_for_each_segment(bvec, bio, i, 0) {
34872 char *addr = page_address(bvec->bv_page);
34873 diff -urNp linux-2.6.39.4/fs/block_dev.c linux-2.6.39.4/fs/block_dev.c
34874 --- linux-2.6.39.4/fs/block_dev.c 2011-07-09 09:18:51.000000000 -0400
34875 +++ linux-2.6.39.4/fs/block_dev.c 2011-08-05 19:44:37.000000000 -0400
34876 @@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
34877 else if (bdev->bd_contains == bdev)
34878 return true; /* is a whole device which isn't held */
34879
34880 - else if (whole->bd_holder == bd_may_claim)
34881 + else if (whole->bd_holder == (void *)bd_may_claim)
34882 return true; /* is a partition of a device that is being partitioned */
34883 else if (whole->bd_holder != NULL)
34884 return false; /* is a partition of a held device */
34885 diff -urNp linux-2.6.39.4/fs/btrfs/ctree.c linux-2.6.39.4/fs/btrfs/ctree.c
34886 --- linux-2.6.39.4/fs/btrfs/ctree.c 2011-05-19 00:06:34.000000000 -0400
34887 +++ linux-2.6.39.4/fs/btrfs/ctree.c 2011-08-05 19:44:37.000000000 -0400
34888 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
34889 free_extent_buffer(buf);
34890 add_root_to_dirty_list(root);
34891 } else {
34892 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
34893 - parent_start = parent->start;
34894 - else
34895 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
34896 + if (parent)
34897 + parent_start = parent->start;
34898 + else
34899 + parent_start = 0;
34900 + } else
34901 parent_start = 0;
34902
34903 WARN_ON(trans->transid != btrfs_header_generation(parent));
34904 @@ -3647,7 +3650,6 @@ setup_items_for_insert(struct btrfs_tran
34905
34906 ret = 0;
34907 if (slot == 0) {
34908 - struct btrfs_disk_key disk_key;
34909 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
34910 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
34911 }
34912 diff -urNp linux-2.6.39.4/fs/btrfs/free-space-cache.c linux-2.6.39.4/fs/btrfs/free-space-cache.c
34913 --- linux-2.6.39.4/fs/btrfs/free-space-cache.c 2011-05-19 00:06:34.000000000 -0400
34914 +++ linux-2.6.39.4/fs/btrfs/free-space-cache.c 2011-08-05 19:44:37.000000000 -0400
34915 @@ -1910,8 +1910,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
34916 while(1) {
34917 if (entry->bytes < bytes ||
34918 (!entry->bitmap && entry->offset < min_start)) {
34919 - struct rb_node *node;
34920 -
34921 node = rb_next(&entry->offset_index);
34922 if (!node)
34923 break;
34924 @@ -1925,7 +1923,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
34925 cluster, entry, bytes,
34926 min_start);
34927 if (ret == 0) {
34928 - struct rb_node *node;
34929 node = rb_next(&entry->offset_index);
34930 if (!node)
34931 break;
34932 diff -urNp linux-2.6.39.4/fs/btrfs/inode.c linux-2.6.39.4/fs/btrfs/inode.c
34933 --- linux-2.6.39.4/fs/btrfs/inode.c 2011-05-19 00:06:34.000000000 -0400
34934 +++ linux-2.6.39.4/fs/btrfs/inode.c 2011-08-05 20:34:06.000000000 -0400
34935 @@ -6947,7 +6947,7 @@ fail:
34936 return -ENOMEM;
34937 }
34938
34939 -static int btrfs_getattr(struct vfsmount *mnt,
34940 +int btrfs_getattr(struct vfsmount *mnt,
34941 struct dentry *dentry, struct kstat *stat)
34942 {
34943 struct inode *inode = dentry->d_inode;
34944 @@ -6959,6 +6959,14 @@ static int btrfs_getattr(struct vfsmount
34945 return 0;
34946 }
34947
34948 +EXPORT_SYMBOL(btrfs_getattr);
34949 +
34950 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
34951 +{
34952 + return BTRFS_I(inode)->root->anon_super.s_dev;
34953 +}
34954 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
34955 +
34956 /*
34957 * If a file is moved, it will inherit the cow and compression flags of the new
34958 * directory.
34959 diff -urNp linux-2.6.39.4/fs/btrfs/ioctl.c linux-2.6.39.4/fs/btrfs/ioctl.c
34960 --- linux-2.6.39.4/fs/btrfs/ioctl.c 2011-05-19 00:06:34.000000000 -0400
34961 +++ linux-2.6.39.4/fs/btrfs/ioctl.c 2011-08-05 19:44:37.000000000 -0400
34962 @@ -2361,9 +2361,12 @@ long btrfs_ioctl_space_info(struct btrfs
34963 for (i = 0; i < num_types; i++) {
34964 struct btrfs_space_info *tmp;
34965
34966 + /* Don't copy in more than we allocated */
34967 if (!slot_count)
34968 break;
34969
34970 + slot_count--;
34971 +
34972 info = NULL;
34973 rcu_read_lock();
34974 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
34975 @@ -2385,10 +2388,7 @@ long btrfs_ioctl_space_info(struct btrfs
34976 memcpy(dest, &space, sizeof(space));
34977 dest++;
34978 space_args.total_spaces++;
34979 - slot_count--;
34980 }
34981 - if (!slot_count)
34982 - break;
34983 }
34984 up_read(&info->groups_sem);
34985 }
34986 diff -urNp linux-2.6.39.4/fs/btrfs/relocation.c linux-2.6.39.4/fs/btrfs/relocation.c
34987 --- linux-2.6.39.4/fs/btrfs/relocation.c 2011-05-19 00:06:34.000000000 -0400
34988 +++ linux-2.6.39.4/fs/btrfs/relocation.c 2011-08-05 19:44:37.000000000 -0400
34989 @@ -1239,7 +1239,7 @@ static int __update_reloc_root(struct bt
34990 }
34991 spin_unlock(&rc->reloc_root_tree.lock);
34992
34993 - BUG_ON((struct btrfs_root *)node->data != root);
34994 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
34995
34996 if (!del) {
34997 spin_lock(&rc->reloc_root_tree.lock);
34998 diff -urNp linux-2.6.39.4/fs/cachefiles/bind.c linux-2.6.39.4/fs/cachefiles/bind.c
34999 --- linux-2.6.39.4/fs/cachefiles/bind.c 2011-05-19 00:06:34.000000000 -0400
35000 +++ linux-2.6.39.4/fs/cachefiles/bind.c 2011-08-05 19:44:37.000000000 -0400
35001 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
35002 args);
35003
35004 /* start by checking things over */
35005 - ASSERT(cache->fstop_percent >= 0 &&
35006 - cache->fstop_percent < cache->fcull_percent &&
35007 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
35008 cache->fcull_percent < cache->frun_percent &&
35009 cache->frun_percent < 100);
35010
35011 - ASSERT(cache->bstop_percent >= 0 &&
35012 - cache->bstop_percent < cache->bcull_percent &&
35013 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
35014 cache->bcull_percent < cache->brun_percent &&
35015 cache->brun_percent < 100);
35016
35017 diff -urNp linux-2.6.39.4/fs/cachefiles/daemon.c linux-2.6.39.4/fs/cachefiles/daemon.c
35018 --- linux-2.6.39.4/fs/cachefiles/daemon.c 2011-05-19 00:06:34.000000000 -0400
35019 +++ linux-2.6.39.4/fs/cachefiles/daemon.c 2011-08-05 19:44:37.000000000 -0400
35020 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
35021 if (n > buflen)
35022 return -EMSGSIZE;
35023
35024 - if (copy_to_user(_buffer, buffer, n) != 0)
35025 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
35026 return -EFAULT;
35027
35028 return n;
35029 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
35030 if (test_bit(CACHEFILES_DEAD, &cache->flags))
35031 return -EIO;
35032
35033 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
35034 + if (datalen > PAGE_SIZE - 1)
35035 return -EOPNOTSUPP;
35036
35037 /* drag the command string into the kernel so we can parse it */
35038 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
35039 if (args[0] != '%' || args[1] != '\0')
35040 return -EINVAL;
35041
35042 - if (fstop < 0 || fstop >= cache->fcull_percent)
35043 + if (fstop >= cache->fcull_percent)
35044 return cachefiles_daemon_range_error(cache, args);
35045
35046 cache->fstop_percent = fstop;
35047 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
35048 if (args[0] != '%' || args[1] != '\0')
35049 return -EINVAL;
35050
35051 - if (bstop < 0 || bstop >= cache->bcull_percent)
35052 + if (bstop >= cache->bcull_percent)
35053 return cachefiles_daemon_range_error(cache, args);
35054
35055 cache->bstop_percent = bstop;
35056 diff -urNp linux-2.6.39.4/fs/cachefiles/internal.h linux-2.6.39.4/fs/cachefiles/internal.h
35057 --- linux-2.6.39.4/fs/cachefiles/internal.h 2011-05-19 00:06:34.000000000 -0400
35058 +++ linux-2.6.39.4/fs/cachefiles/internal.h 2011-08-05 19:44:37.000000000 -0400
35059 @@ -57,7 +57,7 @@ struct cachefiles_cache {
35060 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
35061 struct rb_root active_nodes; /* active nodes (can't be culled) */
35062 rwlock_t active_lock; /* lock for active_nodes */
35063 - atomic_t gravecounter; /* graveyard uniquifier */
35064 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
35065 unsigned frun_percent; /* when to stop culling (% files) */
35066 unsigned fcull_percent; /* when to start culling (% files) */
35067 unsigned fstop_percent; /* when to stop allocating (% files) */
35068 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
35069 * proc.c
35070 */
35071 #ifdef CONFIG_CACHEFILES_HISTOGRAM
35072 -extern atomic_t cachefiles_lookup_histogram[HZ];
35073 -extern atomic_t cachefiles_mkdir_histogram[HZ];
35074 -extern atomic_t cachefiles_create_histogram[HZ];
35075 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
35076 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
35077 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
35078
35079 extern int __init cachefiles_proc_init(void);
35080 extern void cachefiles_proc_cleanup(void);
35081 static inline
35082 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
35083 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
35084 {
35085 unsigned long jif = jiffies - start_jif;
35086 if (jif >= HZ)
35087 jif = HZ - 1;
35088 - atomic_inc(&histogram[jif]);
35089 + atomic_inc_unchecked(&histogram[jif]);
35090 }
35091
35092 #else
35093 diff -urNp linux-2.6.39.4/fs/cachefiles/namei.c linux-2.6.39.4/fs/cachefiles/namei.c
35094 --- linux-2.6.39.4/fs/cachefiles/namei.c 2011-05-19 00:06:34.000000000 -0400
35095 +++ linux-2.6.39.4/fs/cachefiles/namei.c 2011-08-05 19:44:37.000000000 -0400
35096 @@ -318,7 +318,7 @@ try_again:
35097 /* first step is to make up a grave dentry in the graveyard */
35098 sprintf(nbuffer, "%08x%08x",
35099 (uint32_t) get_seconds(),
35100 - (uint32_t) atomic_inc_return(&cache->gravecounter));
35101 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
35102
35103 /* do the multiway lock magic */
35104 trap = lock_rename(cache->graveyard, dir);
35105 diff -urNp linux-2.6.39.4/fs/cachefiles/proc.c linux-2.6.39.4/fs/cachefiles/proc.c
35106 --- linux-2.6.39.4/fs/cachefiles/proc.c 2011-05-19 00:06:34.000000000 -0400
35107 +++ linux-2.6.39.4/fs/cachefiles/proc.c 2011-08-05 19:44:37.000000000 -0400
35108 @@ -14,9 +14,9 @@
35109 #include <linux/seq_file.h>
35110 #include "internal.h"
35111
35112 -atomic_t cachefiles_lookup_histogram[HZ];
35113 -atomic_t cachefiles_mkdir_histogram[HZ];
35114 -atomic_t cachefiles_create_histogram[HZ];
35115 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
35116 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
35117 +atomic_unchecked_t cachefiles_create_histogram[HZ];
35118
35119 /*
35120 * display the latency histogram
35121 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
35122 return 0;
35123 default:
35124 index = (unsigned long) v - 3;
35125 - x = atomic_read(&cachefiles_lookup_histogram[index]);
35126 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
35127 - z = atomic_read(&cachefiles_create_histogram[index]);
35128 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
35129 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
35130 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
35131 if (x == 0 && y == 0 && z == 0)
35132 return 0;
35133
35134 diff -urNp linux-2.6.39.4/fs/cachefiles/rdwr.c linux-2.6.39.4/fs/cachefiles/rdwr.c
35135 --- linux-2.6.39.4/fs/cachefiles/rdwr.c 2011-05-19 00:06:34.000000000 -0400
35136 +++ linux-2.6.39.4/fs/cachefiles/rdwr.c 2011-08-05 19:44:37.000000000 -0400
35137 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
35138 old_fs = get_fs();
35139 set_fs(KERNEL_DS);
35140 ret = file->f_op->write(
35141 - file, (const void __user *) data, len, &pos);
35142 + file, (__force const void __user *) data, len, &pos);
35143 set_fs(old_fs);
35144 kunmap(page);
35145 if (ret != len)
35146 diff -urNp linux-2.6.39.4/fs/ceph/dir.c linux-2.6.39.4/fs/ceph/dir.c
35147 --- linux-2.6.39.4/fs/ceph/dir.c 2011-05-19 00:06:34.000000000 -0400
35148 +++ linux-2.6.39.4/fs/ceph/dir.c 2011-08-05 19:44:37.000000000 -0400
35149 @@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
35150 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
35151 struct ceph_mds_client *mdsc = fsc->mdsc;
35152 unsigned frag = fpos_frag(filp->f_pos);
35153 - int off = fpos_off(filp->f_pos);
35154 + unsigned int off = fpos_off(filp->f_pos);
35155 int err;
35156 u32 ftype;
35157 struct ceph_mds_reply_info_parsed *rinfo;
35158 @@ -360,7 +360,7 @@ more:
35159 rinfo = &fi->last_readdir->r_reply_info;
35160 dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
35161 rinfo->dir_nr, off, fi->offset);
35162 - while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) {
35163 + while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
35164 u64 pos = ceph_make_fpos(frag, off);
35165 struct ceph_mds_reply_inode *in =
35166 rinfo->dir_in[off - fi->offset].in;
35167 diff -urNp linux-2.6.39.4/fs/cifs/cifs_debug.c linux-2.6.39.4/fs/cifs/cifs_debug.c
35168 --- linux-2.6.39.4/fs/cifs/cifs_debug.c 2011-05-19 00:06:34.000000000 -0400
35169 +++ linux-2.6.39.4/fs/cifs/cifs_debug.c 2011-08-05 19:44:37.000000000 -0400
35170 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
35171 tcon = list_entry(tmp3,
35172 struct cifsTconInfo,
35173 tcon_list);
35174 - atomic_set(&tcon->num_smbs_sent, 0);
35175 - atomic_set(&tcon->num_writes, 0);
35176 - atomic_set(&tcon->num_reads, 0);
35177 - atomic_set(&tcon->num_oplock_brks, 0);
35178 - atomic_set(&tcon->num_opens, 0);
35179 - atomic_set(&tcon->num_posixopens, 0);
35180 - atomic_set(&tcon->num_posixmkdirs, 0);
35181 - atomic_set(&tcon->num_closes, 0);
35182 - atomic_set(&tcon->num_deletes, 0);
35183 - atomic_set(&tcon->num_mkdirs, 0);
35184 - atomic_set(&tcon->num_rmdirs, 0);
35185 - atomic_set(&tcon->num_renames, 0);
35186 - atomic_set(&tcon->num_t2renames, 0);
35187 - atomic_set(&tcon->num_ffirst, 0);
35188 - atomic_set(&tcon->num_fnext, 0);
35189 - atomic_set(&tcon->num_fclose, 0);
35190 - atomic_set(&tcon->num_hardlinks, 0);
35191 - atomic_set(&tcon->num_symlinks, 0);
35192 - atomic_set(&tcon->num_locks, 0);
35193 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
35194 + atomic_set_unchecked(&tcon->num_writes, 0);
35195 + atomic_set_unchecked(&tcon->num_reads, 0);
35196 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
35197 + atomic_set_unchecked(&tcon->num_opens, 0);
35198 + atomic_set_unchecked(&tcon->num_posixopens, 0);
35199 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
35200 + atomic_set_unchecked(&tcon->num_closes, 0);
35201 + atomic_set_unchecked(&tcon->num_deletes, 0);
35202 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
35203 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
35204 + atomic_set_unchecked(&tcon->num_renames, 0);
35205 + atomic_set_unchecked(&tcon->num_t2renames, 0);
35206 + atomic_set_unchecked(&tcon->num_ffirst, 0);
35207 + atomic_set_unchecked(&tcon->num_fnext, 0);
35208 + atomic_set_unchecked(&tcon->num_fclose, 0);
35209 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
35210 + atomic_set_unchecked(&tcon->num_symlinks, 0);
35211 + atomic_set_unchecked(&tcon->num_locks, 0);
35212 }
35213 }
35214 }
35215 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
35216 if (tcon->need_reconnect)
35217 seq_puts(m, "\tDISCONNECTED ");
35218 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
35219 - atomic_read(&tcon->num_smbs_sent),
35220 - atomic_read(&tcon->num_oplock_brks));
35221 + atomic_read_unchecked(&tcon->num_smbs_sent),
35222 + atomic_read_unchecked(&tcon->num_oplock_brks));
35223 seq_printf(m, "\nReads: %d Bytes: %lld",
35224 - atomic_read(&tcon->num_reads),
35225 + atomic_read_unchecked(&tcon->num_reads),
35226 (long long)(tcon->bytes_read));
35227 seq_printf(m, "\nWrites: %d Bytes: %lld",
35228 - atomic_read(&tcon->num_writes),
35229 + atomic_read_unchecked(&tcon->num_writes),
35230 (long long)(tcon->bytes_written));
35231 seq_printf(m, "\nFlushes: %d",
35232 - atomic_read(&tcon->num_flushes));
35233 + atomic_read_unchecked(&tcon->num_flushes));
35234 seq_printf(m, "\nLocks: %d HardLinks: %d "
35235 "Symlinks: %d",
35236 - atomic_read(&tcon->num_locks),
35237 - atomic_read(&tcon->num_hardlinks),
35238 - atomic_read(&tcon->num_symlinks));
35239 + atomic_read_unchecked(&tcon->num_locks),
35240 + atomic_read_unchecked(&tcon->num_hardlinks),
35241 + atomic_read_unchecked(&tcon->num_symlinks));
35242 seq_printf(m, "\nOpens: %d Closes: %d "
35243 "Deletes: %d",
35244 - atomic_read(&tcon->num_opens),
35245 - atomic_read(&tcon->num_closes),
35246 - atomic_read(&tcon->num_deletes));
35247 + atomic_read_unchecked(&tcon->num_opens),
35248 + atomic_read_unchecked(&tcon->num_closes),
35249 + atomic_read_unchecked(&tcon->num_deletes));
35250 seq_printf(m, "\nPosix Opens: %d "
35251 "Posix Mkdirs: %d",
35252 - atomic_read(&tcon->num_posixopens),
35253 - atomic_read(&tcon->num_posixmkdirs));
35254 + atomic_read_unchecked(&tcon->num_posixopens),
35255 + atomic_read_unchecked(&tcon->num_posixmkdirs));
35256 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
35257 - atomic_read(&tcon->num_mkdirs),
35258 - atomic_read(&tcon->num_rmdirs));
35259 + atomic_read_unchecked(&tcon->num_mkdirs),
35260 + atomic_read_unchecked(&tcon->num_rmdirs));
35261 seq_printf(m, "\nRenames: %d T2 Renames %d",
35262 - atomic_read(&tcon->num_renames),
35263 - atomic_read(&tcon->num_t2renames));
35264 + atomic_read_unchecked(&tcon->num_renames),
35265 + atomic_read_unchecked(&tcon->num_t2renames));
35266 seq_printf(m, "\nFindFirst: %d FNext %d "
35267 "FClose %d",
35268 - atomic_read(&tcon->num_ffirst),
35269 - atomic_read(&tcon->num_fnext),
35270 - atomic_read(&tcon->num_fclose));
35271 + atomic_read_unchecked(&tcon->num_ffirst),
35272 + atomic_read_unchecked(&tcon->num_fnext),
35273 + atomic_read_unchecked(&tcon->num_fclose));
35274 }
35275 }
35276 }
35277 diff -urNp linux-2.6.39.4/fs/cifs/cifsglob.h linux-2.6.39.4/fs/cifs/cifsglob.h
35278 --- linux-2.6.39.4/fs/cifs/cifsglob.h 2011-05-19 00:06:34.000000000 -0400
35279 +++ linux-2.6.39.4/fs/cifs/cifsglob.h 2011-08-05 19:44:37.000000000 -0400
35280 @@ -305,28 +305,28 @@ struct cifsTconInfo {
35281 __u16 Flags; /* optional support bits */
35282 enum statusEnum tidStatus;
35283 #ifdef CONFIG_CIFS_STATS
35284 - atomic_t num_smbs_sent;
35285 - atomic_t num_writes;
35286 - atomic_t num_reads;
35287 - atomic_t num_flushes;
35288 - atomic_t num_oplock_brks;
35289 - atomic_t num_opens;
35290 - atomic_t num_closes;
35291 - atomic_t num_deletes;
35292 - atomic_t num_mkdirs;
35293 - atomic_t num_posixopens;
35294 - atomic_t num_posixmkdirs;
35295 - atomic_t num_rmdirs;
35296 - atomic_t num_renames;
35297 - atomic_t num_t2renames;
35298 - atomic_t num_ffirst;
35299 - atomic_t num_fnext;
35300 - atomic_t num_fclose;
35301 - atomic_t num_hardlinks;
35302 - atomic_t num_symlinks;
35303 - atomic_t num_locks;
35304 - atomic_t num_acl_get;
35305 - atomic_t num_acl_set;
35306 + atomic_unchecked_t num_smbs_sent;
35307 + atomic_unchecked_t num_writes;
35308 + atomic_unchecked_t num_reads;
35309 + atomic_unchecked_t num_flushes;
35310 + atomic_unchecked_t num_oplock_brks;
35311 + atomic_unchecked_t num_opens;
35312 + atomic_unchecked_t num_closes;
35313 + atomic_unchecked_t num_deletes;
35314 + atomic_unchecked_t num_mkdirs;
35315 + atomic_unchecked_t num_posixopens;
35316 + atomic_unchecked_t num_posixmkdirs;
35317 + atomic_unchecked_t num_rmdirs;
35318 + atomic_unchecked_t num_renames;
35319 + atomic_unchecked_t num_t2renames;
35320 + atomic_unchecked_t num_ffirst;
35321 + atomic_unchecked_t num_fnext;
35322 + atomic_unchecked_t num_fclose;
35323 + atomic_unchecked_t num_hardlinks;
35324 + atomic_unchecked_t num_symlinks;
35325 + atomic_unchecked_t num_locks;
35326 + atomic_unchecked_t num_acl_get;
35327 + atomic_unchecked_t num_acl_set;
35328 #ifdef CONFIG_CIFS_STATS2
35329 unsigned long long time_writes;
35330 unsigned long long time_reads;
35331 @@ -509,7 +509,7 @@ static inline char CIFS_DIR_SEP(const st
35332 }
35333
35334 #ifdef CONFIG_CIFS_STATS
35335 -#define cifs_stats_inc atomic_inc
35336 +#define cifs_stats_inc atomic_inc_unchecked
35337
35338 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
35339 unsigned int bytes)
35340 diff -urNp linux-2.6.39.4/fs/cifs/link.c linux-2.6.39.4/fs/cifs/link.c
35341 --- linux-2.6.39.4/fs/cifs/link.c 2011-05-19 00:06:34.000000000 -0400
35342 +++ linux-2.6.39.4/fs/cifs/link.c 2011-08-05 19:44:37.000000000 -0400
35343 @@ -577,7 +577,7 @@ symlink_exit:
35344
35345 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
35346 {
35347 - char *p = nd_get_link(nd);
35348 + const char *p = nd_get_link(nd);
35349 if (!IS_ERR(p))
35350 kfree(p);
35351 }
35352 diff -urNp linux-2.6.39.4/fs/coda/cache.c linux-2.6.39.4/fs/coda/cache.c
35353 --- linux-2.6.39.4/fs/coda/cache.c 2011-05-19 00:06:34.000000000 -0400
35354 +++ linux-2.6.39.4/fs/coda/cache.c 2011-08-05 19:44:37.000000000 -0400
35355 @@ -24,7 +24,7 @@
35356 #include "coda_linux.h"
35357 #include "coda_cache.h"
35358
35359 -static atomic_t permission_epoch = ATOMIC_INIT(0);
35360 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
35361
35362 /* replace or extend an acl cache hit */
35363 void coda_cache_enter(struct inode *inode, int mask)
35364 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
35365 struct coda_inode_info *cii = ITOC(inode);
35366
35367 spin_lock(&cii->c_lock);
35368 - cii->c_cached_epoch = atomic_read(&permission_epoch);
35369 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
35370 if (cii->c_uid != current_fsuid()) {
35371 cii->c_uid = current_fsuid();
35372 cii->c_cached_perm = mask;
35373 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
35374 {
35375 struct coda_inode_info *cii = ITOC(inode);
35376 spin_lock(&cii->c_lock);
35377 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
35378 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
35379 spin_unlock(&cii->c_lock);
35380 }
35381
35382 /* remove all acl caches */
35383 void coda_cache_clear_all(struct super_block *sb)
35384 {
35385 - atomic_inc(&permission_epoch);
35386 + atomic_inc_unchecked(&permission_epoch);
35387 }
35388
35389
35390 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
35391 spin_lock(&cii->c_lock);
35392 hit = (mask & cii->c_cached_perm) == mask &&
35393 cii->c_uid == current_fsuid() &&
35394 - cii->c_cached_epoch == atomic_read(&permission_epoch);
35395 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
35396 spin_unlock(&cii->c_lock);
35397
35398 return hit;
35399 diff -urNp linux-2.6.39.4/fs/compat_binfmt_elf.c linux-2.6.39.4/fs/compat_binfmt_elf.c
35400 --- linux-2.6.39.4/fs/compat_binfmt_elf.c 2011-05-19 00:06:34.000000000 -0400
35401 +++ linux-2.6.39.4/fs/compat_binfmt_elf.c 2011-08-05 19:44:37.000000000 -0400
35402 @@ -30,11 +30,13 @@
35403 #undef elf_phdr
35404 #undef elf_shdr
35405 #undef elf_note
35406 +#undef elf_dyn
35407 #undef elf_addr_t
35408 #define elfhdr elf32_hdr
35409 #define elf_phdr elf32_phdr
35410 #define elf_shdr elf32_shdr
35411 #define elf_note elf32_note
35412 +#define elf_dyn Elf32_Dyn
35413 #define elf_addr_t Elf32_Addr
35414
35415 /*
35416 diff -urNp linux-2.6.39.4/fs/compat.c linux-2.6.39.4/fs/compat.c
35417 --- linux-2.6.39.4/fs/compat.c 2011-05-19 00:06:34.000000000 -0400
35418 +++ linux-2.6.39.4/fs/compat.c 2011-08-05 19:44:37.000000000 -0400
35419 @@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
35420 goto out;
35421
35422 ret = -EINVAL;
35423 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
35424 + if (nr_segs > UIO_MAXIOV)
35425 goto out;
35426 if (nr_segs > fast_segs) {
35427 ret = -ENOMEM;
35428 @@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
35429
35430 struct compat_readdir_callback {
35431 struct compat_old_linux_dirent __user *dirent;
35432 + struct file * file;
35433 int result;
35434 };
35435
35436 @@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
35437 buf->result = -EOVERFLOW;
35438 return -EOVERFLOW;
35439 }
35440 +
35441 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35442 + return 0;
35443 +
35444 buf->result++;
35445 dirent = buf->dirent;
35446 if (!access_ok(VERIFY_WRITE, dirent,
35447 @@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
35448
35449 buf.result = 0;
35450 buf.dirent = dirent;
35451 + buf.file = file;
35452
35453 error = vfs_readdir(file, compat_fillonedir, &buf);
35454 if (buf.result)
35455 @@ -917,6 +923,7 @@ struct compat_linux_dirent {
35456 struct compat_getdents_callback {
35457 struct compat_linux_dirent __user *current_dir;
35458 struct compat_linux_dirent __user *previous;
35459 + struct file * file;
35460 int count;
35461 int error;
35462 };
35463 @@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
35464 buf->error = -EOVERFLOW;
35465 return -EOVERFLOW;
35466 }
35467 +
35468 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35469 + return 0;
35470 +
35471 dirent = buf->previous;
35472 if (dirent) {
35473 if (__put_user(offset, &dirent->d_off))
35474 @@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
35475 buf.previous = NULL;
35476 buf.count = count;
35477 buf.error = 0;
35478 + buf.file = file;
35479
35480 error = vfs_readdir(file, compat_filldir, &buf);
35481 if (error >= 0)
35482 @@ -1006,6 +1018,7 @@ out:
35483 struct compat_getdents_callback64 {
35484 struct linux_dirent64 __user *current_dir;
35485 struct linux_dirent64 __user *previous;
35486 + struct file * file;
35487 int count;
35488 int error;
35489 };
35490 @@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
35491 buf->error = -EINVAL; /* only used if we fail.. */
35492 if (reclen > buf->count)
35493 return -EINVAL;
35494 +
35495 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35496 + return 0;
35497 +
35498 dirent = buf->previous;
35499
35500 if (dirent) {
35501 @@ -1073,6 +1090,7 @@ asmlinkage long compat_sys_getdents64(un
35502 buf.previous = NULL;
35503 buf.count = count;
35504 buf.error = 0;
35505 + buf.file = file;
35506
35507 error = vfs_readdir(file, compat_filldir64, &buf);
35508 if (error >= 0)
35509 @@ -1436,6 +1454,11 @@ int compat_do_execve(char * filename,
35510 compat_uptr_t __user *envp,
35511 struct pt_regs * regs)
35512 {
35513 +#ifdef CONFIG_GRKERNSEC
35514 + struct file *old_exec_file;
35515 + struct acl_subject_label *old_acl;
35516 + struct rlimit old_rlim[RLIM_NLIMITS];
35517 +#endif
35518 struct linux_binprm *bprm;
35519 struct file *file;
35520 struct files_struct *displaced;
35521 @@ -1472,6 +1495,19 @@ int compat_do_execve(char * filename,
35522 bprm->filename = filename;
35523 bprm->interp = filename;
35524
35525 + if (gr_process_user_ban()) {
35526 + retval = -EPERM;
35527 + goto out_file;
35528 + }
35529 +
35530 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
35531 + retval = -EAGAIN;
35532 + if (gr_handle_nproc())
35533 + goto out_file;
35534 + retval = -EACCES;
35535 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
35536 + goto out_file;
35537 +
35538 retval = bprm_mm_init(bprm);
35539 if (retval)
35540 goto out_file;
35541 @@ -1501,9 +1537,40 @@ int compat_do_execve(char * filename,
35542 if (retval < 0)
35543 goto out;
35544
35545 + if (!gr_tpe_allow(file)) {
35546 + retval = -EACCES;
35547 + goto out;
35548 + }
35549 +
35550 + if (gr_check_crash_exec(file)) {
35551 + retval = -EACCES;
35552 + goto out;
35553 + }
35554 +
35555 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
35556 +
35557 + gr_handle_exec_args_compat(bprm, argv);
35558 +
35559 +#ifdef CONFIG_GRKERNSEC
35560 + old_acl = current->acl;
35561 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
35562 + old_exec_file = current->exec_file;
35563 + get_file(file);
35564 + current->exec_file = file;
35565 +#endif
35566 +
35567 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
35568 + bprm->unsafe & LSM_UNSAFE_SHARE);
35569 + if (retval < 0)
35570 + goto out_fail;
35571 +
35572 retval = search_binary_handler(bprm, regs);
35573 if (retval < 0)
35574 - goto out;
35575 + goto out_fail;
35576 +#ifdef CONFIG_GRKERNSEC
35577 + if (old_exec_file)
35578 + fput(old_exec_file);
35579 +#endif
35580
35581 /* execve succeeded */
35582 current->fs->in_exec = 0;
35583 @@ -1514,6 +1581,14 @@ int compat_do_execve(char * filename,
35584 put_files_struct(displaced);
35585 return retval;
35586
35587 +out_fail:
35588 +#ifdef CONFIG_GRKERNSEC
35589 + current->acl = old_acl;
35590 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
35591 + fput(current->exec_file);
35592 + current->exec_file = old_exec_file;
35593 +#endif
35594 +
35595 out:
35596 if (bprm->mm) {
35597 acct_arg_size(bprm, 0);
35598 @@ -1681,6 +1756,8 @@ int compat_core_sys_select(int n, compat
35599 struct fdtable *fdt;
35600 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
35601
35602 + pax_track_stack();
35603 +
35604 if (n < 0)
35605 goto out_nofds;
35606
35607 diff -urNp linux-2.6.39.4/fs/compat_ioctl.c linux-2.6.39.4/fs/compat_ioctl.c
35608 --- linux-2.6.39.4/fs/compat_ioctl.c 2011-05-19 00:06:34.000000000 -0400
35609 +++ linux-2.6.39.4/fs/compat_ioctl.c 2011-08-05 19:44:37.000000000 -0400
35610 @@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
35611
35612 err = get_user(palp, &up->palette);
35613 err |= get_user(length, &up->length);
35614 + if (err)
35615 + return -EFAULT;
35616
35617 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
35618 err = put_user(compat_ptr(palp), &up_native->palette);
35619 @@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
35620 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
35621 {
35622 unsigned int a, b;
35623 - a = *(unsigned int *)p;
35624 - b = *(unsigned int *)q;
35625 + a = *(const unsigned int *)p;
35626 + b = *(const unsigned int *)q;
35627 if (a > b)
35628 return 1;
35629 if (a < b)
35630 diff -urNp linux-2.6.39.4/fs/configfs/dir.c linux-2.6.39.4/fs/configfs/dir.c
35631 --- linux-2.6.39.4/fs/configfs/dir.c 2011-05-19 00:06:34.000000000 -0400
35632 +++ linux-2.6.39.4/fs/configfs/dir.c 2011-08-05 19:44:37.000000000 -0400
35633 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
35634 }
35635 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
35636 struct configfs_dirent *next;
35637 - const char * name;
35638 + const unsigned char * name;
35639 + char d_name[sizeof(next->s_dentry->d_iname)];
35640 int len;
35641 struct inode *inode = NULL;
35642
35643 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
35644 continue;
35645
35646 name = configfs_get_name(next);
35647 - len = strlen(name);
35648 + if (next->s_dentry && name == next->s_dentry->d_iname) {
35649 + len = next->s_dentry->d_name.len;
35650 + memcpy(d_name, name, len);
35651 + name = d_name;
35652 + } else
35653 + len = strlen(name);
35654
35655 /*
35656 * We'll have a dentry and an inode for
35657 diff -urNp linux-2.6.39.4/fs/dcache.c linux-2.6.39.4/fs/dcache.c
35658 --- linux-2.6.39.4/fs/dcache.c 2011-05-19 00:06:34.000000000 -0400
35659 +++ linux-2.6.39.4/fs/dcache.c 2011-08-05 19:44:37.000000000 -0400
35660 @@ -3069,7 +3069,7 @@ void __init vfs_caches_init(unsigned lon
35661 mempages -= reserve;
35662
35663 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
35664 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
35665 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
35666
35667 dcache_init();
35668 inode_init();
35669 diff -urNp linux-2.6.39.4/fs/ecryptfs/inode.c linux-2.6.39.4/fs/ecryptfs/inode.c
35670 --- linux-2.6.39.4/fs/ecryptfs/inode.c 2011-06-03 00:04:14.000000000 -0400
35671 +++ linux-2.6.39.4/fs/ecryptfs/inode.c 2011-08-05 19:44:37.000000000 -0400
35672 @@ -623,7 +623,7 @@ static int ecryptfs_readlink_lower(struc
35673 old_fs = get_fs();
35674 set_fs(get_ds());
35675 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
35676 - (char __user *)lower_buf,
35677 + (__force char __user *)lower_buf,
35678 lower_bufsiz);
35679 set_fs(old_fs);
35680 if (rc < 0)
35681 @@ -669,7 +669,7 @@ static void *ecryptfs_follow_link(struct
35682 }
35683 old_fs = get_fs();
35684 set_fs(get_ds());
35685 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
35686 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
35687 set_fs(old_fs);
35688 if (rc < 0) {
35689 kfree(buf);
35690 @@ -684,7 +684,7 @@ out:
35691 static void
35692 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
35693 {
35694 - char *buf = nd_get_link(nd);
35695 + const char *buf = nd_get_link(nd);
35696 if (!IS_ERR(buf)) {
35697 /* Free the char* */
35698 kfree(buf);
35699 diff -urNp linux-2.6.39.4/fs/ecryptfs/miscdev.c linux-2.6.39.4/fs/ecryptfs/miscdev.c
35700 --- linux-2.6.39.4/fs/ecryptfs/miscdev.c 2011-05-19 00:06:34.000000000 -0400
35701 +++ linux-2.6.39.4/fs/ecryptfs/miscdev.c 2011-08-05 19:44:37.000000000 -0400
35702 @@ -328,7 +328,7 @@ check_list:
35703 goto out_unlock_msg_ctx;
35704 i = 5;
35705 if (msg_ctx->msg) {
35706 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
35707 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
35708 goto out_unlock_msg_ctx;
35709 i += packet_length_size;
35710 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
35711 diff -urNp linux-2.6.39.4/fs/exec.c linux-2.6.39.4/fs/exec.c
35712 --- linux-2.6.39.4/fs/exec.c 2011-06-25 12:55:23.000000000 -0400
35713 +++ linux-2.6.39.4/fs/exec.c 2011-08-05 19:44:37.000000000 -0400
35714 @@ -55,12 +55,24 @@
35715 #include <linux/fs_struct.h>
35716 #include <linux/pipe_fs_i.h>
35717 #include <linux/oom.h>
35718 +#include <linux/random.h>
35719 +#include <linux/seq_file.h>
35720 +
35721 +#ifdef CONFIG_PAX_REFCOUNT
35722 +#include <linux/kallsyms.h>
35723 +#include <linux/kdebug.h>
35724 +#endif
35725
35726 #include <asm/uaccess.h>
35727 #include <asm/mmu_context.h>
35728 #include <asm/tlb.h>
35729 #include "internal.h"
35730
35731 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
35732 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
35733 +EXPORT_SYMBOL(pax_set_initial_flags_func);
35734 +#endif
35735 +
35736 int core_uses_pid;
35737 char core_pattern[CORENAME_MAX_SIZE] = "core";
35738 unsigned int core_pipe_limit;
35739 @@ -70,7 +82,7 @@ struct core_name {
35740 char *corename;
35741 int used, size;
35742 };
35743 -static atomic_t call_count = ATOMIC_INIT(1);
35744 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
35745
35746 /* The maximal length of core_pattern is also specified in sysctl.c */
35747
35748 @@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
35749 char *tmp = getname(library);
35750 int error = PTR_ERR(tmp);
35751 static const struct open_flags uselib_flags = {
35752 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35753 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35754 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
35755 .intent = LOOKUP_OPEN
35756 };
35757 @@ -190,18 +202,10 @@ struct page *get_arg_page(struct linux_b
35758 int write)
35759 {
35760 struct page *page;
35761 - int ret;
35762
35763 -#ifdef CONFIG_STACK_GROWSUP
35764 - if (write) {
35765 - ret = expand_stack_downwards(bprm->vma, pos);
35766 - if (ret < 0)
35767 - return NULL;
35768 - }
35769 -#endif
35770 - ret = get_user_pages(current, bprm->mm, pos,
35771 - 1, write, 1, &page, NULL);
35772 - if (ret <= 0)
35773 + if (0 > expand_stack_downwards(bprm->vma, pos))
35774 + return NULL;
35775 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
35776 return NULL;
35777
35778 if (write) {
35779 @@ -276,6 +280,11 @@ static int __bprm_mm_init(struct linux_b
35780 vma->vm_end = STACK_TOP_MAX;
35781 vma->vm_start = vma->vm_end - PAGE_SIZE;
35782 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
35783 +
35784 +#ifdef CONFIG_PAX_SEGMEXEC
35785 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
35786 +#endif
35787 +
35788 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
35789 INIT_LIST_HEAD(&vma->anon_vma_chain);
35790
35791 @@ -290,6 +299,12 @@ static int __bprm_mm_init(struct linux_b
35792 mm->stack_vm = mm->total_vm = 1;
35793 up_write(&mm->mmap_sem);
35794 bprm->p = vma->vm_end - sizeof(void *);
35795 +
35796 +#ifdef CONFIG_PAX_RANDUSTACK
35797 + if (randomize_va_space)
35798 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
35799 +#endif
35800 +
35801 return 0;
35802 err:
35803 up_write(&mm->mmap_sem);
35804 @@ -525,7 +540,7 @@ int copy_strings_kernel(int argc, const
35805 int r;
35806 mm_segment_t oldfs = get_fs();
35807 set_fs(KERNEL_DS);
35808 - r = copy_strings(argc, (const char __user *const __user *)argv, bprm);
35809 + r = copy_strings(argc, (__force const char __user *const __user *)argv, bprm);
35810 set_fs(oldfs);
35811 return r;
35812 }
35813 @@ -555,7 +570,8 @@ static int shift_arg_pages(struct vm_are
35814 unsigned long new_end = old_end - shift;
35815 struct mmu_gather *tlb;
35816
35817 - BUG_ON(new_start > new_end);
35818 + if (new_start >= new_end || new_start < mmap_min_addr)
35819 + return -ENOMEM;
35820
35821 /*
35822 * ensure there are no vmas between where we want to go
35823 @@ -564,6 +580,10 @@ static int shift_arg_pages(struct vm_are
35824 if (vma != find_vma(mm, new_start))
35825 return -EFAULT;
35826
35827 +#ifdef CONFIG_PAX_SEGMEXEC
35828 + BUG_ON(pax_find_mirror_vma(vma));
35829 +#endif
35830 +
35831 /*
35832 * cover the whole range: [new_start, old_end)
35833 */
35834 @@ -644,10 +664,6 @@ int setup_arg_pages(struct linux_binprm
35835 stack_top = arch_align_stack(stack_top);
35836 stack_top = PAGE_ALIGN(stack_top);
35837
35838 - if (unlikely(stack_top < mmap_min_addr) ||
35839 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
35840 - return -ENOMEM;
35841 -
35842 stack_shift = vma->vm_end - stack_top;
35843
35844 bprm->p -= stack_shift;
35845 @@ -659,8 +675,28 @@ int setup_arg_pages(struct linux_binprm
35846 bprm->exec -= stack_shift;
35847
35848 down_write(&mm->mmap_sem);
35849 +
35850 + /* Move stack pages down in memory. */
35851 + if (stack_shift) {
35852 + ret = shift_arg_pages(vma, stack_shift);
35853 + if (ret)
35854 + goto out_unlock;
35855 + }
35856 +
35857 vm_flags = VM_STACK_FLAGS;
35858
35859 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
35860 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
35861 + vm_flags &= ~VM_EXEC;
35862 +
35863 +#ifdef CONFIG_PAX_MPROTECT
35864 + if (mm->pax_flags & MF_PAX_MPROTECT)
35865 + vm_flags &= ~VM_MAYEXEC;
35866 +#endif
35867 +
35868 + }
35869 +#endif
35870 +
35871 /*
35872 * Adjust stack execute permissions; explicitly enable for
35873 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
35874 @@ -679,13 +715,6 @@ int setup_arg_pages(struct linux_binprm
35875 goto out_unlock;
35876 BUG_ON(prev != vma);
35877
35878 - /* Move stack pages down in memory. */
35879 - if (stack_shift) {
35880 - ret = shift_arg_pages(vma, stack_shift);
35881 - if (ret)
35882 - goto out_unlock;
35883 - }
35884 -
35885 /* mprotect_fixup is overkill to remove the temporary stack flags */
35886 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
35887
35888 @@ -725,7 +754,7 @@ struct file *open_exec(const char *name)
35889 struct file *file;
35890 int err;
35891 static const struct open_flags open_exec_flags = {
35892 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35893 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35894 .acc_mode = MAY_EXEC | MAY_OPEN,
35895 .intent = LOOKUP_OPEN
35896 };
35897 @@ -766,7 +795,7 @@ int kernel_read(struct file *file, loff_
35898 old_fs = get_fs();
35899 set_fs(get_ds());
35900 /* The cast to a user pointer is valid due to the set_fs() */
35901 - result = vfs_read(file, (void __user *)addr, count, &pos);
35902 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
35903 set_fs(old_fs);
35904 return result;
35905 }
35906 @@ -1189,7 +1218,7 @@ int check_unsafe_exec(struct linux_binpr
35907 }
35908 rcu_read_unlock();
35909
35910 - if (p->fs->users > n_fs) {
35911 + if (atomic_read(&p->fs->users) > n_fs) {
35912 bprm->unsafe |= LSM_UNSAFE_SHARE;
35913 } else {
35914 res = -EAGAIN;
35915 @@ -1381,6 +1410,11 @@ int do_execve(const char * filename,
35916 const char __user *const __user *envp,
35917 struct pt_regs * regs)
35918 {
35919 +#ifdef CONFIG_GRKERNSEC
35920 + struct file *old_exec_file;
35921 + struct acl_subject_label *old_acl;
35922 + struct rlimit old_rlim[RLIM_NLIMITS];
35923 +#endif
35924 struct linux_binprm *bprm;
35925 struct file *file;
35926 struct files_struct *displaced;
35927 @@ -1417,6 +1451,23 @@ int do_execve(const char * filename,
35928 bprm->filename = filename;
35929 bprm->interp = filename;
35930
35931 + if (gr_process_user_ban()) {
35932 + retval = -EPERM;
35933 + goto out_file;
35934 + }
35935 +
35936 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
35937 +
35938 + if (gr_handle_nproc()) {
35939 + retval = -EAGAIN;
35940 + goto out_file;
35941 + }
35942 +
35943 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
35944 + retval = -EACCES;
35945 + goto out_file;
35946 + }
35947 +
35948 retval = bprm_mm_init(bprm);
35949 if (retval)
35950 goto out_file;
35951 @@ -1446,9 +1497,40 @@ int do_execve(const char * filename,
35952 if (retval < 0)
35953 goto out;
35954
35955 + if (!gr_tpe_allow(file)) {
35956 + retval = -EACCES;
35957 + goto out;
35958 + }
35959 +
35960 + if (gr_check_crash_exec(file)) {
35961 + retval = -EACCES;
35962 + goto out;
35963 + }
35964 +
35965 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
35966 +
35967 + gr_handle_exec_args(bprm, argv);
35968 +
35969 +#ifdef CONFIG_GRKERNSEC
35970 + old_acl = current->acl;
35971 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
35972 + old_exec_file = current->exec_file;
35973 + get_file(file);
35974 + current->exec_file = file;
35975 +#endif
35976 +
35977 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
35978 + bprm->unsafe & LSM_UNSAFE_SHARE);
35979 + if (retval < 0)
35980 + goto out_fail;
35981 +
35982 retval = search_binary_handler(bprm,regs);
35983 if (retval < 0)
35984 - goto out;
35985 + goto out_fail;
35986 +#ifdef CONFIG_GRKERNSEC
35987 + if (old_exec_file)
35988 + fput(old_exec_file);
35989 +#endif
35990
35991 /* execve succeeded */
35992 current->fs->in_exec = 0;
35993 @@ -1459,6 +1541,14 @@ int do_execve(const char * filename,
35994 put_files_struct(displaced);
35995 return retval;
35996
35997 +out_fail:
35998 +#ifdef CONFIG_GRKERNSEC
35999 + current->acl = old_acl;
36000 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
36001 + fput(current->exec_file);
36002 + current->exec_file = old_exec_file;
36003 +#endif
36004 +
36005 out:
36006 if (bprm->mm) {
36007 acct_arg_size(bprm, 0);
36008 @@ -1504,7 +1594,7 @@ static int expand_corename(struct core_n
36009 {
36010 char *old_corename = cn->corename;
36011
36012 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
36013 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
36014 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
36015
36016 if (!cn->corename) {
36017 @@ -1557,7 +1647,7 @@ static int format_corename(struct core_n
36018 int pid_in_pattern = 0;
36019 int err = 0;
36020
36021 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
36022 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
36023 cn->corename = kmalloc(cn->size, GFP_KERNEL);
36024 cn->used = 0;
36025
36026 @@ -1645,6 +1735,219 @@ out:
36027 return ispipe;
36028 }
36029
36030 +int pax_check_flags(unsigned long *flags)
36031 +{
36032 + int retval = 0;
36033 +
36034 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
36035 + if (*flags & MF_PAX_SEGMEXEC)
36036 + {
36037 + *flags &= ~MF_PAX_SEGMEXEC;
36038 + retval = -EINVAL;
36039 + }
36040 +#endif
36041 +
36042 + if ((*flags & MF_PAX_PAGEEXEC)
36043 +
36044 +#ifdef CONFIG_PAX_PAGEEXEC
36045 + && (*flags & MF_PAX_SEGMEXEC)
36046 +#endif
36047 +
36048 + )
36049 + {
36050 + *flags &= ~MF_PAX_PAGEEXEC;
36051 + retval = -EINVAL;
36052 + }
36053 +
36054 + if ((*flags & MF_PAX_MPROTECT)
36055 +
36056 +#ifdef CONFIG_PAX_MPROTECT
36057 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
36058 +#endif
36059 +
36060 + )
36061 + {
36062 + *flags &= ~MF_PAX_MPROTECT;
36063 + retval = -EINVAL;
36064 + }
36065 +
36066 + if ((*flags & MF_PAX_EMUTRAMP)
36067 +
36068 +#ifdef CONFIG_PAX_EMUTRAMP
36069 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
36070 +#endif
36071 +
36072 + )
36073 + {
36074 + *flags &= ~MF_PAX_EMUTRAMP;
36075 + retval = -EINVAL;
36076 + }
36077 +
36078 + return retval;
36079 +}
36080 +
36081 +EXPORT_SYMBOL(pax_check_flags);
36082 +
36083 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
36084 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
36085 +{
36086 + struct task_struct *tsk = current;
36087 + struct mm_struct *mm = current->mm;
36088 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
36089 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
36090 + char *path_exec = NULL;
36091 + char *path_fault = NULL;
36092 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
36093 +
36094 + if (buffer_exec && buffer_fault) {
36095 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
36096 +
36097 + down_read(&mm->mmap_sem);
36098 + vma = mm->mmap;
36099 + while (vma && (!vma_exec || !vma_fault)) {
36100 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
36101 + vma_exec = vma;
36102 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
36103 + vma_fault = vma;
36104 + vma = vma->vm_next;
36105 + }
36106 + if (vma_exec) {
36107 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
36108 + if (IS_ERR(path_exec))
36109 + path_exec = "<path too long>";
36110 + else {
36111 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
36112 + if (path_exec) {
36113 + *path_exec = 0;
36114 + path_exec = buffer_exec;
36115 + } else
36116 + path_exec = "<path too long>";
36117 + }
36118 + }
36119 + if (vma_fault) {
36120 + start = vma_fault->vm_start;
36121 + end = vma_fault->vm_end;
36122 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
36123 + if (vma_fault->vm_file) {
36124 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
36125 + if (IS_ERR(path_fault))
36126 + path_fault = "<path too long>";
36127 + else {
36128 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
36129 + if (path_fault) {
36130 + *path_fault = 0;
36131 + path_fault = buffer_fault;
36132 + } else
36133 + path_fault = "<path too long>";
36134 + }
36135 + } else
36136 + path_fault = "<anonymous mapping>";
36137 + }
36138 + up_read(&mm->mmap_sem);
36139 + }
36140 + if (tsk->signal->curr_ip)
36141 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
36142 + else
36143 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
36144 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
36145 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
36146 + task_uid(tsk), task_euid(tsk), pc, sp);
36147 + free_page((unsigned long)buffer_exec);
36148 + free_page((unsigned long)buffer_fault);
36149 + pax_report_insns(pc, sp);
36150 + do_coredump(SIGKILL, SIGKILL, regs);
36151 +}
36152 +#endif
36153 +
36154 +#ifdef CONFIG_PAX_REFCOUNT
36155 +void pax_report_refcount_overflow(struct pt_regs *regs)
36156 +{
36157 + if (current->signal->curr_ip)
36158 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
36159 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
36160 + else
36161 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
36162 + current->comm, task_pid_nr(current), current_uid(), current_euid());
36163 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
36164 + show_regs(regs);
36165 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
36166 +}
36167 +#endif
36168 +
36169 +#ifdef CONFIG_PAX_USERCOPY
36170 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
36171 +int object_is_on_stack(const void *obj, unsigned long len)
36172 +{
36173 + const void * const stack = task_stack_page(current);
36174 + const void * const stackend = stack + THREAD_SIZE;
36175 +
36176 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
36177 + const void *frame = NULL;
36178 + const void *oldframe;
36179 +#endif
36180 +
36181 + if (obj + len < obj)
36182 + return -1;
36183 +
36184 + if (obj + len <= stack || stackend <= obj)
36185 + return 0;
36186 +
36187 + if (obj < stack || stackend < obj + len)
36188 + return -1;
36189 +
36190 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
36191 + oldframe = __builtin_frame_address(1);
36192 + if (oldframe)
36193 + frame = __builtin_frame_address(2);
36194 + /*
36195 + low ----------------------------------------------> high
36196 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
36197 + ^----------------^
36198 + allow copies only within here
36199 + */
36200 + while (stack <= frame && frame < stackend) {
36201 + /* if obj + len extends past the last frame, this
36202 + check won't pass and the next frame will be 0,
36203 + causing us to bail out and correctly report
36204 + the copy as invalid
36205 + */
36206 + if (obj + len <= frame)
36207 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
36208 + oldframe = frame;
36209 + frame = *(const void * const *)frame;
36210 + }
36211 + return -1;
36212 +#else
36213 + return 1;
36214 +#endif
36215 +}
36216 +
36217 +
36218 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
36219 +{
36220 + if (current->signal->curr_ip)
36221 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
36222 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
36223 + else
36224 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
36225 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
36226 + dump_stack();
36227 + gr_handle_kernel_exploit();
36228 + do_group_exit(SIGKILL);
36229 +}
36230 +#endif
36231 +
36232 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
36233 +void pax_track_stack(void)
36234 +{
36235 + unsigned long sp = (unsigned long)&sp;
36236 + if (sp < current_thread_info()->lowest_stack &&
36237 + sp > (unsigned long)task_stack_page(current))
36238 + current_thread_info()->lowest_stack = sp;
36239 +}
36240 +EXPORT_SYMBOL(pax_track_stack);
36241 +#endif
36242 +
36243 static int zap_process(struct task_struct *start, int exit_code)
36244 {
36245 struct task_struct *t;
36246 @@ -1855,17 +2158,17 @@ static void wait_for_dump_helpers(struct
36247 pipe = file->f_path.dentry->d_inode->i_pipe;
36248
36249 pipe_lock(pipe);
36250 - pipe->readers++;
36251 - pipe->writers--;
36252 + atomic_inc(&pipe->readers);
36253 + atomic_dec(&pipe->writers);
36254
36255 - while ((pipe->readers > 1) && (!signal_pending(current))) {
36256 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
36257 wake_up_interruptible_sync(&pipe->wait);
36258 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
36259 pipe_wait(pipe);
36260 }
36261
36262 - pipe->readers--;
36263 - pipe->writers++;
36264 + atomic_dec(&pipe->readers);
36265 + atomic_inc(&pipe->writers);
36266 pipe_unlock(pipe);
36267
36268 }
36269 @@ -1926,7 +2229,7 @@ void do_coredump(long signr, int exit_co
36270 int retval = 0;
36271 int flag = 0;
36272 int ispipe;
36273 - static atomic_t core_dump_count = ATOMIC_INIT(0);
36274 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
36275 struct coredump_params cprm = {
36276 .signr = signr,
36277 .regs = regs,
36278 @@ -1941,6 +2244,9 @@ void do_coredump(long signr, int exit_co
36279
36280 audit_core_dumps(signr);
36281
36282 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
36283 + gr_handle_brute_attach(current, cprm.mm_flags);
36284 +
36285 binfmt = mm->binfmt;
36286 if (!binfmt || !binfmt->core_dump)
36287 goto fail;
36288 @@ -1981,6 +2287,8 @@ void do_coredump(long signr, int exit_co
36289 goto fail_corename;
36290 }
36291
36292 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
36293 +
36294 if (ispipe) {
36295 int dump_count;
36296 char **helper_argv;
36297 @@ -2008,7 +2316,7 @@ void do_coredump(long signr, int exit_co
36298 }
36299 cprm.limit = RLIM_INFINITY;
36300
36301 - dump_count = atomic_inc_return(&core_dump_count);
36302 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
36303 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
36304 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
36305 task_tgid_vnr(current), current->comm);
36306 @@ -2078,7 +2386,7 @@ close_fail:
36307 filp_close(cprm.file, NULL);
36308 fail_dropcount:
36309 if (ispipe)
36310 - atomic_dec(&core_dump_count);
36311 + atomic_dec_unchecked(&core_dump_count);
36312 fail_unlock:
36313 kfree(cn.corename);
36314 fail_corename:
36315 diff -urNp linux-2.6.39.4/fs/ext2/balloc.c linux-2.6.39.4/fs/ext2/balloc.c
36316 --- linux-2.6.39.4/fs/ext2/balloc.c 2011-05-19 00:06:34.000000000 -0400
36317 +++ linux-2.6.39.4/fs/ext2/balloc.c 2011-08-05 19:44:37.000000000 -0400
36318 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
36319
36320 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
36321 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
36322 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
36323 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
36324 sbi->s_resuid != current_fsuid() &&
36325 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
36326 return 0;
36327 diff -urNp linux-2.6.39.4/fs/ext3/balloc.c linux-2.6.39.4/fs/ext3/balloc.c
36328 --- linux-2.6.39.4/fs/ext3/balloc.c 2011-05-19 00:06:34.000000000 -0400
36329 +++ linux-2.6.39.4/fs/ext3/balloc.c 2011-08-05 19:44:37.000000000 -0400
36330 @@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
36331
36332 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
36333 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
36334 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
36335 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
36336 sbi->s_resuid != current_fsuid() &&
36337 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
36338 return 0;
36339 diff -urNp linux-2.6.39.4/fs/ext4/balloc.c linux-2.6.39.4/fs/ext4/balloc.c
36340 --- linux-2.6.39.4/fs/ext4/balloc.c 2011-05-19 00:06:34.000000000 -0400
36341 +++ linux-2.6.39.4/fs/ext4/balloc.c 2011-08-05 19:44:37.000000000 -0400
36342 @@ -522,7 +522,7 @@ static int ext4_has_free_blocks(struct e
36343 /* Hm, nope. Are (enough) root reserved blocks available? */
36344 if (sbi->s_resuid == current_fsuid() ||
36345 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
36346 - capable(CAP_SYS_RESOURCE)) {
36347 + capable_nolog(CAP_SYS_RESOURCE)) {
36348 if (free_blocks >= (nblocks + dirty_blocks))
36349 return 1;
36350 }
36351 diff -urNp linux-2.6.39.4/fs/ext4/ext4.h linux-2.6.39.4/fs/ext4/ext4.h
36352 --- linux-2.6.39.4/fs/ext4/ext4.h 2011-06-03 00:04:14.000000000 -0400
36353 +++ linux-2.6.39.4/fs/ext4/ext4.h 2011-08-05 19:44:37.000000000 -0400
36354 @@ -1166,19 +1166,19 @@ struct ext4_sb_info {
36355 unsigned long s_mb_last_start;
36356
36357 /* stats for buddy allocator */
36358 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
36359 - atomic_t s_bal_success; /* we found long enough chunks */
36360 - atomic_t s_bal_allocated; /* in blocks */
36361 - atomic_t s_bal_ex_scanned; /* total extents scanned */
36362 - atomic_t s_bal_goals; /* goal hits */
36363 - atomic_t s_bal_breaks; /* too long searches */
36364 - atomic_t s_bal_2orders; /* 2^order hits */
36365 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
36366 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
36367 + atomic_unchecked_t s_bal_allocated; /* in blocks */
36368 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
36369 + atomic_unchecked_t s_bal_goals; /* goal hits */
36370 + atomic_unchecked_t s_bal_breaks; /* too long searches */
36371 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
36372 spinlock_t s_bal_lock;
36373 unsigned long s_mb_buddies_generated;
36374 unsigned long long s_mb_generation_time;
36375 - atomic_t s_mb_lost_chunks;
36376 - atomic_t s_mb_preallocated;
36377 - atomic_t s_mb_discarded;
36378 + atomic_unchecked_t s_mb_lost_chunks;
36379 + atomic_unchecked_t s_mb_preallocated;
36380 + atomic_unchecked_t s_mb_discarded;
36381 atomic_t s_lock_busy;
36382
36383 /* locality groups */
36384 diff -urNp linux-2.6.39.4/fs/ext4/mballoc.c linux-2.6.39.4/fs/ext4/mballoc.c
36385 --- linux-2.6.39.4/fs/ext4/mballoc.c 2011-06-03 00:04:14.000000000 -0400
36386 +++ linux-2.6.39.4/fs/ext4/mballoc.c 2011-08-05 19:44:37.000000000 -0400
36387 @@ -1853,7 +1853,7 @@ void ext4_mb_simple_scan_group(struct ex
36388 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
36389
36390 if (EXT4_SB(sb)->s_mb_stats)
36391 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
36392 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
36393
36394 break;
36395 }
36396 @@ -2147,7 +2147,7 @@ repeat:
36397 ac->ac_status = AC_STATUS_CONTINUE;
36398 ac->ac_flags |= EXT4_MB_HINT_FIRST;
36399 cr = 3;
36400 - atomic_inc(&sbi->s_mb_lost_chunks);
36401 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
36402 goto repeat;
36403 }
36404 }
36405 @@ -2190,6 +2190,8 @@ static int ext4_mb_seq_groups_show(struc
36406 ext4_grpblk_t counters[16];
36407 } sg;
36408
36409 + pax_track_stack();
36410 +
36411 group--;
36412 if (group == 0)
36413 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
36414 @@ -2613,25 +2615,25 @@ int ext4_mb_release(struct super_block *
36415 if (sbi->s_mb_stats) {
36416 printk(KERN_INFO
36417 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
36418 - atomic_read(&sbi->s_bal_allocated),
36419 - atomic_read(&sbi->s_bal_reqs),
36420 - atomic_read(&sbi->s_bal_success));
36421 + atomic_read_unchecked(&sbi->s_bal_allocated),
36422 + atomic_read_unchecked(&sbi->s_bal_reqs),
36423 + atomic_read_unchecked(&sbi->s_bal_success));
36424 printk(KERN_INFO
36425 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
36426 "%u 2^N hits, %u breaks, %u lost\n",
36427 - atomic_read(&sbi->s_bal_ex_scanned),
36428 - atomic_read(&sbi->s_bal_goals),
36429 - atomic_read(&sbi->s_bal_2orders),
36430 - atomic_read(&sbi->s_bal_breaks),
36431 - atomic_read(&sbi->s_mb_lost_chunks));
36432 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
36433 + atomic_read_unchecked(&sbi->s_bal_goals),
36434 + atomic_read_unchecked(&sbi->s_bal_2orders),
36435 + atomic_read_unchecked(&sbi->s_bal_breaks),
36436 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
36437 printk(KERN_INFO
36438 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
36439 sbi->s_mb_buddies_generated++,
36440 sbi->s_mb_generation_time);
36441 printk(KERN_INFO
36442 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
36443 - atomic_read(&sbi->s_mb_preallocated),
36444 - atomic_read(&sbi->s_mb_discarded));
36445 + atomic_read_unchecked(&sbi->s_mb_preallocated),
36446 + atomic_read_unchecked(&sbi->s_mb_discarded));
36447 }
36448
36449 free_percpu(sbi->s_locality_groups);
36450 @@ -3107,16 +3109,16 @@ static void ext4_mb_collect_stats(struct
36451 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
36452
36453 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
36454 - atomic_inc(&sbi->s_bal_reqs);
36455 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36456 + atomic_inc_unchecked(&sbi->s_bal_reqs);
36457 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36458 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
36459 - atomic_inc(&sbi->s_bal_success);
36460 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
36461 + atomic_inc_unchecked(&sbi->s_bal_success);
36462 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
36463 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
36464 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
36465 - atomic_inc(&sbi->s_bal_goals);
36466 + atomic_inc_unchecked(&sbi->s_bal_goals);
36467 if (ac->ac_found > sbi->s_mb_max_to_scan)
36468 - atomic_inc(&sbi->s_bal_breaks);
36469 + atomic_inc_unchecked(&sbi->s_bal_breaks);
36470 }
36471
36472 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
36473 @@ -3514,7 +3516,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
36474 trace_ext4_mb_new_inode_pa(ac, pa);
36475
36476 ext4_mb_use_inode_pa(ac, pa);
36477 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36478 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36479
36480 ei = EXT4_I(ac->ac_inode);
36481 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36482 @@ -3574,7 +3576,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
36483 trace_ext4_mb_new_group_pa(ac, pa);
36484
36485 ext4_mb_use_group_pa(ac, pa);
36486 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36487 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36488
36489 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36490 lg = ac->ac_lg;
36491 @@ -3661,7 +3663,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
36492 * from the bitmap and continue.
36493 */
36494 }
36495 - atomic_add(free, &sbi->s_mb_discarded);
36496 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
36497
36498 return err;
36499 }
36500 @@ -3679,7 +3681,7 @@ ext4_mb_release_group_pa(struct ext4_bud
36501 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
36502 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
36503 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
36504 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36505 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36506 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
36507
36508 return 0;
36509 diff -urNp linux-2.6.39.4/fs/fcntl.c linux-2.6.39.4/fs/fcntl.c
36510 --- linux-2.6.39.4/fs/fcntl.c 2011-05-19 00:06:34.000000000 -0400
36511 +++ linux-2.6.39.4/fs/fcntl.c 2011-08-05 19:44:37.000000000 -0400
36512 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
36513 if (err)
36514 return err;
36515
36516 + if (gr_handle_chroot_fowner(pid, type))
36517 + return -ENOENT;
36518 + if (gr_check_protected_task_fowner(pid, type))
36519 + return -EACCES;
36520 +
36521 f_modown(filp, pid, type, force);
36522 return 0;
36523 }
36524 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
36525 switch (cmd) {
36526 case F_DUPFD:
36527 case F_DUPFD_CLOEXEC:
36528 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
36529 if (arg >= rlimit(RLIMIT_NOFILE))
36530 break;
36531 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
36532 @@ -835,14 +841,14 @@ static int __init fcntl_init(void)
36533 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
36534 * is defined as O_NONBLOCK on some platforms and not on others.
36535 */
36536 - BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36537 + BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36538 O_RDONLY | O_WRONLY | O_RDWR |
36539 O_CREAT | O_EXCL | O_NOCTTY |
36540 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
36541 __O_SYNC | O_DSYNC | FASYNC |
36542 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
36543 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
36544 - __FMODE_EXEC | O_PATH
36545 + __FMODE_EXEC | O_PATH | FMODE_GREXEC
36546 ));
36547
36548 fasync_cache = kmem_cache_create("fasync_cache",
36549 diff -urNp linux-2.6.39.4/fs/fifo.c linux-2.6.39.4/fs/fifo.c
36550 --- linux-2.6.39.4/fs/fifo.c 2011-05-19 00:06:34.000000000 -0400
36551 +++ linux-2.6.39.4/fs/fifo.c 2011-08-05 19:44:37.000000000 -0400
36552 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
36553 */
36554 filp->f_op = &read_pipefifo_fops;
36555 pipe->r_counter++;
36556 - if (pipe->readers++ == 0)
36557 + if (atomic_inc_return(&pipe->readers) == 1)
36558 wake_up_partner(inode);
36559
36560 - if (!pipe->writers) {
36561 + if (!atomic_read(&pipe->writers)) {
36562 if ((filp->f_flags & O_NONBLOCK)) {
36563 /* suppress POLLHUP until we have
36564 * seen a writer */
36565 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
36566 * errno=ENXIO when there is no process reading the FIFO.
36567 */
36568 ret = -ENXIO;
36569 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
36570 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
36571 goto err;
36572
36573 filp->f_op = &write_pipefifo_fops;
36574 pipe->w_counter++;
36575 - if (!pipe->writers++)
36576 + if (atomic_inc_return(&pipe->writers) == 1)
36577 wake_up_partner(inode);
36578
36579 - if (!pipe->readers) {
36580 + if (!atomic_read(&pipe->readers)) {
36581 wait_for_partner(inode, &pipe->r_counter);
36582 if (signal_pending(current))
36583 goto err_wr;
36584 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
36585 */
36586 filp->f_op = &rdwr_pipefifo_fops;
36587
36588 - pipe->readers++;
36589 - pipe->writers++;
36590 + atomic_inc(&pipe->readers);
36591 + atomic_inc(&pipe->writers);
36592 pipe->r_counter++;
36593 pipe->w_counter++;
36594 - if (pipe->readers == 1 || pipe->writers == 1)
36595 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
36596 wake_up_partner(inode);
36597 break;
36598
36599 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
36600 return 0;
36601
36602 err_rd:
36603 - if (!--pipe->readers)
36604 + if (atomic_dec_and_test(&pipe->readers))
36605 wake_up_interruptible(&pipe->wait);
36606 ret = -ERESTARTSYS;
36607 goto err;
36608
36609 err_wr:
36610 - if (!--pipe->writers)
36611 + if (atomic_dec_and_test(&pipe->writers))
36612 wake_up_interruptible(&pipe->wait);
36613 ret = -ERESTARTSYS;
36614 goto err;
36615
36616 err:
36617 - if (!pipe->readers && !pipe->writers)
36618 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
36619 free_pipe_info(inode);
36620
36621 err_nocleanup:
36622 diff -urNp linux-2.6.39.4/fs/file.c linux-2.6.39.4/fs/file.c
36623 --- linux-2.6.39.4/fs/file.c 2011-05-19 00:06:34.000000000 -0400
36624 +++ linux-2.6.39.4/fs/file.c 2011-08-05 19:44:37.000000000 -0400
36625 @@ -15,6 +15,7 @@
36626 #include <linux/slab.h>
36627 #include <linux/vmalloc.h>
36628 #include <linux/file.h>
36629 +#include <linux/security.h>
36630 #include <linux/fdtable.h>
36631 #include <linux/bitops.h>
36632 #include <linux/interrupt.h>
36633 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
36634 * N.B. For clone tasks sharing a files structure, this test
36635 * will limit the total number of files that can be opened.
36636 */
36637 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
36638 if (nr >= rlimit(RLIMIT_NOFILE))
36639 return -EMFILE;
36640
36641 diff -urNp linux-2.6.39.4/fs/filesystems.c linux-2.6.39.4/fs/filesystems.c
36642 --- linux-2.6.39.4/fs/filesystems.c 2011-05-19 00:06:34.000000000 -0400
36643 +++ linux-2.6.39.4/fs/filesystems.c 2011-08-05 19:44:37.000000000 -0400
36644 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
36645 int len = dot ? dot - name : strlen(name);
36646
36647 fs = __get_fs_type(name, len);
36648 +
36649 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
36650 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
36651 +#else
36652 if (!fs && (request_module("%.*s", len, name) == 0))
36653 +#endif
36654 fs = __get_fs_type(name, len);
36655
36656 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
36657 diff -urNp linux-2.6.39.4/fs/fscache/cookie.c linux-2.6.39.4/fs/fscache/cookie.c
36658 --- linux-2.6.39.4/fs/fscache/cookie.c 2011-05-19 00:06:34.000000000 -0400
36659 +++ linux-2.6.39.4/fs/fscache/cookie.c 2011-08-05 19:44:37.000000000 -0400
36660 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
36661 parent ? (char *) parent->def->name : "<no-parent>",
36662 def->name, netfs_data);
36663
36664 - fscache_stat(&fscache_n_acquires);
36665 + fscache_stat_unchecked(&fscache_n_acquires);
36666
36667 /* if there's no parent cookie, then we don't create one here either */
36668 if (!parent) {
36669 - fscache_stat(&fscache_n_acquires_null);
36670 + fscache_stat_unchecked(&fscache_n_acquires_null);
36671 _leave(" [no parent]");
36672 return NULL;
36673 }
36674 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
36675 /* allocate and initialise a cookie */
36676 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
36677 if (!cookie) {
36678 - fscache_stat(&fscache_n_acquires_oom);
36679 + fscache_stat_unchecked(&fscache_n_acquires_oom);
36680 _leave(" [ENOMEM]");
36681 return NULL;
36682 }
36683 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
36684
36685 switch (cookie->def->type) {
36686 case FSCACHE_COOKIE_TYPE_INDEX:
36687 - fscache_stat(&fscache_n_cookie_index);
36688 + fscache_stat_unchecked(&fscache_n_cookie_index);
36689 break;
36690 case FSCACHE_COOKIE_TYPE_DATAFILE:
36691 - fscache_stat(&fscache_n_cookie_data);
36692 + fscache_stat_unchecked(&fscache_n_cookie_data);
36693 break;
36694 default:
36695 - fscache_stat(&fscache_n_cookie_special);
36696 + fscache_stat_unchecked(&fscache_n_cookie_special);
36697 break;
36698 }
36699
36700 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
36701 if (fscache_acquire_non_index_cookie(cookie) < 0) {
36702 atomic_dec(&parent->n_children);
36703 __fscache_cookie_put(cookie);
36704 - fscache_stat(&fscache_n_acquires_nobufs);
36705 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
36706 _leave(" = NULL");
36707 return NULL;
36708 }
36709 }
36710
36711 - fscache_stat(&fscache_n_acquires_ok);
36712 + fscache_stat_unchecked(&fscache_n_acquires_ok);
36713 _leave(" = %p", cookie);
36714 return cookie;
36715 }
36716 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
36717 cache = fscache_select_cache_for_object(cookie->parent);
36718 if (!cache) {
36719 up_read(&fscache_addremove_sem);
36720 - fscache_stat(&fscache_n_acquires_no_cache);
36721 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
36722 _leave(" = -ENOMEDIUM [no cache]");
36723 return -ENOMEDIUM;
36724 }
36725 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
36726 object = cache->ops->alloc_object(cache, cookie);
36727 fscache_stat_d(&fscache_n_cop_alloc_object);
36728 if (IS_ERR(object)) {
36729 - fscache_stat(&fscache_n_object_no_alloc);
36730 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
36731 ret = PTR_ERR(object);
36732 goto error;
36733 }
36734
36735 - fscache_stat(&fscache_n_object_alloc);
36736 + fscache_stat_unchecked(&fscache_n_object_alloc);
36737
36738 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
36739
36740 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
36741 struct fscache_object *object;
36742 struct hlist_node *_p;
36743
36744 - fscache_stat(&fscache_n_updates);
36745 + fscache_stat_unchecked(&fscache_n_updates);
36746
36747 if (!cookie) {
36748 - fscache_stat(&fscache_n_updates_null);
36749 + fscache_stat_unchecked(&fscache_n_updates_null);
36750 _leave(" [no cookie]");
36751 return;
36752 }
36753 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
36754 struct fscache_object *object;
36755 unsigned long event;
36756
36757 - fscache_stat(&fscache_n_relinquishes);
36758 + fscache_stat_unchecked(&fscache_n_relinquishes);
36759 if (retire)
36760 - fscache_stat(&fscache_n_relinquishes_retire);
36761 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
36762
36763 if (!cookie) {
36764 - fscache_stat(&fscache_n_relinquishes_null);
36765 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
36766 _leave(" [no cookie]");
36767 return;
36768 }
36769 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
36770
36771 /* wait for the cookie to finish being instantiated (or to fail) */
36772 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
36773 - fscache_stat(&fscache_n_relinquishes_waitcrt);
36774 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
36775 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
36776 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
36777 }
36778 diff -urNp linux-2.6.39.4/fs/fscache/internal.h linux-2.6.39.4/fs/fscache/internal.h
36779 --- linux-2.6.39.4/fs/fscache/internal.h 2011-05-19 00:06:34.000000000 -0400
36780 +++ linux-2.6.39.4/fs/fscache/internal.h 2011-08-05 19:44:37.000000000 -0400
36781 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
36782 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
36783 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
36784
36785 -extern atomic_t fscache_n_op_pend;
36786 -extern atomic_t fscache_n_op_run;
36787 -extern atomic_t fscache_n_op_enqueue;
36788 -extern atomic_t fscache_n_op_deferred_release;
36789 -extern atomic_t fscache_n_op_release;
36790 -extern atomic_t fscache_n_op_gc;
36791 -extern atomic_t fscache_n_op_cancelled;
36792 -extern atomic_t fscache_n_op_rejected;
36793 -
36794 -extern atomic_t fscache_n_attr_changed;
36795 -extern atomic_t fscache_n_attr_changed_ok;
36796 -extern atomic_t fscache_n_attr_changed_nobufs;
36797 -extern atomic_t fscache_n_attr_changed_nomem;
36798 -extern atomic_t fscache_n_attr_changed_calls;
36799 -
36800 -extern atomic_t fscache_n_allocs;
36801 -extern atomic_t fscache_n_allocs_ok;
36802 -extern atomic_t fscache_n_allocs_wait;
36803 -extern atomic_t fscache_n_allocs_nobufs;
36804 -extern atomic_t fscache_n_allocs_intr;
36805 -extern atomic_t fscache_n_allocs_object_dead;
36806 -extern atomic_t fscache_n_alloc_ops;
36807 -extern atomic_t fscache_n_alloc_op_waits;
36808 -
36809 -extern atomic_t fscache_n_retrievals;
36810 -extern atomic_t fscache_n_retrievals_ok;
36811 -extern atomic_t fscache_n_retrievals_wait;
36812 -extern atomic_t fscache_n_retrievals_nodata;
36813 -extern atomic_t fscache_n_retrievals_nobufs;
36814 -extern atomic_t fscache_n_retrievals_intr;
36815 -extern atomic_t fscache_n_retrievals_nomem;
36816 -extern atomic_t fscache_n_retrievals_object_dead;
36817 -extern atomic_t fscache_n_retrieval_ops;
36818 -extern atomic_t fscache_n_retrieval_op_waits;
36819 -
36820 -extern atomic_t fscache_n_stores;
36821 -extern atomic_t fscache_n_stores_ok;
36822 -extern atomic_t fscache_n_stores_again;
36823 -extern atomic_t fscache_n_stores_nobufs;
36824 -extern atomic_t fscache_n_stores_oom;
36825 -extern atomic_t fscache_n_store_ops;
36826 -extern atomic_t fscache_n_store_calls;
36827 -extern atomic_t fscache_n_store_pages;
36828 -extern atomic_t fscache_n_store_radix_deletes;
36829 -extern atomic_t fscache_n_store_pages_over_limit;
36830 -
36831 -extern atomic_t fscache_n_store_vmscan_not_storing;
36832 -extern atomic_t fscache_n_store_vmscan_gone;
36833 -extern atomic_t fscache_n_store_vmscan_busy;
36834 -extern atomic_t fscache_n_store_vmscan_cancelled;
36835 -
36836 -extern atomic_t fscache_n_marks;
36837 -extern atomic_t fscache_n_uncaches;
36838 -
36839 -extern atomic_t fscache_n_acquires;
36840 -extern atomic_t fscache_n_acquires_null;
36841 -extern atomic_t fscache_n_acquires_no_cache;
36842 -extern atomic_t fscache_n_acquires_ok;
36843 -extern atomic_t fscache_n_acquires_nobufs;
36844 -extern atomic_t fscache_n_acquires_oom;
36845 -
36846 -extern atomic_t fscache_n_updates;
36847 -extern atomic_t fscache_n_updates_null;
36848 -extern atomic_t fscache_n_updates_run;
36849 -
36850 -extern atomic_t fscache_n_relinquishes;
36851 -extern atomic_t fscache_n_relinquishes_null;
36852 -extern atomic_t fscache_n_relinquishes_waitcrt;
36853 -extern atomic_t fscache_n_relinquishes_retire;
36854 -
36855 -extern atomic_t fscache_n_cookie_index;
36856 -extern atomic_t fscache_n_cookie_data;
36857 -extern atomic_t fscache_n_cookie_special;
36858 -
36859 -extern atomic_t fscache_n_object_alloc;
36860 -extern atomic_t fscache_n_object_no_alloc;
36861 -extern atomic_t fscache_n_object_lookups;
36862 -extern atomic_t fscache_n_object_lookups_negative;
36863 -extern atomic_t fscache_n_object_lookups_positive;
36864 -extern atomic_t fscache_n_object_lookups_timed_out;
36865 -extern atomic_t fscache_n_object_created;
36866 -extern atomic_t fscache_n_object_avail;
36867 -extern atomic_t fscache_n_object_dead;
36868 -
36869 -extern atomic_t fscache_n_checkaux_none;
36870 -extern atomic_t fscache_n_checkaux_okay;
36871 -extern atomic_t fscache_n_checkaux_update;
36872 -extern atomic_t fscache_n_checkaux_obsolete;
36873 +extern atomic_unchecked_t fscache_n_op_pend;
36874 +extern atomic_unchecked_t fscache_n_op_run;
36875 +extern atomic_unchecked_t fscache_n_op_enqueue;
36876 +extern atomic_unchecked_t fscache_n_op_deferred_release;
36877 +extern atomic_unchecked_t fscache_n_op_release;
36878 +extern atomic_unchecked_t fscache_n_op_gc;
36879 +extern atomic_unchecked_t fscache_n_op_cancelled;
36880 +extern atomic_unchecked_t fscache_n_op_rejected;
36881 +
36882 +extern atomic_unchecked_t fscache_n_attr_changed;
36883 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
36884 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
36885 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
36886 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
36887 +
36888 +extern atomic_unchecked_t fscache_n_allocs;
36889 +extern atomic_unchecked_t fscache_n_allocs_ok;
36890 +extern atomic_unchecked_t fscache_n_allocs_wait;
36891 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
36892 +extern atomic_unchecked_t fscache_n_allocs_intr;
36893 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
36894 +extern atomic_unchecked_t fscache_n_alloc_ops;
36895 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
36896 +
36897 +extern atomic_unchecked_t fscache_n_retrievals;
36898 +extern atomic_unchecked_t fscache_n_retrievals_ok;
36899 +extern atomic_unchecked_t fscache_n_retrievals_wait;
36900 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
36901 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
36902 +extern atomic_unchecked_t fscache_n_retrievals_intr;
36903 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
36904 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
36905 +extern atomic_unchecked_t fscache_n_retrieval_ops;
36906 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
36907 +
36908 +extern atomic_unchecked_t fscache_n_stores;
36909 +extern atomic_unchecked_t fscache_n_stores_ok;
36910 +extern atomic_unchecked_t fscache_n_stores_again;
36911 +extern atomic_unchecked_t fscache_n_stores_nobufs;
36912 +extern atomic_unchecked_t fscache_n_stores_oom;
36913 +extern atomic_unchecked_t fscache_n_store_ops;
36914 +extern atomic_unchecked_t fscache_n_store_calls;
36915 +extern atomic_unchecked_t fscache_n_store_pages;
36916 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
36917 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
36918 +
36919 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
36920 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
36921 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
36922 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
36923 +
36924 +extern atomic_unchecked_t fscache_n_marks;
36925 +extern atomic_unchecked_t fscache_n_uncaches;
36926 +
36927 +extern atomic_unchecked_t fscache_n_acquires;
36928 +extern atomic_unchecked_t fscache_n_acquires_null;
36929 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
36930 +extern atomic_unchecked_t fscache_n_acquires_ok;
36931 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
36932 +extern atomic_unchecked_t fscache_n_acquires_oom;
36933 +
36934 +extern atomic_unchecked_t fscache_n_updates;
36935 +extern atomic_unchecked_t fscache_n_updates_null;
36936 +extern atomic_unchecked_t fscache_n_updates_run;
36937 +
36938 +extern atomic_unchecked_t fscache_n_relinquishes;
36939 +extern atomic_unchecked_t fscache_n_relinquishes_null;
36940 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
36941 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
36942 +
36943 +extern atomic_unchecked_t fscache_n_cookie_index;
36944 +extern atomic_unchecked_t fscache_n_cookie_data;
36945 +extern atomic_unchecked_t fscache_n_cookie_special;
36946 +
36947 +extern atomic_unchecked_t fscache_n_object_alloc;
36948 +extern atomic_unchecked_t fscache_n_object_no_alloc;
36949 +extern atomic_unchecked_t fscache_n_object_lookups;
36950 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
36951 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
36952 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
36953 +extern atomic_unchecked_t fscache_n_object_created;
36954 +extern atomic_unchecked_t fscache_n_object_avail;
36955 +extern atomic_unchecked_t fscache_n_object_dead;
36956 +
36957 +extern atomic_unchecked_t fscache_n_checkaux_none;
36958 +extern atomic_unchecked_t fscache_n_checkaux_okay;
36959 +extern atomic_unchecked_t fscache_n_checkaux_update;
36960 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
36961
36962 extern atomic_t fscache_n_cop_alloc_object;
36963 extern atomic_t fscache_n_cop_lookup_object;
36964 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
36965 atomic_inc(stat);
36966 }
36967
36968 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
36969 +{
36970 + atomic_inc_unchecked(stat);
36971 +}
36972 +
36973 static inline void fscache_stat_d(atomic_t *stat)
36974 {
36975 atomic_dec(stat);
36976 @@ -267,6 +272,7 @@ extern const struct file_operations fsca
36977
36978 #define __fscache_stat(stat) (NULL)
36979 #define fscache_stat(stat) do {} while (0)
36980 +#define fscache_stat_unchecked(stat) do {} while (0)
36981 #define fscache_stat_d(stat) do {} while (0)
36982 #endif
36983
36984 diff -urNp linux-2.6.39.4/fs/fscache/object.c linux-2.6.39.4/fs/fscache/object.c
36985 --- linux-2.6.39.4/fs/fscache/object.c 2011-05-19 00:06:34.000000000 -0400
36986 +++ linux-2.6.39.4/fs/fscache/object.c 2011-08-05 19:44:37.000000000 -0400
36987 @@ -128,7 +128,7 @@ static void fscache_object_state_machine
36988 /* update the object metadata on disk */
36989 case FSCACHE_OBJECT_UPDATING:
36990 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
36991 - fscache_stat(&fscache_n_updates_run);
36992 + fscache_stat_unchecked(&fscache_n_updates_run);
36993 fscache_stat(&fscache_n_cop_update_object);
36994 object->cache->ops->update_object(object);
36995 fscache_stat_d(&fscache_n_cop_update_object);
36996 @@ -217,7 +217,7 @@ static void fscache_object_state_machine
36997 spin_lock(&object->lock);
36998 object->state = FSCACHE_OBJECT_DEAD;
36999 spin_unlock(&object->lock);
37000 - fscache_stat(&fscache_n_object_dead);
37001 + fscache_stat_unchecked(&fscache_n_object_dead);
37002 goto terminal_transit;
37003
37004 /* handle the parent cache of this object being withdrawn from
37005 @@ -232,7 +232,7 @@ static void fscache_object_state_machine
37006 spin_lock(&object->lock);
37007 object->state = FSCACHE_OBJECT_DEAD;
37008 spin_unlock(&object->lock);
37009 - fscache_stat(&fscache_n_object_dead);
37010 + fscache_stat_unchecked(&fscache_n_object_dead);
37011 goto terminal_transit;
37012
37013 /* complain about the object being woken up once it is
37014 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
37015 parent->cookie->def->name, cookie->def->name,
37016 object->cache->tag->name);
37017
37018 - fscache_stat(&fscache_n_object_lookups);
37019 + fscache_stat_unchecked(&fscache_n_object_lookups);
37020 fscache_stat(&fscache_n_cop_lookup_object);
37021 ret = object->cache->ops->lookup_object(object);
37022 fscache_stat_d(&fscache_n_cop_lookup_object);
37023 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
37024 if (ret == -ETIMEDOUT) {
37025 /* probably stuck behind another object, so move this one to
37026 * the back of the queue */
37027 - fscache_stat(&fscache_n_object_lookups_timed_out);
37028 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
37029 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
37030 }
37031
37032 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
37033
37034 spin_lock(&object->lock);
37035 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
37036 - fscache_stat(&fscache_n_object_lookups_negative);
37037 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
37038
37039 /* transit here to allow write requests to begin stacking up
37040 * and read requests to begin returning ENODATA */
37041 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
37042 * result, in which case there may be data available */
37043 spin_lock(&object->lock);
37044 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
37045 - fscache_stat(&fscache_n_object_lookups_positive);
37046 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
37047
37048 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
37049
37050 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
37051 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
37052 } else {
37053 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
37054 - fscache_stat(&fscache_n_object_created);
37055 + fscache_stat_unchecked(&fscache_n_object_created);
37056
37057 object->state = FSCACHE_OBJECT_AVAILABLE;
37058 spin_unlock(&object->lock);
37059 @@ -602,7 +602,7 @@ static void fscache_object_available(str
37060 fscache_enqueue_dependents(object);
37061
37062 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
37063 - fscache_stat(&fscache_n_object_avail);
37064 + fscache_stat_unchecked(&fscache_n_object_avail);
37065
37066 _leave("");
37067 }
37068 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
37069 enum fscache_checkaux result;
37070
37071 if (!object->cookie->def->check_aux) {
37072 - fscache_stat(&fscache_n_checkaux_none);
37073 + fscache_stat_unchecked(&fscache_n_checkaux_none);
37074 return FSCACHE_CHECKAUX_OKAY;
37075 }
37076
37077 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
37078 switch (result) {
37079 /* entry okay as is */
37080 case FSCACHE_CHECKAUX_OKAY:
37081 - fscache_stat(&fscache_n_checkaux_okay);
37082 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
37083 break;
37084
37085 /* entry requires update */
37086 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
37087 - fscache_stat(&fscache_n_checkaux_update);
37088 + fscache_stat_unchecked(&fscache_n_checkaux_update);
37089 break;
37090
37091 /* entry requires deletion */
37092 case FSCACHE_CHECKAUX_OBSOLETE:
37093 - fscache_stat(&fscache_n_checkaux_obsolete);
37094 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
37095 break;
37096
37097 default:
37098 diff -urNp linux-2.6.39.4/fs/fscache/operation.c linux-2.6.39.4/fs/fscache/operation.c
37099 --- linux-2.6.39.4/fs/fscache/operation.c 2011-05-19 00:06:34.000000000 -0400
37100 +++ linux-2.6.39.4/fs/fscache/operation.c 2011-08-05 19:44:37.000000000 -0400
37101 @@ -17,7 +17,7 @@
37102 #include <linux/slab.h>
37103 #include "internal.h"
37104
37105 -atomic_t fscache_op_debug_id;
37106 +atomic_unchecked_t fscache_op_debug_id;
37107 EXPORT_SYMBOL(fscache_op_debug_id);
37108
37109 /**
37110 @@ -40,7 +40,7 @@ void fscache_enqueue_operation(struct fs
37111 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
37112 ASSERTCMP(atomic_read(&op->usage), >, 0);
37113
37114 - fscache_stat(&fscache_n_op_enqueue);
37115 + fscache_stat_unchecked(&fscache_n_op_enqueue);
37116 switch (op->flags & FSCACHE_OP_TYPE) {
37117 case FSCACHE_OP_ASYNC:
37118 _debug("queue async");
37119 @@ -73,7 +73,7 @@ static void fscache_run_op(struct fscach
37120 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
37121 if (op->processor)
37122 fscache_enqueue_operation(op);
37123 - fscache_stat(&fscache_n_op_run);
37124 + fscache_stat_unchecked(&fscache_n_op_run);
37125 }
37126
37127 /*
37128 @@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct f
37129 if (object->n_ops > 1) {
37130 atomic_inc(&op->usage);
37131 list_add_tail(&op->pend_link, &object->pending_ops);
37132 - fscache_stat(&fscache_n_op_pend);
37133 + fscache_stat_unchecked(&fscache_n_op_pend);
37134 } else if (!list_empty(&object->pending_ops)) {
37135 atomic_inc(&op->usage);
37136 list_add_tail(&op->pend_link, &object->pending_ops);
37137 - fscache_stat(&fscache_n_op_pend);
37138 + fscache_stat_unchecked(&fscache_n_op_pend);
37139 fscache_start_operations(object);
37140 } else {
37141 ASSERTCMP(object->n_in_progress, ==, 0);
37142 @@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct f
37143 object->n_exclusive++; /* reads and writes must wait */
37144 atomic_inc(&op->usage);
37145 list_add_tail(&op->pend_link, &object->pending_ops);
37146 - fscache_stat(&fscache_n_op_pend);
37147 + fscache_stat_unchecked(&fscache_n_op_pend);
37148 ret = 0;
37149 } else {
37150 /* not allowed to submit ops in any other state */
37151 @@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_obj
37152 if (object->n_exclusive > 0) {
37153 atomic_inc(&op->usage);
37154 list_add_tail(&op->pend_link, &object->pending_ops);
37155 - fscache_stat(&fscache_n_op_pend);
37156 + fscache_stat_unchecked(&fscache_n_op_pend);
37157 } else if (!list_empty(&object->pending_ops)) {
37158 atomic_inc(&op->usage);
37159 list_add_tail(&op->pend_link, &object->pending_ops);
37160 - fscache_stat(&fscache_n_op_pend);
37161 + fscache_stat_unchecked(&fscache_n_op_pend);
37162 fscache_start_operations(object);
37163 } else {
37164 ASSERTCMP(object->n_exclusive, ==, 0);
37165 @@ -227,12 +227,12 @@ int fscache_submit_op(struct fscache_obj
37166 object->n_ops++;
37167 atomic_inc(&op->usage);
37168 list_add_tail(&op->pend_link, &object->pending_ops);
37169 - fscache_stat(&fscache_n_op_pend);
37170 + fscache_stat_unchecked(&fscache_n_op_pend);
37171 ret = 0;
37172 } else if (object->state == FSCACHE_OBJECT_DYING ||
37173 object->state == FSCACHE_OBJECT_LC_DYING ||
37174 object->state == FSCACHE_OBJECT_WITHDRAWING) {
37175 - fscache_stat(&fscache_n_op_rejected);
37176 + fscache_stat_unchecked(&fscache_n_op_rejected);
37177 ret = -ENOBUFS;
37178 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
37179 fscache_report_unexpected_submission(object, op, ostate);
37180 @@ -302,7 +302,7 @@ int fscache_cancel_op(struct fscache_ope
37181
37182 ret = -EBUSY;
37183 if (!list_empty(&op->pend_link)) {
37184 - fscache_stat(&fscache_n_op_cancelled);
37185 + fscache_stat_unchecked(&fscache_n_op_cancelled);
37186 list_del_init(&op->pend_link);
37187 object->n_ops--;
37188 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
37189 @@ -341,7 +341,7 @@ void fscache_put_operation(struct fscach
37190 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
37191 BUG();
37192
37193 - fscache_stat(&fscache_n_op_release);
37194 + fscache_stat_unchecked(&fscache_n_op_release);
37195
37196 if (op->release) {
37197 op->release(op);
37198 @@ -358,7 +358,7 @@ void fscache_put_operation(struct fscach
37199 * lock, and defer it otherwise */
37200 if (!spin_trylock(&object->lock)) {
37201 _debug("defer put");
37202 - fscache_stat(&fscache_n_op_deferred_release);
37203 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
37204
37205 cache = object->cache;
37206 spin_lock(&cache->op_gc_list_lock);
37207 @@ -420,7 +420,7 @@ void fscache_operation_gc(struct work_st
37208
37209 _debug("GC DEFERRED REL OBJ%x OP%x",
37210 object->debug_id, op->debug_id);
37211 - fscache_stat(&fscache_n_op_gc);
37212 + fscache_stat_unchecked(&fscache_n_op_gc);
37213
37214 ASSERTCMP(atomic_read(&op->usage), ==, 0);
37215
37216 diff -urNp linux-2.6.39.4/fs/fscache/page.c linux-2.6.39.4/fs/fscache/page.c
37217 --- linux-2.6.39.4/fs/fscache/page.c 2011-08-05 21:11:51.000000000 -0400
37218 +++ linux-2.6.39.4/fs/fscache/page.c 2011-08-05 21:12:20.000000000 -0400
37219 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
37220 val = radix_tree_lookup(&cookie->stores, page->index);
37221 if (!val) {
37222 rcu_read_unlock();
37223 - fscache_stat(&fscache_n_store_vmscan_not_storing);
37224 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
37225 __fscache_uncache_page(cookie, page);
37226 return true;
37227 }
37228 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
37229 spin_unlock(&cookie->stores_lock);
37230
37231 if (xpage) {
37232 - fscache_stat(&fscache_n_store_vmscan_cancelled);
37233 - fscache_stat(&fscache_n_store_radix_deletes);
37234 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
37235 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
37236 ASSERTCMP(xpage, ==, page);
37237 } else {
37238 - fscache_stat(&fscache_n_store_vmscan_gone);
37239 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
37240 }
37241
37242 wake_up_bit(&cookie->flags, 0);
37243 @@ -107,7 +107,7 @@ page_busy:
37244 /* we might want to wait here, but that could deadlock the allocator as
37245 * the work threads writing to the cache may all end up sleeping
37246 * on memory allocation */
37247 - fscache_stat(&fscache_n_store_vmscan_busy);
37248 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
37249 return false;
37250 }
37251 EXPORT_SYMBOL(__fscache_maybe_release_page);
37252 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
37253 FSCACHE_COOKIE_STORING_TAG);
37254 if (!radix_tree_tag_get(&cookie->stores, page->index,
37255 FSCACHE_COOKIE_PENDING_TAG)) {
37256 - fscache_stat(&fscache_n_store_radix_deletes);
37257 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
37258 xpage = radix_tree_delete(&cookie->stores, page->index);
37259 }
37260 spin_unlock(&cookie->stores_lock);
37261 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
37262
37263 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
37264
37265 - fscache_stat(&fscache_n_attr_changed_calls);
37266 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
37267
37268 if (fscache_object_is_active(object)) {
37269 fscache_set_op_state(op, "CallFS");
37270 @@ -179,11 +179,11 @@ int __fscache_attr_changed(struct fscach
37271
37272 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37273
37274 - fscache_stat(&fscache_n_attr_changed);
37275 + fscache_stat_unchecked(&fscache_n_attr_changed);
37276
37277 op = kzalloc(sizeof(*op), GFP_KERNEL);
37278 if (!op) {
37279 - fscache_stat(&fscache_n_attr_changed_nomem);
37280 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
37281 _leave(" = -ENOMEM");
37282 return -ENOMEM;
37283 }
37284 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
37285 if (fscache_submit_exclusive_op(object, op) < 0)
37286 goto nobufs;
37287 spin_unlock(&cookie->lock);
37288 - fscache_stat(&fscache_n_attr_changed_ok);
37289 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
37290 fscache_put_operation(op);
37291 _leave(" = 0");
37292 return 0;
37293 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
37294 nobufs:
37295 spin_unlock(&cookie->lock);
37296 kfree(op);
37297 - fscache_stat(&fscache_n_attr_changed_nobufs);
37298 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
37299 _leave(" = %d", -ENOBUFS);
37300 return -ENOBUFS;
37301 }
37302 @@ -246,7 +246,7 @@ static struct fscache_retrieval *fscache
37303 /* allocate a retrieval operation and attempt to submit it */
37304 op = kzalloc(sizeof(*op), GFP_NOIO);
37305 if (!op) {
37306 - fscache_stat(&fscache_n_retrievals_nomem);
37307 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37308 return NULL;
37309 }
37310
37311 @@ -275,13 +275,13 @@ static int fscache_wait_for_deferred_loo
37312 return 0;
37313 }
37314
37315 - fscache_stat(&fscache_n_retrievals_wait);
37316 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
37317
37318 jif = jiffies;
37319 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
37320 fscache_wait_bit_interruptible,
37321 TASK_INTERRUPTIBLE) != 0) {
37322 - fscache_stat(&fscache_n_retrievals_intr);
37323 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37324 _leave(" = -ERESTARTSYS");
37325 return -ERESTARTSYS;
37326 }
37327 @@ -299,8 +299,8 @@ static int fscache_wait_for_deferred_loo
37328 */
37329 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
37330 struct fscache_retrieval *op,
37331 - atomic_t *stat_op_waits,
37332 - atomic_t *stat_object_dead)
37333 + atomic_unchecked_t *stat_op_waits,
37334 + atomic_unchecked_t *stat_object_dead)
37335 {
37336 int ret;
37337
37338 @@ -308,7 +308,7 @@ static int fscache_wait_for_retrieval_ac
37339 goto check_if_dead;
37340
37341 _debug(">>> WT");
37342 - fscache_stat(stat_op_waits);
37343 + fscache_stat_unchecked(stat_op_waits);
37344 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
37345 fscache_wait_bit_interruptible,
37346 TASK_INTERRUPTIBLE) < 0) {
37347 @@ -325,7 +325,7 @@ static int fscache_wait_for_retrieval_ac
37348
37349 check_if_dead:
37350 if (unlikely(fscache_object_is_dead(object))) {
37351 - fscache_stat(stat_object_dead);
37352 + fscache_stat_unchecked(stat_object_dead);
37353 return -ENOBUFS;
37354 }
37355 return 0;
37356 @@ -352,7 +352,7 @@ int __fscache_read_or_alloc_page(struct
37357
37358 _enter("%p,%p,,,", cookie, page);
37359
37360 - fscache_stat(&fscache_n_retrievals);
37361 + fscache_stat_unchecked(&fscache_n_retrievals);
37362
37363 if (hlist_empty(&cookie->backing_objects))
37364 goto nobufs;
37365 @@ -386,7 +386,7 @@ int __fscache_read_or_alloc_page(struct
37366 goto nobufs_unlock;
37367 spin_unlock(&cookie->lock);
37368
37369 - fscache_stat(&fscache_n_retrieval_ops);
37370 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
37371
37372 /* pin the netfs read context in case we need to do the actual netfs
37373 * read because we've encountered a cache read failure */
37374 @@ -416,15 +416,15 @@ int __fscache_read_or_alloc_page(struct
37375
37376 error:
37377 if (ret == -ENOMEM)
37378 - fscache_stat(&fscache_n_retrievals_nomem);
37379 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37380 else if (ret == -ERESTARTSYS)
37381 - fscache_stat(&fscache_n_retrievals_intr);
37382 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37383 else if (ret == -ENODATA)
37384 - fscache_stat(&fscache_n_retrievals_nodata);
37385 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37386 else if (ret < 0)
37387 - fscache_stat(&fscache_n_retrievals_nobufs);
37388 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37389 else
37390 - fscache_stat(&fscache_n_retrievals_ok);
37391 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
37392
37393 fscache_put_retrieval(op);
37394 _leave(" = %d", ret);
37395 @@ -434,7 +434,7 @@ nobufs_unlock:
37396 spin_unlock(&cookie->lock);
37397 kfree(op);
37398 nobufs:
37399 - fscache_stat(&fscache_n_retrievals_nobufs);
37400 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37401 _leave(" = -ENOBUFS");
37402 return -ENOBUFS;
37403 }
37404 @@ -472,7 +472,7 @@ int __fscache_read_or_alloc_pages(struct
37405
37406 _enter("%p,,%d,,,", cookie, *nr_pages);
37407
37408 - fscache_stat(&fscache_n_retrievals);
37409 + fscache_stat_unchecked(&fscache_n_retrievals);
37410
37411 if (hlist_empty(&cookie->backing_objects))
37412 goto nobufs;
37413 @@ -503,7 +503,7 @@ int __fscache_read_or_alloc_pages(struct
37414 goto nobufs_unlock;
37415 spin_unlock(&cookie->lock);
37416
37417 - fscache_stat(&fscache_n_retrieval_ops);
37418 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
37419
37420 /* pin the netfs read context in case we need to do the actual netfs
37421 * read because we've encountered a cache read failure */
37422 @@ -533,15 +533,15 @@ int __fscache_read_or_alloc_pages(struct
37423
37424 error:
37425 if (ret == -ENOMEM)
37426 - fscache_stat(&fscache_n_retrievals_nomem);
37427 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37428 else if (ret == -ERESTARTSYS)
37429 - fscache_stat(&fscache_n_retrievals_intr);
37430 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37431 else if (ret == -ENODATA)
37432 - fscache_stat(&fscache_n_retrievals_nodata);
37433 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37434 else if (ret < 0)
37435 - fscache_stat(&fscache_n_retrievals_nobufs);
37436 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37437 else
37438 - fscache_stat(&fscache_n_retrievals_ok);
37439 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
37440
37441 fscache_put_retrieval(op);
37442 _leave(" = %d", ret);
37443 @@ -551,7 +551,7 @@ nobufs_unlock:
37444 spin_unlock(&cookie->lock);
37445 kfree(op);
37446 nobufs:
37447 - fscache_stat(&fscache_n_retrievals_nobufs);
37448 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37449 _leave(" = -ENOBUFS");
37450 return -ENOBUFS;
37451 }
37452 @@ -575,7 +575,7 @@ int __fscache_alloc_page(struct fscache_
37453
37454 _enter("%p,%p,,,", cookie, page);
37455
37456 - fscache_stat(&fscache_n_allocs);
37457 + fscache_stat_unchecked(&fscache_n_allocs);
37458
37459 if (hlist_empty(&cookie->backing_objects))
37460 goto nobufs;
37461 @@ -602,7 +602,7 @@ int __fscache_alloc_page(struct fscache_
37462 goto nobufs_unlock;
37463 spin_unlock(&cookie->lock);
37464
37465 - fscache_stat(&fscache_n_alloc_ops);
37466 + fscache_stat_unchecked(&fscache_n_alloc_ops);
37467
37468 ret = fscache_wait_for_retrieval_activation(
37469 object, op,
37470 @@ -618,11 +618,11 @@ int __fscache_alloc_page(struct fscache_
37471
37472 error:
37473 if (ret == -ERESTARTSYS)
37474 - fscache_stat(&fscache_n_allocs_intr);
37475 + fscache_stat_unchecked(&fscache_n_allocs_intr);
37476 else if (ret < 0)
37477 - fscache_stat(&fscache_n_allocs_nobufs);
37478 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37479 else
37480 - fscache_stat(&fscache_n_allocs_ok);
37481 + fscache_stat_unchecked(&fscache_n_allocs_ok);
37482
37483 fscache_put_retrieval(op);
37484 _leave(" = %d", ret);
37485 @@ -632,7 +632,7 @@ nobufs_unlock:
37486 spin_unlock(&cookie->lock);
37487 kfree(op);
37488 nobufs:
37489 - fscache_stat(&fscache_n_allocs_nobufs);
37490 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37491 _leave(" = -ENOBUFS");
37492 return -ENOBUFS;
37493 }
37494 @@ -675,7 +675,7 @@ static void fscache_write_op(struct fsca
37495
37496 spin_lock(&cookie->stores_lock);
37497
37498 - fscache_stat(&fscache_n_store_calls);
37499 + fscache_stat_unchecked(&fscache_n_store_calls);
37500
37501 /* find a page to store */
37502 page = NULL;
37503 @@ -686,7 +686,7 @@ static void fscache_write_op(struct fsca
37504 page = results[0];
37505 _debug("gang %d [%lx]", n, page->index);
37506 if (page->index > op->store_limit) {
37507 - fscache_stat(&fscache_n_store_pages_over_limit);
37508 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
37509 goto superseded;
37510 }
37511
37512 @@ -699,7 +699,7 @@ static void fscache_write_op(struct fsca
37513 spin_unlock(&object->lock);
37514
37515 fscache_set_op_state(&op->op, "Store");
37516 - fscache_stat(&fscache_n_store_pages);
37517 + fscache_stat_unchecked(&fscache_n_store_pages);
37518 fscache_stat(&fscache_n_cop_write_page);
37519 ret = object->cache->ops->write_page(op, page);
37520 fscache_stat_d(&fscache_n_cop_write_page);
37521 @@ -769,7 +769,7 @@ int __fscache_write_page(struct fscache_
37522 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37523 ASSERT(PageFsCache(page));
37524
37525 - fscache_stat(&fscache_n_stores);
37526 + fscache_stat_unchecked(&fscache_n_stores);
37527
37528 op = kzalloc(sizeof(*op), GFP_NOIO);
37529 if (!op)
37530 @@ -821,7 +821,7 @@ int __fscache_write_page(struct fscache_
37531 spin_unlock(&cookie->stores_lock);
37532 spin_unlock(&object->lock);
37533
37534 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
37535 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
37536 op->store_limit = object->store_limit;
37537
37538 if (fscache_submit_op(object, &op->op) < 0)
37539 @@ -829,8 +829,8 @@ int __fscache_write_page(struct fscache_
37540
37541 spin_unlock(&cookie->lock);
37542 radix_tree_preload_end();
37543 - fscache_stat(&fscache_n_store_ops);
37544 - fscache_stat(&fscache_n_stores_ok);
37545 + fscache_stat_unchecked(&fscache_n_store_ops);
37546 + fscache_stat_unchecked(&fscache_n_stores_ok);
37547
37548 /* the work queue now carries its own ref on the object */
37549 fscache_put_operation(&op->op);
37550 @@ -838,14 +838,14 @@ int __fscache_write_page(struct fscache_
37551 return 0;
37552
37553 already_queued:
37554 - fscache_stat(&fscache_n_stores_again);
37555 + fscache_stat_unchecked(&fscache_n_stores_again);
37556 already_pending:
37557 spin_unlock(&cookie->stores_lock);
37558 spin_unlock(&object->lock);
37559 spin_unlock(&cookie->lock);
37560 radix_tree_preload_end();
37561 kfree(op);
37562 - fscache_stat(&fscache_n_stores_ok);
37563 + fscache_stat_unchecked(&fscache_n_stores_ok);
37564 _leave(" = 0");
37565 return 0;
37566
37567 @@ -864,14 +864,14 @@ nobufs:
37568 spin_unlock(&cookie->lock);
37569 radix_tree_preload_end();
37570 kfree(op);
37571 - fscache_stat(&fscache_n_stores_nobufs);
37572 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
37573 _leave(" = -ENOBUFS");
37574 return -ENOBUFS;
37575
37576 nomem_free:
37577 kfree(op);
37578 nomem:
37579 - fscache_stat(&fscache_n_stores_oom);
37580 + fscache_stat_unchecked(&fscache_n_stores_oom);
37581 _leave(" = -ENOMEM");
37582 return -ENOMEM;
37583 }
37584 @@ -889,7 +889,7 @@ void __fscache_uncache_page(struct fscac
37585 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37586 ASSERTCMP(page, !=, NULL);
37587
37588 - fscache_stat(&fscache_n_uncaches);
37589 + fscache_stat_unchecked(&fscache_n_uncaches);
37590
37591 /* cache withdrawal may beat us to it */
37592 if (!PageFsCache(page))
37593 @@ -942,7 +942,7 @@ void fscache_mark_pages_cached(struct fs
37594 unsigned long loop;
37595
37596 #ifdef CONFIG_FSCACHE_STATS
37597 - atomic_add(pagevec->nr, &fscache_n_marks);
37598 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
37599 #endif
37600
37601 for (loop = 0; loop < pagevec->nr; loop++) {
37602 diff -urNp linux-2.6.39.4/fs/fscache/stats.c linux-2.6.39.4/fs/fscache/stats.c
37603 --- linux-2.6.39.4/fs/fscache/stats.c 2011-05-19 00:06:34.000000000 -0400
37604 +++ linux-2.6.39.4/fs/fscache/stats.c 2011-08-05 19:44:37.000000000 -0400
37605 @@ -18,95 +18,95 @@
37606 /*
37607 * operation counters
37608 */
37609 -atomic_t fscache_n_op_pend;
37610 -atomic_t fscache_n_op_run;
37611 -atomic_t fscache_n_op_enqueue;
37612 -atomic_t fscache_n_op_requeue;
37613 -atomic_t fscache_n_op_deferred_release;
37614 -atomic_t fscache_n_op_release;
37615 -atomic_t fscache_n_op_gc;
37616 -atomic_t fscache_n_op_cancelled;
37617 -atomic_t fscache_n_op_rejected;
37618 -
37619 -atomic_t fscache_n_attr_changed;
37620 -atomic_t fscache_n_attr_changed_ok;
37621 -atomic_t fscache_n_attr_changed_nobufs;
37622 -atomic_t fscache_n_attr_changed_nomem;
37623 -atomic_t fscache_n_attr_changed_calls;
37624 -
37625 -atomic_t fscache_n_allocs;
37626 -atomic_t fscache_n_allocs_ok;
37627 -atomic_t fscache_n_allocs_wait;
37628 -atomic_t fscache_n_allocs_nobufs;
37629 -atomic_t fscache_n_allocs_intr;
37630 -atomic_t fscache_n_allocs_object_dead;
37631 -atomic_t fscache_n_alloc_ops;
37632 -atomic_t fscache_n_alloc_op_waits;
37633 -
37634 -atomic_t fscache_n_retrievals;
37635 -atomic_t fscache_n_retrievals_ok;
37636 -atomic_t fscache_n_retrievals_wait;
37637 -atomic_t fscache_n_retrievals_nodata;
37638 -atomic_t fscache_n_retrievals_nobufs;
37639 -atomic_t fscache_n_retrievals_intr;
37640 -atomic_t fscache_n_retrievals_nomem;
37641 -atomic_t fscache_n_retrievals_object_dead;
37642 -atomic_t fscache_n_retrieval_ops;
37643 -atomic_t fscache_n_retrieval_op_waits;
37644 -
37645 -atomic_t fscache_n_stores;
37646 -atomic_t fscache_n_stores_ok;
37647 -atomic_t fscache_n_stores_again;
37648 -atomic_t fscache_n_stores_nobufs;
37649 -atomic_t fscache_n_stores_oom;
37650 -atomic_t fscache_n_store_ops;
37651 -atomic_t fscache_n_store_calls;
37652 -atomic_t fscache_n_store_pages;
37653 -atomic_t fscache_n_store_radix_deletes;
37654 -atomic_t fscache_n_store_pages_over_limit;
37655 -
37656 -atomic_t fscache_n_store_vmscan_not_storing;
37657 -atomic_t fscache_n_store_vmscan_gone;
37658 -atomic_t fscache_n_store_vmscan_busy;
37659 -atomic_t fscache_n_store_vmscan_cancelled;
37660 -
37661 -atomic_t fscache_n_marks;
37662 -atomic_t fscache_n_uncaches;
37663 -
37664 -atomic_t fscache_n_acquires;
37665 -atomic_t fscache_n_acquires_null;
37666 -atomic_t fscache_n_acquires_no_cache;
37667 -atomic_t fscache_n_acquires_ok;
37668 -atomic_t fscache_n_acquires_nobufs;
37669 -atomic_t fscache_n_acquires_oom;
37670 -
37671 -atomic_t fscache_n_updates;
37672 -atomic_t fscache_n_updates_null;
37673 -atomic_t fscache_n_updates_run;
37674 -
37675 -atomic_t fscache_n_relinquishes;
37676 -atomic_t fscache_n_relinquishes_null;
37677 -atomic_t fscache_n_relinquishes_waitcrt;
37678 -atomic_t fscache_n_relinquishes_retire;
37679 -
37680 -atomic_t fscache_n_cookie_index;
37681 -atomic_t fscache_n_cookie_data;
37682 -atomic_t fscache_n_cookie_special;
37683 -
37684 -atomic_t fscache_n_object_alloc;
37685 -atomic_t fscache_n_object_no_alloc;
37686 -atomic_t fscache_n_object_lookups;
37687 -atomic_t fscache_n_object_lookups_negative;
37688 -atomic_t fscache_n_object_lookups_positive;
37689 -atomic_t fscache_n_object_lookups_timed_out;
37690 -atomic_t fscache_n_object_created;
37691 -atomic_t fscache_n_object_avail;
37692 -atomic_t fscache_n_object_dead;
37693 -
37694 -atomic_t fscache_n_checkaux_none;
37695 -atomic_t fscache_n_checkaux_okay;
37696 -atomic_t fscache_n_checkaux_update;
37697 -atomic_t fscache_n_checkaux_obsolete;
37698 +atomic_unchecked_t fscache_n_op_pend;
37699 +atomic_unchecked_t fscache_n_op_run;
37700 +atomic_unchecked_t fscache_n_op_enqueue;
37701 +atomic_unchecked_t fscache_n_op_requeue;
37702 +atomic_unchecked_t fscache_n_op_deferred_release;
37703 +atomic_unchecked_t fscache_n_op_release;
37704 +atomic_unchecked_t fscache_n_op_gc;
37705 +atomic_unchecked_t fscache_n_op_cancelled;
37706 +atomic_unchecked_t fscache_n_op_rejected;
37707 +
37708 +atomic_unchecked_t fscache_n_attr_changed;
37709 +atomic_unchecked_t fscache_n_attr_changed_ok;
37710 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
37711 +atomic_unchecked_t fscache_n_attr_changed_nomem;
37712 +atomic_unchecked_t fscache_n_attr_changed_calls;
37713 +
37714 +atomic_unchecked_t fscache_n_allocs;
37715 +atomic_unchecked_t fscache_n_allocs_ok;
37716 +atomic_unchecked_t fscache_n_allocs_wait;
37717 +atomic_unchecked_t fscache_n_allocs_nobufs;
37718 +atomic_unchecked_t fscache_n_allocs_intr;
37719 +atomic_unchecked_t fscache_n_allocs_object_dead;
37720 +atomic_unchecked_t fscache_n_alloc_ops;
37721 +atomic_unchecked_t fscache_n_alloc_op_waits;
37722 +
37723 +atomic_unchecked_t fscache_n_retrievals;
37724 +atomic_unchecked_t fscache_n_retrievals_ok;
37725 +atomic_unchecked_t fscache_n_retrievals_wait;
37726 +atomic_unchecked_t fscache_n_retrievals_nodata;
37727 +atomic_unchecked_t fscache_n_retrievals_nobufs;
37728 +atomic_unchecked_t fscache_n_retrievals_intr;
37729 +atomic_unchecked_t fscache_n_retrievals_nomem;
37730 +atomic_unchecked_t fscache_n_retrievals_object_dead;
37731 +atomic_unchecked_t fscache_n_retrieval_ops;
37732 +atomic_unchecked_t fscache_n_retrieval_op_waits;
37733 +
37734 +atomic_unchecked_t fscache_n_stores;
37735 +atomic_unchecked_t fscache_n_stores_ok;
37736 +atomic_unchecked_t fscache_n_stores_again;
37737 +atomic_unchecked_t fscache_n_stores_nobufs;
37738 +atomic_unchecked_t fscache_n_stores_oom;
37739 +atomic_unchecked_t fscache_n_store_ops;
37740 +atomic_unchecked_t fscache_n_store_calls;
37741 +atomic_unchecked_t fscache_n_store_pages;
37742 +atomic_unchecked_t fscache_n_store_radix_deletes;
37743 +atomic_unchecked_t fscache_n_store_pages_over_limit;
37744 +
37745 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
37746 +atomic_unchecked_t fscache_n_store_vmscan_gone;
37747 +atomic_unchecked_t fscache_n_store_vmscan_busy;
37748 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
37749 +
37750 +atomic_unchecked_t fscache_n_marks;
37751 +atomic_unchecked_t fscache_n_uncaches;
37752 +
37753 +atomic_unchecked_t fscache_n_acquires;
37754 +atomic_unchecked_t fscache_n_acquires_null;
37755 +atomic_unchecked_t fscache_n_acquires_no_cache;
37756 +atomic_unchecked_t fscache_n_acquires_ok;
37757 +atomic_unchecked_t fscache_n_acquires_nobufs;
37758 +atomic_unchecked_t fscache_n_acquires_oom;
37759 +
37760 +atomic_unchecked_t fscache_n_updates;
37761 +atomic_unchecked_t fscache_n_updates_null;
37762 +atomic_unchecked_t fscache_n_updates_run;
37763 +
37764 +atomic_unchecked_t fscache_n_relinquishes;
37765 +atomic_unchecked_t fscache_n_relinquishes_null;
37766 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
37767 +atomic_unchecked_t fscache_n_relinquishes_retire;
37768 +
37769 +atomic_unchecked_t fscache_n_cookie_index;
37770 +atomic_unchecked_t fscache_n_cookie_data;
37771 +atomic_unchecked_t fscache_n_cookie_special;
37772 +
37773 +atomic_unchecked_t fscache_n_object_alloc;
37774 +atomic_unchecked_t fscache_n_object_no_alloc;
37775 +atomic_unchecked_t fscache_n_object_lookups;
37776 +atomic_unchecked_t fscache_n_object_lookups_negative;
37777 +atomic_unchecked_t fscache_n_object_lookups_positive;
37778 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
37779 +atomic_unchecked_t fscache_n_object_created;
37780 +atomic_unchecked_t fscache_n_object_avail;
37781 +atomic_unchecked_t fscache_n_object_dead;
37782 +
37783 +atomic_unchecked_t fscache_n_checkaux_none;
37784 +atomic_unchecked_t fscache_n_checkaux_okay;
37785 +atomic_unchecked_t fscache_n_checkaux_update;
37786 +atomic_unchecked_t fscache_n_checkaux_obsolete;
37787
37788 atomic_t fscache_n_cop_alloc_object;
37789 atomic_t fscache_n_cop_lookup_object;
37790 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
37791 seq_puts(m, "FS-Cache statistics\n");
37792
37793 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
37794 - atomic_read(&fscache_n_cookie_index),
37795 - atomic_read(&fscache_n_cookie_data),
37796 - atomic_read(&fscache_n_cookie_special));
37797 + atomic_read_unchecked(&fscache_n_cookie_index),
37798 + atomic_read_unchecked(&fscache_n_cookie_data),
37799 + atomic_read_unchecked(&fscache_n_cookie_special));
37800
37801 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
37802 - atomic_read(&fscache_n_object_alloc),
37803 - atomic_read(&fscache_n_object_no_alloc),
37804 - atomic_read(&fscache_n_object_avail),
37805 - atomic_read(&fscache_n_object_dead));
37806 + atomic_read_unchecked(&fscache_n_object_alloc),
37807 + atomic_read_unchecked(&fscache_n_object_no_alloc),
37808 + atomic_read_unchecked(&fscache_n_object_avail),
37809 + atomic_read_unchecked(&fscache_n_object_dead));
37810 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
37811 - atomic_read(&fscache_n_checkaux_none),
37812 - atomic_read(&fscache_n_checkaux_okay),
37813 - atomic_read(&fscache_n_checkaux_update),
37814 - atomic_read(&fscache_n_checkaux_obsolete));
37815 + atomic_read_unchecked(&fscache_n_checkaux_none),
37816 + atomic_read_unchecked(&fscache_n_checkaux_okay),
37817 + atomic_read_unchecked(&fscache_n_checkaux_update),
37818 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
37819
37820 seq_printf(m, "Pages : mrk=%u unc=%u\n",
37821 - atomic_read(&fscache_n_marks),
37822 - atomic_read(&fscache_n_uncaches));
37823 + atomic_read_unchecked(&fscache_n_marks),
37824 + atomic_read_unchecked(&fscache_n_uncaches));
37825
37826 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
37827 " oom=%u\n",
37828 - atomic_read(&fscache_n_acquires),
37829 - atomic_read(&fscache_n_acquires_null),
37830 - atomic_read(&fscache_n_acquires_no_cache),
37831 - atomic_read(&fscache_n_acquires_ok),
37832 - atomic_read(&fscache_n_acquires_nobufs),
37833 - atomic_read(&fscache_n_acquires_oom));
37834 + atomic_read_unchecked(&fscache_n_acquires),
37835 + atomic_read_unchecked(&fscache_n_acquires_null),
37836 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
37837 + atomic_read_unchecked(&fscache_n_acquires_ok),
37838 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
37839 + atomic_read_unchecked(&fscache_n_acquires_oom));
37840
37841 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
37842 - atomic_read(&fscache_n_object_lookups),
37843 - atomic_read(&fscache_n_object_lookups_negative),
37844 - atomic_read(&fscache_n_object_lookups_positive),
37845 - atomic_read(&fscache_n_object_created),
37846 - atomic_read(&fscache_n_object_lookups_timed_out));
37847 + atomic_read_unchecked(&fscache_n_object_lookups),
37848 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
37849 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
37850 + atomic_read_unchecked(&fscache_n_object_created),
37851 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
37852
37853 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
37854 - atomic_read(&fscache_n_updates),
37855 - atomic_read(&fscache_n_updates_null),
37856 - atomic_read(&fscache_n_updates_run));
37857 + atomic_read_unchecked(&fscache_n_updates),
37858 + atomic_read_unchecked(&fscache_n_updates_null),
37859 + atomic_read_unchecked(&fscache_n_updates_run));
37860
37861 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
37862 - atomic_read(&fscache_n_relinquishes),
37863 - atomic_read(&fscache_n_relinquishes_null),
37864 - atomic_read(&fscache_n_relinquishes_waitcrt),
37865 - atomic_read(&fscache_n_relinquishes_retire));
37866 + atomic_read_unchecked(&fscache_n_relinquishes),
37867 + atomic_read_unchecked(&fscache_n_relinquishes_null),
37868 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
37869 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
37870
37871 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
37872 - atomic_read(&fscache_n_attr_changed),
37873 - atomic_read(&fscache_n_attr_changed_ok),
37874 - atomic_read(&fscache_n_attr_changed_nobufs),
37875 - atomic_read(&fscache_n_attr_changed_nomem),
37876 - atomic_read(&fscache_n_attr_changed_calls));
37877 + atomic_read_unchecked(&fscache_n_attr_changed),
37878 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
37879 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
37880 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
37881 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
37882
37883 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
37884 - atomic_read(&fscache_n_allocs),
37885 - atomic_read(&fscache_n_allocs_ok),
37886 - atomic_read(&fscache_n_allocs_wait),
37887 - atomic_read(&fscache_n_allocs_nobufs),
37888 - atomic_read(&fscache_n_allocs_intr));
37889 + atomic_read_unchecked(&fscache_n_allocs),
37890 + atomic_read_unchecked(&fscache_n_allocs_ok),
37891 + atomic_read_unchecked(&fscache_n_allocs_wait),
37892 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
37893 + atomic_read_unchecked(&fscache_n_allocs_intr));
37894 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
37895 - atomic_read(&fscache_n_alloc_ops),
37896 - atomic_read(&fscache_n_alloc_op_waits),
37897 - atomic_read(&fscache_n_allocs_object_dead));
37898 + atomic_read_unchecked(&fscache_n_alloc_ops),
37899 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
37900 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
37901
37902 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
37903 " int=%u oom=%u\n",
37904 - atomic_read(&fscache_n_retrievals),
37905 - atomic_read(&fscache_n_retrievals_ok),
37906 - atomic_read(&fscache_n_retrievals_wait),
37907 - atomic_read(&fscache_n_retrievals_nodata),
37908 - atomic_read(&fscache_n_retrievals_nobufs),
37909 - atomic_read(&fscache_n_retrievals_intr),
37910 - atomic_read(&fscache_n_retrievals_nomem));
37911 + atomic_read_unchecked(&fscache_n_retrievals),
37912 + atomic_read_unchecked(&fscache_n_retrievals_ok),
37913 + atomic_read_unchecked(&fscache_n_retrievals_wait),
37914 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
37915 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
37916 + atomic_read_unchecked(&fscache_n_retrievals_intr),
37917 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
37918 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
37919 - atomic_read(&fscache_n_retrieval_ops),
37920 - atomic_read(&fscache_n_retrieval_op_waits),
37921 - atomic_read(&fscache_n_retrievals_object_dead));
37922 + atomic_read_unchecked(&fscache_n_retrieval_ops),
37923 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
37924 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
37925
37926 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
37927 - atomic_read(&fscache_n_stores),
37928 - atomic_read(&fscache_n_stores_ok),
37929 - atomic_read(&fscache_n_stores_again),
37930 - atomic_read(&fscache_n_stores_nobufs),
37931 - atomic_read(&fscache_n_stores_oom));
37932 + atomic_read_unchecked(&fscache_n_stores),
37933 + atomic_read_unchecked(&fscache_n_stores_ok),
37934 + atomic_read_unchecked(&fscache_n_stores_again),
37935 + atomic_read_unchecked(&fscache_n_stores_nobufs),
37936 + atomic_read_unchecked(&fscache_n_stores_oom));
37937 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
37938 - atomic_read(&fscache_n_store_ops),
37939 - atomic_read(&fscache_n_store_calls),
37940 - atomic_read(&fscache_n_store_pages),
37941 - atomic_read(&fscache_n_store_radix_deletes),
37942 - atomic_read(&fscache_n_store_pages_over_limit));
37943 + atomic_read_unchecked(&fscache_n_store_ops),
37944 + atomic_read_unchecked(&fscache_n_store_calls),
37945 + atomic_read_unchecked(&fscache_n_store_pages),
37946 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
37947 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
37948
37949 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
37950 - atomic_read(&fscache_n_store_vmscan_not_storing),
37951 - atomic_read(&fscache_n_store_vmscan_gone),
37952 - atomic_read(&fscache_n_store_vmscan_busy),
37953 - atomic_read(&fscache_n_store_vmscan_cancelled));
37954 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
37955 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
37956 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
37957 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
37958
37959 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
37960 - atomic_read(&fscache_n_op_pend),
37961 - atomic_read(&fscache_n_op_run),
37962 - atomic_read(&fscache_n_op_enqueue),
37963 - atomic_read(&fscache_n_op_cancelled),
37964 - atomic_read(&fscache_n_op_rejected));
37965 + atomic_read_unchecked(&fscache_n_op_pend),
37966 + atomic_read_unchecked(&fscache_n_op_run),
37967 + atomic_read_unchecked(&fscache_n_op_enqueue),
37968 + atomic_read_unchecked(&fscache_n_op_cancelled),
37969 + atomic_read_unchecked(&fscache_n_op_rejected));
37970 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
37971 - atomic_read(&fscache_n_op_deferred_release),
37972 - atomic_read(&fscache_n_op_release),
37973 - atomic_read(&fscache_n_op_gc));
37974 + atomic_read_unchecked(&fscache_n_op_deferred_release),
37975 + atomic_read_unchecked(&fscache_n_op_release),
37976 + atomic_read_unchecked(&fscache_n_op_gc));
37977
37978 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
37979 atomic_read(&fscache_n_cop_alloc_object),
37980 diff -urNp linux-2.6.39.4/fs/fs_struct.c linux-2.6.39.4/fs/fs_struct.c
37981 --- linux-2.6.39.4/fs/fs_struct.c 2011-05-19 00:06:34.000000000 -0400
37982 +++ linux-2.6.39.4/fs/fs_struct.c 2011-08-05 19:44:37.000000000 -0400
37983 @@ -4,6 +4,7 @@
37984 #include <linux/path.h>
37985 #include <linux/slab.h>
37986 #include <linux/fs_struct.h>
37987 +#include <linux/grsecurity.h>
37988 #include "internal.h"
37989
37990 static inline void path_get_longterm(struct path *path)
37991 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
37992 old_root = fs->root;
37993 fs->root = *path;
37994 path_get_longterm(path);
37995 + gr_set_chroot_entries(current, path);
37996 write_seqcount_end(&fs->seq);
37997 spin_unlock(&fs->lock);
37998 if (old_root.dentry)
37999 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
38000 && fs->root.mnt == old_root->mnt) {
38001 path_get_longterm(new_root);
38002 fs->root = *new_root;
38003 + gr_set_chroot_entries(p, new_root);
38004 count++;
38005 }
38006 if (fs->pwd.dentry == old_root->dentry
38007 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
38008 spin_lock(&fs->lock);
38009 write_seqcount_begin(&fs->seq);
38010 tsk->fs = NULL;
38011 - kill = !--fs->users;
38012 + gr_clear_chroot_entries(tsk);
38013 + kill = !atomic_dec_return(&fs->users);
38014 write_seqcount_end(&fs->seq);
38015 spin_unlock(&fs->lock);
38016 task_unlock(tsk);
38017 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
38018 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
38019 /* We don't need to lock fs - think why ;-) */
38020 if (fs) {
38021 - fs->users = 1;
38022 + atomic_set(&fs->users, 1);
38023 fs->in_exec = 0;
38024 spin_lock_init(&fs->lock);
38025 seqcount_init(&fs->seq);
38026 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
38027 spin_lock(&old->lock);
38028 fs->root = old->root;
38029 path_get_longterm(&fs->root);
38030 + /* instead of calling gr_set_chroot_entries here,
38031 + we call it from every caller of this function
38032 + */
38033 fs->pwd = old->pwd;
38034 path_get_longterm(&fs->pwd);
38035 spin_unlock(&old->lock);
38036 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
38037
38038 task_lock(current);
38039 spin_lock(&fs->lock);
38040 - kill = !--fs->users;
38041 + kill = !atomic_dec_return(&fs->users);
38042 current->fs = new_fs;
38043 + gr_set_chroot_entries(current, &new_fs->root);
38044 spin_unlock(&fs->lock);
38045 task_unlock(current);
38046
38047 @@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
38048
38049 /* to be mentioned only in INIT_TASK */
38050 struct fs_struct init_fs = {
38051 - .users = 1,
38052 + .users = ATOMIC_INIT(1),
38053 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
38054 .seq = SEQCNT_ZERO,
38055 .umask = 0022,
38056 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
38057 task_lock(current);
38058
38059 spin_lock(&init_fs.lock);
38060 - init_fs.users++;
38061 + atomic_inc(&init_fs.users);
38062 spin_unlock(&init_fs.lock);
38063
38064 spin_lock(&fs->lock);
38065 current->fs = &init_fs;
38066 - kill = !--fs->users;
38067 + gr_set_chroot_entries(current, &current->fs->root);
38068 + kill = !atomic_dec_return(&fs->users);
38069 spin_unlock(&fs->lock);
38070
38071 task_unlock(current);
38072 diff -urNp linux-2.6.39.4/fs/fuse/cuse.c linux-2.6.39.4/fs/fuse/cuse.c
38073 --- linux-2.6.39.4/fs/fuse/cuse.c 2011-05-19 00:06:34.000000000 -0400
38074 +++ linux-2.6.39.4/fs/fuse/cuse.c 2011-08-05 20:34:06.000000000 -0400
38075 @@ -586,10 +586,12 @@ static int __init cuse_init(void)
38076 INIT_LIST_HEAD(&cuse_conntbl[i]);
38077
38078 /* inherit and extend fuse_dev_operations */
38079 - cuse_channel_fops = fuse_dev_operations;
38080 - cuse_channel_fops.owner = THIS_MODULE;
38081 - cuse_channel_fops.open = cuse_channel_open;
38082 - cuse_channel_fops.release = cuse_channel_release;
38083 + pax_open_kernel();
38084 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
38085 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
38086 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
38087 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
38088 + pax_close_kernel();
38089
38090 cuse_class = class_create(THIS_MODULE, "cuse");
38091 if (IS_ERR(cuse_class))
38092 diff -urNp linux-2.6.39.4/fs/fuse/dev.c linux-2.6.39.4/fs/fuse/dev.c
38093 --- linux-2.6.39.4/fs/fuse/dev.c 2011-05-19 00:06:34.000000000 -0400
38094 +++ linux-2.6.39.4/fs/fuse/dev.c 2011-08-05 20:34:06.000000000 -0400
38095 @@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
38096 ret = 0;
38097 pipe_lock(pipe);
38098
38099 - if (!pipe->readers) {
38100 + if (!atomic_read(&pipe->readers)) {
38101 send_sig(SIGPIPE, current, 0);
38102 if (!ret)
38103 ret = -EPIPE;
38104 diff -urNp linux-2.6.39.4/fs/fuse/dir.c linux-2.6.39.4/fs/fuse/dir.c
38105 --- linux-2.6.39.4/fs/fuse/dir.c 2011-05-19 00:06:34.000000000 -0400
38106 +++ linux-2.6.39.4/fs/fuse/dir.c 2011-08-05 19:44:37.000000000 -0400
38107 @@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *de
38108 return link;
38109 }
38110
38111 -static void free_link(char *link)
38112 +static void free_link(const char *link)
38113 {
38114 if (!IS_ERR(link))
38115 free_page((unsigned long) link);
38116 diff -urNp linux-2.6.39.4/fs/gfs2/ops_inode.c linux-2.6.39.4/fs/gfs2/ops_inode.c
38117 --- linux-2.6.39.4/fs/gfs2/ops_inode.c 2011-05-19 00:06:34.000000000 -0400
38118 +++ linux-2.6.39.4/fs/gfs2/ops_inode.c 2011-08-05 19:44:37.000000000 -0400
38119 @@ -740,6 +740,8 @@ static int gfs2_rename(struct inode *odi
38120 unsigned int x;
38121 int error;
38122
38123 + pax_track_stack();
38124 +
38125 if (ndentry->d_inode) {
38126 nip = GFS2_I(ndentry->d_inode);
38127 if (ip == nip)
38128 @@ -1019,7 +1021,7 @@ out:
38129
38130 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
38131 {
38132 - char *s = nd_get_link(nd);
38133 + const char *s = nd_get_link(nd);
38134 if (!IS_ERR(s))
38135 kfree(s);
38136 }
38137 diff -urNp linux-2.6.39.4/fs/hfsplus/catalog.c linux-2.6.39.4/fs/hfsplus/catalog.c
38138 --- linux-2.6.39.4/fs/hfsplus/catalog.c 2011-05-19 00:06:34.000000000 -0400
38139 +++ linux-2.6.39.4/fs/hfsplus/catalog.c 2011-08-05 19:44:37.000000000 -0400
38140 @@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
38141 int err;
38142 u16 type;
38143
38144 + pax_track_stack();
38145 +
38146 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
38147 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
38148 if (err)
38149 @@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
38150 int entry_size;
38151 int err;
38152
38153 + pax_track_stack();
38154 +
38155 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
38156 str->name, cnid, inode->i_nlink);
38157 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
38158 @@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
38159 int entry_size, type;
38160 int err = 0;
38161
38162 + pax_track_stack();
38163 +
38164 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
38165 cnid, src_dir->i_ino, src_name->name,
38166 dst_dir->i_ino, dst_name->name);
38167 diff -urNp linux-2.6.39.4/fs/hfsplus/dir.c linux-2.6.39.4/fs/hfsplus/dir.c
38168 --- linux-2.6.39.4/fs/hfsplus/dir.c 2011-05-19 00:06:34.000000000 -0400
38169 +++ linux-2.6.39.4/fs/hfsplus/dir.c 2011-08-05 19:44:37.000000000 -0400
38170 @@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
38171 struct hfsplus_readdir_data *rd;
38172 u16 type;
38173
38174 + pax_track_stack();
38175 +
38176 if (filp->f_pos >= inode->i_size)
38177 return 0;
38178
38179 diff -urNp linux-2.6.39.4/fs/hfsplus/inode.c linux-2.6.39.4/fs/hfsplus/inode.c
38180 --- linux-2.6.39.4/fs/hfsplus/inode.c 2011-05-19 00:06:34.000000000 -0400
38181 +++ linux-2.6.39.4/fs/hfsplus/inode.c 2011-08-05 19:44:37.000000000 -0400
38182 @@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
38183 int res = 0;
38184 u16 type;
38185
38186 + pax_track_stack();
38187 +
38188 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
38189
38190 HFSPLUS_I(inode)->linkid = 0;
38191 @@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
38192 struct hfs_find_data fd;
38193 hfsplus_cat_entry entry;
38194
38195 + pax_track_stack();
38196 +
38197 if (HFSPLUS_IS_RSRC(inode))
38198 main_inode = HFSPLUS_I(inode)->rsrc_inode;
38199
38200 diff -urNp linux-2.6.39.4/fs/hfsplus/ioctl.c linux-2.6.39.4/fs/hfsplus/ioctl.c
38201 --- linux-2.6.39.4/fs/hfsplus/ioctl.c 2011-05-19 00:06:34.000000000 -0400
38202 +++ linux-2.6.39.4/fs/hfsplus/ioctl.c 2011-08-05 19:44:37.000000000 -0400
38203 @@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
38204 struct hfsplus_cat_file *file;
38205 int res;
38206
38207 + pax_track_stack();
38208 +
38209 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
38210 return -EOPNOTSUPP;
38211
38212 @@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
38213 struct hfsplus_cat_file *file;
38214 ssize_t res = 0;
38215
38216 + pax_track_stack();
38217 +
38218 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
38219 return -EOPNOTSUPP;
38220
38221 diff -urNp linux-2.6.39.4/fs/hfsplus/super.c linux-2.6.39.4/fs/hfsplus/super.c
38222 --- linux-2.6.39.4/fs/hfsplus/super.c 2011-05-19 00:06:34.000000000 -0400
38223 +++ linux-2.6.39.4/fs/hfsplus/super.c 2011-08-05 19:44:37.000000000 -0400
38224 @@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
38225 struct nls_table *nls = NULL;
38226 int err;
38227
38228 + pax_track_stack();
38229 +
38230 err = -EINVAL;
38231 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
38232 if (!sbi)
38233 diff -urNp linux-2.6.39.4/fs/hugetlbfs/inode.c linux-2.6.39.4/fs/hugetlbfs/inode.c
38234 --- linux-2.6.39.4/fs/hugetlbfs/inode.c 2011-05-19 00:06:34.000000000 -0400
38235 +++ linux-2.6.39.4/fs/hugetlbfs/inode.c 2011-08-05 19:44:37.000000000 -0400
38236 @@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
38237 .kill_sb = kill_litter_super,
38238 };
38239
38240 -static struct vfsmount *hugetlbfs_vfsmount;
38241 +struct vfsmount *hugetlbfs_vfsmount;
38242
38243 static int can_do_hugetlb_shm(void)
38244 {
38245 diff -urNp linux-2.6.39.4/fs/inode.c linux-2.6.39.4/fs/inode.c
38246 --- linux-2.6.39.4/fs/inode.c 2011-05-19 00:06:34.000000000 -0400
38247 +++ linux-2.6.39.4/fs/inode.c 2011-08-05 19:44:37.000000000 -0400
38248 @@ -862,8 +862,8 @@ unsigned int get_next_ino(void)
38249
38250 #ifdef CONFIG_SMP
38251 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
38252 - static atomic_t shared_last_ino;
38253 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
38254 + static atomic_unchecked_t shared_last_ino;
38255 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
38256
38257 res = next - LAST_INO_BATCH;
38258 }
38259 diff -urNp linux-2.6.39.4/fs/jbd/checkpoint.c linux-2.6.39.4/fs/jbd/checkpoint.c
38260 --- linux-2.6.39.4/fs/jbd/checkpoint.c 2011-05-19 00:06:34.000000000 -0400
38261 +++ linux-2.6.39.4/fs/jbd/checkpoint.c 2011-08-05 19:44:37.000000000 -0400
38262 @@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
38263 tid_t this_tid;
38264 int result;
38265
38266 + pax_track_stack();
38267 +
38268 jbd_debug(1, "Start checkpoint\n");
38269
38270 /*
38271 diff -urNp linux-2.6.39.4/fs/jffs2/compr_rtime.c linux-2.6.39.4/fs/jffs2/compr_rtime.c
38272 --- linux-2.6.39.4/fs/jffs2/compr_rtime.c 2011-05-19 00:06:34.000000000 -0400
38273 +++ linux-2.6.39.4/fs/jffs2/compr_rtime.c 2011-08-05 19:44:37.000000000 -0400
38274 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
38275 int outpos = 0;
38276 int pos=0;
38277
38278 + pax_track_stack();
38279 +
38280 memset(positions,0,sizeof(positions));
38281
38282 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
38283 @@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
38284 int outpos = 0;
38285 int pos=0;
38286
38287 + pax_track_stack();
38288 +
38289 memset(positions,0,sizeof(positions));
38290
38291 while (outpos<destlen) {
38292 diff -urNp linux-2.6.39.4/fs/jffs2/compr_rubin.c linux-2.6.39.4/fs/jffs2/compr_rubin.c
38293 --- linux-2.6.39.4/fs/jffs2/compr_rubin.c 2011-05-19 00:06:34.000000000 -0400
38294 +++ linux-2.6.39.4/fs/jffs2/compr_rubin.c 2011-08-05 19:44:37.000000000 -0400
38295 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
38296 int ret;
38297 uint32_t mysrclen, mydstlen;
38298
38299 + pax_track_stack();
38300 +
38301 mysrclen = *sourcelen;
38302 mydstlen = *dstlen - 8;
38303
38304 diff -urNp linux-2.6.39.4/fs/jffs2/erase.c linux-2.6.39.4/fs/jffs2/erase.c
38305 --- linux-2.6.39.4/fs/jffs2/erase.c 2011-05-19 00:06:34.000000000 -0400
38306 +++ linux-2.6.39.4/fs/jffs2/erase.c 2011-08-05 19:44:37.000000000 -0400
38307 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
38308 struct jffs2_unknown_node marker = {
38309 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
38310 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
38311 - .totlen = cpu_to_je32(c->cleanmarker_size)
38312 + .totlen = cpu_to_je32(c->cleanmarker_size),
38313 + .hdr_crc = cpu_to_je32(0)
38314 };
38315
38316 jffs2_prealloc_raw_node_refs(c, jeb, 1);
38317 diff -urNp linux-2.6.39.4/fs/jffs2/wbuf.c linux-2.6.39.4/fs/jffs2/wbuf.c
38318 --- linux-2.6.39.4/fs/jffs2/wbuf.c 2011-05-19 00:06:34.000000000 -0400
38319 +++ linux-2.6.39.4/fs/jffs2/wbuf.c 2011-08-05 19:44:37.000000000 -0400
38320 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
38321 {
38322 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
38323 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
38324 - .totlen = constant_cpu_to_je32(8)
38325 + .totlen = constant_cpu_to_je32(8),
38326 + .hdr_crc = constant_cpu_to_je32(0)
38327 };
38328
38329 /*
38330 diff -urNp linux-2.6.39.4/fs/jffs2/xattr.c linux-2.6.39.4/fs/jffs2/xattr.c
38331 --- linux-2.6.39.4/fs/jffs2/xattr.c 2011-05-19 00:06:34.000000000 -0400
38332 +++ linux-2.6.39.4/fs/jffs2/xattr.c 2011-08-05 19:44:37.000000000 -0400
38333 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
38334
38335 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
38336
38337 + pax_track_stack();
38338 +
38339 /* Phase.1 : Merge same xref */
38340 for (i=0; i < XREF_TMPHASH_SIZE; i++)
38341 xref_tmphash[i] = NULL;
38342 diff -urNp linux-2.6.39.4/fs/jfs/super.c linux-2.6.39.4/fs/jfs/super.c
38343 --- linux-2.6.39.4/fs/jfs/super.c 2011-05-19 00:06:34.000000000 -0400
38344 +++ linux-2.6.39.4/fs/jfs/super.c 2011-08-05 19:44:37.000000000 -0400
38345 @@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
38346
38347 jfs_inode_cachep =
38348 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
38349 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
38350 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
38351 init_once);
38352 if (jfs_inode_cachep == NULL)
38353 return -ENOMEM;
38354 diff -urNp linux-2.6.39.4/fs/Kconfig.binfmt linux-2.6.39.4/fs/Kconfig.binfmt
38355 --- linux-2.6.39.4/fs/Kconfig.binfmt 2011-05-19 00:06:34.000000000 -0400
38356 +++ linux-2.6.39.4/fs/Kconfig.binfmt 2011-08-05 19:44:37.000000000 -0400
38357 @@ -86,7 +86,7 @@ config HAVE_AOUT
38358
38359 config BINFMT_AOUT
38360 tristate "Kernel support for a.out and ECOFF binaries"
38361 - depends on HAVE_AOUT
38362 + depends on HAVE_AOUT && BROKEN
38363 ---help---
38364 A.out (Assembler.OUTput) is a set of formats for libraries and
38365 executables used in the earliest versions of UNIX. Linux used
38366 diff -urNp linux-2.6.39.4/fs/libfs.c linux-2.6.39.4/fs/libfs.c
38367 --- linux-2.6.39.4/fs/libfs.c 2011-05-19 00:06:34.000000000 -0400
38368 +++ linux-2.6.39.4/fs/libfs.c 2011-08-05 19:44:37.000000000 -0400
38369 @@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
38370
38371 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
38372 struct dentry *next;
38373 + char d_name[sizeof(next->d_iname)];
38374 + const unsigned char *name;
38375 +
38376 next = list_entry(p, struct dentry, d_u.d_child);
38377 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
38378 if (!simple_positive(next)) {
38379 @@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
38380
38381 spin_unlock(&next->d_lock);
38382 spin_unlock(&dentry->d_lock);
38383 - if (filldir(dirent, next->d_name.name,
38384 + name = next->d_name.name;
38385 + if (name == next->d_iname) {
38386 + memcpy(d_name, name, next->d_name.len);
38387 + name = d_name;
38388 + }
38389 + if (filldir(dirent, name,
38390 next->d_name.len, filp->f_pos,
38391 next->d_inode->i_ino,
38392 dt_type(next->d_inode)) < 0)
38393 diff -urNp linux-2.6.39.4/fs/lockd/clntproc.c linux-2.6.39.4/fs/lockd/clntproc.c
38394 --- linux-2.6.39.4/fs/lockd/clntproc.c 2011-07-09 09:18:51.000000000 -0400
38395 +++ linux-2.6.39.4/fs/lockd/clntproc.c 2011-08-05 19:44:37.000000000 -0400
38396 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
38397 /*
38398 * Cookie counter for NLM requests
38399 */
38400 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
38401 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
38402
38403 void nlmclnt_next_cookie(struct nlm_cookie *c)
38404 {
38405 - u32 cookie = atomic_inc_return(&nlm_cookie);
38406 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
38407
38408 memcpy(c->data, &cookie, 4);
38409 c->len=4;
38410 @@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
38411 struct nlm_rqst reqst, *req;
38412 int status;
38413
38414 + pax_track_stack();
38415 +
38416 req = &reqst;
38417 memset(req, 0, sizeof(*req));
38418 locks_init_lock(&req->a_args.lock.fl);
38419 diff -urNp linux-2.6.39.4/fs/locks.c linux-2.6.39.4/fs/locks.c
38420 --- linux-2.6.39.4/fs/locks.c 2011-07-09 09:18:51.000000000 -0400
38421 +++ linux-2.6.39.4/fs/locks.c 2011-08-05 19:44:37.000000000 -0400
38422 @@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
38423 return;
38424
38425 if (filp->f_op && filp->f_op->flock) {
38426 - struct file_lock fl = {
38427 + struct file_lock flock = {
38428 .fl_pid = current->tgid,
38429 .fl_file = filp,
38430 .fl_flags = FL_FLOCK,
38431 .fl_type = F_UNLCK,
38432 .fl_end = OFFSET_MAX,
38433 };
38434 - filp->f_op->flock(filp, F_SETLKW, &fl);
38435 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
38436 - fl.fl_ops->fl_release_private(&fl);
38437 + filp->f_op->flock(filp, F_SETLKW, &flock);
38438 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
38439 + flock.fl_ops->fl_release_private(&flock);
38440 }
38441
38442 lock_flocks();
38443 diff -urNp linux-2.6.39.4/fs/logfs/super.c linux-2.6.39.4/fs/logfs/super.c
38444 --- linux-2.6.39.4/fs/logfs/super.c 2011-05-19 00:06:34.000000000 -0400
38445 +++ linux-2.6.39.4/fs/logfs/super.c 2011-08-05 19:44:37.000000000 -0400
38446 @@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
38447 struct logfs_disk_super _ds1, *ds1 = &_ds1;
38448 int err, valid0, valid1;
38449
38450 + pax_track_stack();
38451 +
38452 /* read first superblock */
38453 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
38454 if (err)
38455 diff -urNp linux-2.6.39.4/fs/namei.c linux-2.6.39.4/fs/namei.c
38456 --- linux-2.6.39.4/fs/namei.c 2011-08-05 21:11:51.000000000 -0400
38457 +++ linux-2.6.39.4/fs/namei.c 2011-08-05 21:12:20.000000000 -0400
38458 @@ -237,20 +237,30 @@ int generic_permission(struct inode *ino
38459 return ret;
38460
38461 /*
38462 - * Read/write DACs are always overridable.
38463 - * Executable DACs are overridable if at least one exec bit is set.
38464 + * Searching includes executable on directories, else just read.
38465 */
38466 - if (!(mask & MAY_EXEC) || execute_ok(inode))
38467 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38468 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38469 + if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
38470 +#ifdef CONFIG_GRKERNSEC
38471 + if (flags & IPERM_FLAG_RCU)
38472 + return -ECHILD;
38473 +#endif
38474 + if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38475 return 0;
38476 + }
38477
38478 /*
38479 - * Searching includes executable on directories, else just read.
38480 + * Read/write DACs are always overridable.
38481 + * Executable DACs are overridable if at least one exec bit is set.
38482 */
38483 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38484 - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
38485 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38486 + if (!(mask & MAY_EXEC) || execute_ok(inode)) {
38487 +#ifdef CONFIG_GRKERNSEC
38488 + if (flags & IPERM_FLAG_RCU)
38489 + return -ECHILD;
38490 +#endif
38491 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38492 return 0;
38493 + }
38494
38495 return -EACCES;
38496 }
38497 @@ -626,6 +636,9 @@ static inline int handle_reval_path(stru
38498 struct dentry *dentry = nd->path.dentry;
38499 int status;
38500
38501 + if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
38502 + return -ENOENT;
38503 +
38504 if (likely(!(nd->flags & LOOKUP_JUMPED)))
38505 return 0;
38506
38507 @@ -671,9 +684,16 @@ static inline int exec_permission(struct
38508 if (ret == -ECHILD)
38509 return ret;
38510
38511 - if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
38512 - ns_capable(ns, CAP_DAC_READ_SEARCH))
38513 + if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
38514 goto ok;
38515 + else {
38516 +#ifdef CONFIG_GRKERNSEC
38517 + if (flags & IPERM_FLAG_RCU)
38518 + return -ECHILD;
38519 +#endif
38520 + if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
38521 + goto ok;
38522 + }
38523
38524 return ret;
38525 ok:
38526 @@ -781,11 +801,19 @@ follow_link(struct path *link, struct na
38527 return error;
38528 }
38529
38530 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
38531 + dentry->d_inode, dentry, nd->path.mnt)) {
38532 + error = -EACCES;
38533 + *p = ERR_PTR(error); /* no ->put_link(), please */
38534 + path_put(&nd->path);
38535 + return error;
38536 + }
38537 +
38538 nd->last_type = LAST_BIND;
38539 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
38540 error = PTR_ERR(*p);
38541 if (!IS_ERR(*p)) {
38542 - char *s = nd_get_link(nd);
38543 + const char *s = nd_get_link(nd);
38544 error = 0;
38545 if (s)
38546 error = __vfs_follow_link(nd, s);
38547 @@ -1702,6 +1730,9 @@ static int do_path_lookup(int dfd, const
38548 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
38549
38550 if (likely(!retval)) {
38551 + if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
38552 + return -ENOENT;
38553 +
38554 if (unlikely(!audit_dummy_context())) {
38555 if (nd->path.dentry && nd->inode)
38556 audit_inode(name, nd->path.dentry);
38557 @@ -2012,6 +2043,30 @@ int vfs_create(struct inode *dir, struct
38558 return error;
38559 }
38560
38561 +/*
38562 + * Note that while the flag value (low two bits) for sys_open means:
38563 + * 00 - read-only
38564 + * 01 - write-only
38565 + * 10 - read-write
38566 + * 11 - special
38567 + * it is changed into
38568 + * 00 - no permissions needed
38569 + * 01 - read-permission
38570 + * 10 - write-permission
38571 + * 11 - read-write
38572 + * for the internal routines (ie open_namei()/follow_link() etc)
38573 + * This is more logical, and also allows the 00 "no perm needed"
38574 + * to be used for symlinks (where the permissions are checked
38575 + * later).
38576 + *
38577 +*/
38578 +static inline int open_to_namei_flags(int flag)
38579 +{
38580 + if ((flag+1) & O_ACCMODE)
38581 + flag++;
38582 + return flag;
38583 +}
38584 +
38585 static int may_open(struct path *path, int acc_mode, int flag)
38586 {
38587 struct dentry *dentry = path->dentry;
38588 @@ -2064,7 +2119,27 @@ static int may_open(struct path *path, i
38589 /*
38590 * Ensure there are no outstanding leases on the file.
38591 */
38592 - return break_lease(inode, flag);
38593 + error = break_lease(inode, flag);
38594 +
38595 + if (error)
38596 + return error;
38597 +
38598 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
38599 + error = -EPERM;
38600 + goto exit;
38601 + }
38602 +
38603 + if (gr_handle_rawio(inode)) {
38604 + error = -EPERM;
38605 + goto exit;
38606 + }
38607 +
38608 + if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
38609 + error = -EACCES;
38610 + goto exit;
38611 + }
38612 +exit:
38613 + return error;
38614 }
38615
38616 static int handle_truncate(struct file *filp)
38617 @@ -2090,30 +2165,6 @@ static int handle_truncate(struct file *
38618 }
38619
38620 /*
38621 - * Note that while the flag value (low two bits) for sys_open means:
38622 - * 00 - read-only
38623 - * 01 - write-only
38624 - * 10 - read-write
38625 - * 11 - special
38626 - * it is changed into
38627 - * 00 - no permissions needed
38628 - * 01 - read-permission
38629 - * 10 - write-permission
38630 - * 11 - read-write
38631 - * for the internal routines (ie open_namei()/follow_link() etc)
38632 - * This is more logical, and also allows the 00 "no perm needed"
38633 - * to be used for symlinks (where the permissions are checked
38634 - * later).
38635 - *
38636 -*/
38637 -static inline int open_to_namei_flags(int flag)
38638 -{
38639 - if ((flag+1) & O_ACCMODE)
38640 - flag++;
38641 - return flag;
38642 -}
38643 -
38644 -/*
38645 * Handle the last step of open()
38646 */
38647 static struct file *do_last(struct nameidata *nd, struct path *path,
38648 @@ -2122,6 +2173,7 @@ static struct file *do_last(struct namei
38649 struct dentry *dir = nd->path.dentry;
38650 struct dentry *dentry;
38651 int open_flag = op->open_flag;
38652 + int flag = open_to_namei_flags(open_flag);
38653 int will_truncate = open_flag & O_TRUNC;
38654 int want_write = 0;
38655 int acc_mode = op->acc_mode;
38656 @@ -2217,6 +2269,12 @@ static struct file *do_last(struct namei
38657 /* Negative dentry, just create the file */
38658 if (!dentry->d_inode) {
38659 int mode = op->mode;
38660 +
38661 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
38662 + error = -EACCES;
38663 + goto exit_mutex_unlock;
38664 + }
38665 +
38666 if (!IS_POSIXACL(dir->d_inode))
38667 mode &= ~current_umask();
38668 /*
38669 @@ -2240,6 +2298,8 @@ static struct file *do_last(struct namei
38670 error = vfs_create(dir->d_inode, dentry, mode, nd);
38671 if (error)
38672 goto exit_mutex_unlock;
38673 + else
38674 + gr_handle_create(path->dentry, path->mnt);
38675 mutex_unlock(&dir->d_inode->i_mutex);
38676 dput(nd->path.dentry);
38677 nd->path.dentry = dentry;
38678 @@ -2249,6 +2309,14 @@ static struct file *do_last(struct namei
38679 /*
38680 * It already exists.
38681 */
38682 +
38683 + /* only check if O_CREAT is specified, all other checks need to go
38684 + into may_open */
38685 + if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
38686 + error = -EACCES;
38687 + goto exit_mutex_unlock;
38688 + }
38689 +
38690 mutex_unlock(&dir->d_inode->i_mutex);
38691 audit_inode(pathname, path->dentry);
38692
38693 @@ -2535,6 +2603,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38694 error = may_mknod(mode);
38695 if (error)
38696 goto out_dput;
38697 +
38698 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
38699 + error = -EPERM;
38700 + goto out_dput;
38701 + }
38702 +
38703 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
38704 + error = -EACCES;
38705 + goto out_dput;
38706 + }
38707 +
38708 error = mnt_want_write(nd.path.mnt);
38709 if (error)
38710 goto out_dput;
38711 @@ -2555,6 +2634,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38712 }
38713 out_drop_write:
38714 mnt_drop_write(nd.path.mnt);
38715 +
38716 + if (!error)
38717 + gr_handle_create(dentry, nd.path.mnt);
38718 out_dput:
38719 dput(dentry);
38720 out_unlock:
38721 @@ -2607,6 +2689,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38722 if (IS_ERR(dentry))
38723 goto out_unlock;
38724
38725 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
38726 + error = -EACCES;
38727 + goto out_dput;
38728 + }
38729 +
38730 if (!IS_POSIXACL(nd.path.dentry->d_inode))
38731 mode &= ~current_umask();
38732 error = mnt_want_write(nd.path.mnt);
38733 @@ -2618,6 +2705,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38734 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
38735 out_drop_write:
38736 mnt_drop_write(nd.path.mnt);
38737 +
38738 + if (!error)
38739 + gr_handle_create(dentry, nd.path.mnt);
38740 +
38741 out_dput:
38742 dput(dentry);
38743 out_unlock:
38744 @@ -2697,6 +2788,8 @@ static long do_rmdir(int dfd, const char
38745 char * name;
38746 struct dentry *dentry;
38747 struct nameidata nd;
38748 + ino_t saved_ino = 0;
38749 + dev_t saved_dev = 0;
38750
38751 error = user_path_parent(dfd, pathname, &nd, &name);
38752 if (error)
38753 @@ -2721,6 +2814,19 @@ static long do_rmdir(int dfd, const char
38754 error = PTR_ERR(dentry);
38755 if (IS_ERR(dentry))
38756 goto exit2;
38757 +
38758 + if (dentry->d_inode != NULL) {
38759 + if (dentry->d_inode->i_nlink <= 1) {
38760 + saved_ino = dentry->d_inode->i_ino;
38761 + saved_dev = gr_get_dev_from_dentry(dentry);
38762 + }
38763 +
38764 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
38765 + error = -EACCES;
38766 + goto exit3;
38767 + }
38768 + }
38769 +
38770 error = mnt_want_write(nd.path.mnt);
38771 if (error)
38772 goto exit3;
38773 @@ -2728,6 +2834,8 @@ static long do_rmdir(int dfd, const char
38774 if (error)
38775 goto exit4;
38776 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
38777 + if (!error && (saved_dev || saved_ino))
38778 + gr_handle_delete(saved_ino, saved_dev);
38779 exit4:
38780 mnt_drop_write(nd.path.mnt);
38781 exit3:
38782 @@ -2790,6 +2898,8 @@ static long do_unlinkat(int dfd, const c
38783 struct dentry *dentry;
38784 struct nameidata nd;
38785 struct inode *inode = NULL;
38786 + ino_t saved_ino = 0;
38787 + dev_t saved_dev = 0;
38788
38789 error = user_path_parent(dfd, pathname, &nd, &name);
38790 if (error)
38791 @@ -2809,8 +2919,17 @@ static long do_unlinkat(int dfd, const c
38792 if (nd.last.name[nd.last.len])
38793 goto slashes;
38794 inode = dentry->d_inode;
38795 - if (inode)
38796 + if (inode) {
38797 ihold(inode);
38798 + if (inode->i_nlink <= 1) {
38799 + saved_ino = inode->i_ino;
38800 + saved_dev = gr_get_dev_from_dentry(dentry);
38801 + }
38802 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
38803 + error = -EACCES;
38804 + goto exit2;
38805 + }
38806 + }
38807 error = mnt_want_write(nd.path.mnt);
38808 if (error)
38809 goto exit2;
38810 @@ -2818,6 +2937,8 @@ static long do_unlinkat(int dfd, const c
38811 if (error)
38812 goto exit3;
38813 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
38814 + if (!error && (saved_ino || saved_dev))
38815 + gr_handle_delete(saved_ino, saved_dev);
38816 exit3:
38817 mnt_drop_write(nd.path.mnt);
38818 exit2:
38819 @@ -2895,6 +3016,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
38820 if (IS_ERR(dentry))
38821 goto out_unlock;
38822
38823 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
38824 + error = -EACCES;
38825 + goto out_dput;
38826 + }
38827 +
38828 error = mnt_want_write(nd.path.mnt);
38829 if (error)
38830 goto out_dput;
38831 @@ -2902,6 +3028,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
38832 if (error)
38833 goto out_drop_write;
38834 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
38835 + if (!error)
38836 + gr_handle_create(dentry, nd.path.mnt);
38837 out_drop_write:
38838 mnt_drop_write(nd.path.mnt);
38839 out_dput:
38840 @@ -3010,6 +3138,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38841 error = PTR_ERR(new_dentry);
38842 if (IS_ERR(new_dentry))
38843 goto out_unlock;
38844 +
38845 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
38846 + old_path.dentry->d_inode,
38847 + old_path.dentry->d_inode->i_mode, to)) {
38848 + error = -EACCES;
38849 + goto out_dput;
38850 + }
38851 +
38852 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
38853 + old_path.dentry, old_path.mnt, to)) {
38854 + error = -EACCES;
38855 + goto out_dput;
38856 + }
38857 +
38858 error = mnt_want_write(nd.path.mnt);
38859 if (error)
38860 goto out_dput;
38861 @@ -3017,6 +3159,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38862 if (error)
38863 goto out_drop_write;
38864 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
38865 + if (!error)
38866 + gr_handle_create(new_dentry, nd.path.mnt);
38867 out_drop_write:
38868 mnt_drop_write(nd.path.mnt);
38869 out_dput:
38870 @@ -3194,6 +3338,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38871 char *to;
38872 int error;
38873
38874 + pax_track_stack();
38875 +
38876 error = user_path_parent(olddfd, oldname, &oldnd, &from);
38877 if (error)
38878 goto exit;
38879 @@ -3250,6 +3396,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38880 if (new_dentry == trap)
38881 goto exit5;
38882
38883 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
38884 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
38885 + to);
38886 + if (error)
38887 + goto exit5;
38888 +
38889 error = mnt_want_write(oldnd.path.mnt);
38890 if (error)
38891 goto exit5;
38892 @@ -3259,6 +3411,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38893 goto exit6;
38894 error = vfs_rename(old_dir->d_inode, old_dentry,
38895 new_dir->d_inode, new_dentry);
38896 + if (!error)
38897 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
38898 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
38899 exit6:
38900 mnt_drop_write(oldnd.path.mnt);
38901 exit5:
38902 @@ -3284,6 +3439,8 @@ SYSCALL_DEFINE2(rename, const char __use
38903
38904 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
38905 {
38906 + char tmpbuf[64];
38907 + const char *newlink;
38908 int len;
38909
38910 len = PTR_ERR(link);
38911 @@ -3293,7 +3450,14 @@ int vfs_readlink(struct dentry *dentry,
38912 len = strlen(link);
38913 if (len > (unsigned) buflen)
38914 len = buflen;
38915 - if (copy_to_user(buffer, link, len))
38916 +
38917 + if (len < sizeof(tmpbuf)) {
38918 + memcpy(tmpbuf, link, len);
38919 + newlink = tmpbuf;
38920 + } else
38921 + newlink = link;
38922 +
38923 + if (copy_to_user(buffer, newlink, len))
38924 len = -EFAULT;
38925 out:
38926 return len;
38927 diff -urNp linux-2.6.39.4/fs/namespace.c linux-2.6.39.4/fs/namespace.c
38928 --- linux-2.6.39.4/fs/namespace.c 2011-05-19 00:06:34.000000000 -0400
38929 +++ linux-2.6.39.4/fs/namespace.c 2011-08-05 19:44:37.000000000 -0400
38930 @@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
38931 if (!(sb->s_flags & MS_RDONLY))
38932 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
38933 up_write(&sb->s_umount);
38934 +
38935 + gr_log_remount(mnt->mnt_devname, retval);
38936 +
38937 return retval;
38938 }
38939
38940 @@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
38941 br_write_unlock(vfsmount_lock);
38942 up_write(&namespace_sem);
38943 release_mounts(&umount_list);
38944 +
38945 + gr_log_unmount(mnt->mnt_devname, retval);
38946 +
38947 return retval;
38948 }
38949
38950 @@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
38951 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
38952 MS_STRICTATIME);
38953
38954 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
38955 + retval = -EPERM;
38956 + goto dput_out;
38957 + }
38958 +
38959 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
38960 + retval = -EPERM;
38961 + goto dput_out;
38962 + }
38963 +
38964 if (flags & MS_REMOUNT)
38965 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
38966 data_page);
38967 @@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
38968 dev_name, data_page);
38969 dput_out:
38970 path_put(&path);
38971 +
38972 + gr_log_mount(dev_name, dir_name, retval);
38973 +
38974 return retval;
38975 }
38976
38977 @@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
38978 if (error)
38979 goto out2;
38980
38981 + if (gr_handle_chroot_pivot()) {
38982 + error = -EPERM;
38983 + goto out2;
38984 + }
38985 +
38986 get_fs_root(current->fs, &root);
38987 error = lock_mount(&old);
38988 if (error)
38989 diff -urNp linux-2.6.39.4/fs/ncpfs/dir.c linux-2.6.39.4/fs/ncpfs/dir.c
38990 --- linux-2.6.39.4/fs/ncpfs/dir.c 2011-05-19 00:06:34.000000000 -0400
38991 +++ linux-2.6.39.4/fs/ncpfs/dir.c 2011-08-05 19:44:37.000000000 -0400
38992 @@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
38993 int res, val = 0, len;
38994 __u8 __name[NCP_MAXPATHLEN + 1];
38995
38996 + pax_track_stack();
38997 +
38998 if (dentry == dentry->d_sb->s_root)
38999 return 1;
39000
39001 @@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
39002 int error, res, len;
39003 __u8 __name[NCP_MAXPATHLEN + 1];
39004
39005 + pax_track_stack();
39006 +
39007 error = -EIO;
39008 if (!ncp_conn_valid(server))
39009 goto finished;
39010 @@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
39011 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
39012 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
39013
39014 + pax_track_stack();
39015 +
39016 ncp_age_dentry(server, dentry);
39017 len = sizeof(__name);
39018 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
39019 @@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
39020 int error, len;
39021 __u8 __name[NCP_MAXPATHLEN + 1];
39022
39023 + pax_track_stack();
39024 +
39025 DPRINTK("ncp_mkdir: making %s/%s\n",
39026 dentry->d_parent->d_name.name, dentry->d_name.name);
39027
39028 @@ -1135,6 +1143,8 @@ static int ncp_rename(struct inode *old_
39029 int old_len, new_len;
39030 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
39031
39032 + pax_track_stack();
39033 +
39034 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
39035 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
39036 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
39037 diff -urNp linux-2.6.39.4/fs/ncpfs/inode.c linux-2.6.39.4/fs/ncpfs/inode.c
39038 --- linux-2.6.39.4/fs/ncpfs/inode.c 2011-05-19 00:06:34.000000000 -0400
39039 +++ linux-2.6.39.4/fs/ncpfs/inode.c 2011-08-05 19:44:37.000000000 -0400
39040 @@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
39041 #endif
39042 struct ncp_entry_info finfo;
39043
39044 + pax_track_stack();
39045 +
39046 data.wdog_pid = NULL;
39047 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
39048 if (!server)
39049 diff -urNp linux-2.6.39.4/fs/nfs/inode.c linux-2.6.39.4/fs/nfs/inode.c
39050 --- linux-2.6.39.4/fs/nfs/inode.c 2011-07-09 09:18:51.000000000 -0400
39051 +++ linux-2.6.39.4/fs/nfs/inode.c 2011-08-05 19:44:37.000000000 -0400
39052 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
39053 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
39054 nfsi->attrtimeo_timestamp = jiffies;
39055
39056 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
39057 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
39058 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
39059 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
39060 else
39061 @@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
39062 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
39063 }
39064
39065 -static atomic_long_t nfs_attr_generation_counter;
39066 +static atomic_long_unchecked_t nfs_attr_generation_counter;
39067
39068 static unsigned long nfs_read_attr_generation_counter(void)
39069 {
39070 - return atomic_long_read(&nfs_attr_generation_counter);
39071 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
39072 }
39073
39074 unsigned long nfs_inc_attr_generation_counter(void)
39075 {
39076 - return atomic_long_inc_return(&nfs_attr_generation_counter);
39077 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
39078 }
39079
39080 void nfs_fattr_init(struct nfs_fattr *fattr)
39081 diff -urNp linux-2.6.39.4/fs/nfsd/nfs4state.c linux-2.6.39.4/fs/nfsd/nfs4state.c
39082 --- linux-2.6.39.4/fs/nfsd/nfs4state.c 2011-05-19 00:06:34.000000000 -0400
39083 +++ linux-2.6.39.4/fs/nfsd/nfs4state.c 2011-08-05 19:44:37.000000000 -0400
39084 @@ -3784,6 +3784,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
39085 unsigned int strhashval;
39086 int err;
39087
39088 + pax_track_stack();
39089 +
39090 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
39091 (long long) lock->lk_offset,
39092 (long long) lock->lk_length);
39093 diff -urNp linux-2.6.39.4/fs/nfsd/nfs4xdr.c linux-2.6.39.4/fs/nfsd/nfs4xdr.c
39094 --- linux-2.6.39.4/fs/nfsd/nfs4xdr.c 2011-05-19 00:06:34.000000000 -0400
39095 +++ linux-2.6.39.4/fs/nfsd/nfs4xdr.c 2011-08-05 19:44:37.000000000 -0400
39096 @@ -1793,6 +1793,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
39097 .dentry = dentry,
39098 };
39099
39100 + pax_track_stack();
39101 +
39102 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
39103 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
39104 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
39105 diff -urNp linux-2.6.39.4/fs/nfsd/vfs.c linux-2.6.39.4/fs/nfsd/vfs.c
39106 --- linux-2.6.39.4/fs/nfsd/vfs.c 2011-07-09 09:18:51.000000000 -0400
39107 +++ linux-2.6.39.4/fs/nfsd/vfs.c 2011-08-05 19:44:37.000000000 -0400
39108 @@ -901,7 +901,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
39109 } else {
39110 oldfs = get_fs();
39111 set_fs(KERNEL_DS);
39112 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
39113 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
39114 set_fs(oldfs);
39115 }
39116
39117 @@ -1005,7 +1005,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
39118
39119 /* Write the data. */
39120 oldfs = get_fs(); set_fs(KERNEL_DS);
39121 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
39122 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
39123 set_fs(oldfs);
39124 if (host_err < 0)
39125 goto out_nfserr;
39126 @@ -1528,7 +1528,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
39127 */
39128
39129 oldfs = get_fs(); set_fs(KERNEL_DS);
39130 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
39131 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
39132 set_fs(oldfs);
39133
39134 if (host_err < 0)
39135 diff -urNp linux-2.6.39.4/fs/notify/fanotify/fanotify_user.c linux-2.6.39.4/fs/notify/fanotify/fanotify_user.c
39136 --- linux-2.6.39.4/fs/notify/fanotify/fanotify_user.c 2011-05-19 00:06:34.000000000 -0400
39137 +++ linux-2.6.39.4/fs/notify/fanotify/fanotify_user.c 2011-08-14 11:28:46.000000000 -0400
39138 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
39139 goto out_close_fd;
39140
39141 ret = -EFAULT;
39142 - if (copy_to_user(buf, &fanotify_event_metadata,
39143 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
39144 + copy_to_user(buf, &fanotify_event_metadata,
39145 fanotify_event_metadata.event_len))
39146 goto out_kill_access_response;
39147
39148 diff -urNp linux-2.6.39.4/fs/notify/notification.c linux-2.6.39.4/fs/notify/notification.c
39149 --- linux-2.6.39.4/fs/notify/notification.c 2011-05-19 00:06:34.000000000 -0400
39150 +++ linux-2.6.39.4/fs/notify/notification.c 2011-08-05 19:44:37.000000000 -0400
39151 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
39152 * get set to 0 so it will never get 'freed'
39153 */
39154 static struct fsnotify_event *q_overflow_event;
39155 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
39156 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
39157
39158 /**
39159 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
39160 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
39161 */
39162 u32 fsnotify_get_cookie(void)
39163 {
39164 - return atomic_inc_return(&fsnotify_sync_cookie);
39165 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
39166 }
39167 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
39168
39169 diff -urNp linux-2.6.39.4/fs/ntfs/dir.c linux-2.6.39.4/fs/ntfs/dir.c
39170 --- linux-2.6.39.4/fs/ntfs/dir.c 2011-05-19 00:06:34.000000000 -0400
39171 +++ linux-2.6.39.4/fs/ntfs/dir.c 2011-08-05 19:44:37.000000000 -0400
39172 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
39173 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
39174 ~(s64)(ndir->itype.index.block_size - 1)));
39175 /* Bounds checks. */
39176 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
39177 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
39178 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
39179 "inode 0x%lx or driver bug.", vdir->i_ino);
39180 goto err_out;
39181 diff -urNp linux-2.6.39.4/fs/ntfs/file.c linux-2.6.39.4/fs/ntfs/file.c
39182 --- linux-2.6.39.4/fs/ntfs/file.c 2011-05-19 00:06:34.000000000 -0400
39183 +++ linux-2.6.39.4/fs/ntfs/file.c 2011-08-05 19:44:37.000000000 -0400
39184 @@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
39185 #endif /* NTFS_RW */
39186 };
39187
39188 -const struct file_operations ntfs_empty_file_ops = {};
39189 +const struct file_operations ntfs_empty_file_ops __read_only;
39190
39191 -const struct inode_operations ntfs_empty_inode_ops = {};
39192 +const struct inode_operations ntfs_empty_inode_ops __read_only;
39193 diff -urNp linux-2.6.39.4/fs/ocfs2/localalloc.c linux-2.6.39.4/fs/ocfs2/localalloc.c
39194 --- linux-2.6.39.4/fs/ocfs2/localalloc.c 2011-05-19 00:06:34.000000000 -0400
39195 +++ linux-2.6.39.4/fs/ocfs2/localalloc.c 2011-08-05 19:44:37.000000000 -0400
39196 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
39197 goto bail;
39198 }
39199
39200 - atomic_inc(&osb->alloc_stats.moves);
39201 + atomic_inc_unchecked(&osb->alloc_stats.moves);
39202
39203 bail:
39204 if (handle)
39205 diff -urNp linux-2.6.39.4/fs/ocfs2/namei.c linux-2.6.39.4/fs/ocfs2/namei.c
39206 --- linux-2.6.39.4/fs/ocfs2/namei.c 2011-05-19 00:06:34.000000000 -0400
39207 +++ linux-2.6.39.4/fs/ocfs2/namei.c 2011-08-05 19:44:37.000000000 -0400
39208 @@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
39209 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
39210 struct ocfs2_dir_lookup_result target_insert = { NULL, };
39211
39212 + pax_track_stack();
39213 +
39214 /* At some point it might be nice to break this function up a
39215 * bit. */
39216
39217 diff -urNp linux-2.6.39.4/fs/ocfs2/ocfs2.h linux-2.6.39.4/fs/ocfs2/ocfs2.h
39218 --- linux-2.6.39.4/fs/ocfs2/ocfs2.h 2011-05-19 00:06:34.000000000 -0400
39219 +++ linux-2.6.39.4/fs/ocfs2/ocfs2.h 2011-08-05 19:44:37.000000000 -0400
39220 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
39221
39222 struct ocfs2_alloc_stats
39223 {
39224 - atomic_t moves;
39225 - atomic_t local_data;
39226 - atomic_t bitmap_data;
39227 - atomic_t bg_allocs;
39228 - atomic_t bg_extends;
39229 + atomic_unchecked_t moves;
39230 + atomic_unchecked_t local_data;
39231 + atomic_unchecked_t bitmap_data;
39232 + atomic_unchecked_t bg_allocs;
39233 + atomic_unchecked_t bg_extends;
39234 };
39235
39236 enum ocfs2_local_alloc_state
39237 diff -urNp linux-2.6.39.4/fs/ocfs2/suballoc.c linux-2.6.39.4/fs/ocfs2/suballoc.c
39238 --- linux-2.6.39.4/fs/ocfs2/suballoc.c 2011-05-19 00:06:34.000000000 -0400
39239 +++ linux-2.6.39.4/fs/ocfs2/suballoc.c 2011-08-05 19:44:37.000000000 -0400
39240 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
39241 mlog_errno(status);
39242 goto bail;
39243 }
39244 - atomic_inc(&osb->alloc_stats.bg_extends);
39245 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
39246
39247 /* You should never ask for this much metadata */
39248 BUG_ON(bits_wanted >
39249 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
39250 mlog_errno(status);
39251 goto bail;
39252 }
39253 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39254 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39255
39256 *suballoc_loc = res.sr_bg_blkno;
39257 *suballoc_bit_start = res.sr_bit_offset;
39258 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
39259 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
39260 res->sr_bits);
39261
39262 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39263 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39264
39265 BUG_ON(res->sr_bits != 1);
39266
39267 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
39268 mlog_errno(status);
39269 goto bail;
39270 }
39271 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39272 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39273
39274 BUG_ON(res.sr_bits != 1);
39275
39276 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
39277 cluster_start,
39278 num_clusters);
39279 if (!status)
39280 - atomic_inc(&osb->alloc_stats.local_data);
39281 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
39282 } else {
39283 if (min_clusters > (osb->bitmap_cpg - 1)) {
39284 /* The only paths asking for contiguousness
39285 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
39286 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
39287 res.sr_bg_blkno,
39288 res.sr_bit_offset);
39289 - atomic_inc(&osb->alloc_stats.bitmap_data);
39290 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
39291 *num_clusters = res.sr_bits;
39292 }
39293 }
39294 diff -urNp linux-2.6.39.4/fs/ocfs2/super.c linux-2.6.39.4/fs/ocfs2/super.c
39295 --- linux-2.6.39.4/fs/ocfs2/super.c 2011-05-19 00:06:34.000000000 -0400
39296 +++ linux-2.6.39.4/fs/ocfs2/super.c 2011-08-05 19:44:37.000000000 -0400
39297 @@ -299,11 +299,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
39298 "%10s => GlobalAllocs: %d LocalAllocs: %d "
39299 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
39300 "Stats",
39301 - atomic_read(&osb->alloc_stats.bitmap_data),
39302 - atomic_read(&osb->alloc_stats.local_data),
39303 - atomic_read(&osb->alloc_stats.bg_allocs),
39304 - atomic_read(&osb->alloc_stats.moves),
39305 - atomic_read(&osb->alloc_stats.bg_extends));
39306 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
39307 + atomic_read_unchecked(&osb->alloc_stats.local_data),
39308 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
39309 + atomic_read_unchecked(&osb->alloc_stats.moves),
39310 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
39311
39312 out += snprintf(buf + out, len - out,
39313 "%10s => State: %u Descriptor: %llu Size: %u bits "
39314 @@ -2111,11 +2111,11 @@ static int ocfs2_initialize_super(struct
39315 spin_lock_init(&osb->osb_xattr_lock);
39316 ocfs2_init_steal_slots(osb);
39317
39318 - atomic_set(&osb->alloc_stats.moves, 0);
39319 - atomic_set(&osb->alloc_stats.local_data, 0);
39320 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
39321 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
39322 - atomic_set(&osb->alloc_stats.bg_extends, 0);
39323 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
39324 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
39325 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
39326 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
39327 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
39328
39329 /* Copy the blockcheck stats from the superblock probe */
39330 osb->osb_ecc_stats = *stats;
39331 diff -urNp linux-2.6.39.4/fs/ocfs2/symlink.c linux-2.6.39.4/fs/ocfs2/symlink.c
39332 --- linux-2.6.39.4/fs/ocfs2/symlink.c 2011-05-19 00:06:34.000000000 -0400
39333 +++ linux-2.6.39.4/fs/ocfs2/symlink.c 2011-08-05 19:44:37.000000000 -0400
39334 @@ -142,7 +142,7 @@ bail:
39335
39336 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
39337 {
39338 - char *link = nd_get_link(nd);
39339 + const char *link = nd_get_link(nd);
39340 if (!IS_ERR(link))
39341 kfree(link);
39342 }
39343 diff -urNp linux-2.6.39.4/fs/open.c linux-2.6.39.4/fs/open.c
39344 --- linux-2.6.39.4/fs/open.c 2011-05-19 00:06:34.000000000 -0400
39345 +++ linux-2.6.39.4/fs/open.c 2011-08-05 19:44:37.000000000 -0400
39346 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
39347 error = locks_verify_truncate(inode, NULL, length);
39348 if (!error)
39349 error = security_path_truncate(&path);
39350 +
39351 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
39352 + error = -EACCES;
39353 +
39354 if (!error)
39355 error = do_truncate(path.dentry, length, 0, NULL);
39356
39357 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
39358 if (__mnt_is_readonly(path.mnt))
39359 res = -EROFS;
39360
39361 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
39362 + res = -EACCES;
39363 +
39364 out_path_release:
39365 path_put(&path);
39366 out:
39367 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
39368 if (error)
39369 goto dput_and_out;
39370
39371 + gr_log_chdir(path.dentry, path.mnt);
39372 +
39373 set_fs_pwd(current->fs, &path);
39374
39375 dput_and_out:
39376 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
39377 goto out_putf;
39378
39379 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
39380 +
39381 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
39382 + error = -EPERM;
39383 +
39384 + if (!error)
39385 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
39386 +
39387 if (!error)
39388 set_fs_pwd(current->fs, &file->f_path);
39389 out_putf:
39390 @@ -438,7 +454,18 @@ SYSCALL_DEFINE1(chroot, const char __use
39391 if (error)
39392 goto dput_and_out;
39393
39394 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
39395 + goto dput_and_out;
39396 +
39397 + if (gr_handle_chroot_caps(&path)) {
39398 + error = -ENOMEM;
39399 + goto dput_and_out;
39400 + }
39401 +
39402 set_fs_root(current->fs, &path);
39403 +
39404 + gr_handle_chroot_chdir(&path);
39405 +
39406 error = 0;
39407 dput_and_out:
39408 path_put(&path);
39409 @@ -466,12 +493,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
39410 err = mnt_want_write_file(file);
39411 if (err)
39412 goto out_putf;
39413 +
39414 mutex_lock(&inode->i_mutex);
39415 +
39416 + if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
39417 + err = -EACCES;
39418 + goto out_unlock;
39419 + }
39420 +
39421 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
39422 if (err)
39423 goto out_unlock;
39424 if (mode == (mode_t) -1)
39425 mode = inode->i_mode;
39426 +
39427 + if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
39428 + err = -EACCES;
39429 + goto out_unlock;
39430 + }
39431 +
39432 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39433 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39434 err = notify_change(dentry, &newattrs);
39435 @@ -499,12 +539,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
39436 error = mnt_want_write(path.mnt);
39437 if (error)
39438 goto dput_and_out;
39439 +
39440 mutex_lock(&inode->i_mutex);
39441 +
39442 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
39443 + error = -EACCES;
39444 + goto out_unlock;
39445 + }
39446 +
39447 error = security_path_chmod(path.dentry, path.mnt, mode);
39448 if (error)
39449 goto out_unlock;
39450 if (mode == (mode_t) -1)
39451 mode = inode->i_mode;
39452 +
39453 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
39454 + error = -EACCES;
39455 + goto out_unlock;
39456 + }
39457 +
39458 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39459 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39460 error = notify_change(path.dentry, &newattrs);
39461 @@ -528,6 +581,9 @@ static int chown_common(struct path *pat
39462 int error;
39463 struct iattr newattrs;
39464
39465 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
39466 + return -EACCES;
39467 +
39468 newattrs.ia_valid = ATTR_CTIME;
39469 if (user != (uid_t) -1) {
39470 newattrs.ia_valid |= ATTR_UID;
39471 @@ -998,7 +1054,10 @@ long do_sys_open(int dfd, const char __u
39472 if (!IS_ERR(tmp)) {
39473 fd = get_unused_fd_flags(flags);
39474 if (fd >= 0) {
39475 - struct file *f = do_filp_open(dfd, tmp, &op, lookup);
39476 + struct file *f;
39477 + /* don't allow to be set by userland */
39478 + flags &= ~FMODE_GREXEC;
39479 + f = do_filp_open(dfd, tmp, &op, lookup);
39480 if (IS_ERR(f)) {
39481 put_unused_fd(fd);
39482 fd = PTR_ERR(f);
39483 diff -urNp linux-2.6.39.4/fs/partitions/ldm.c linux-2.6.39.4/fs/partitions/ldm.c
39484 --- linux-2.6.39.4/fs/partitions/ldm.c 2011-06-03 00:04:14.000000000 -0400
39485 +++ linux-2.6.39.4/fs/partitions/ldm.c 2011-08-05 19:44:37.000000000 -0400
39486 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
39487 ldm_error ("A VBLK claims to have %d parts.", num);
39488 return false;
39489 }
39490 +
39491 if (rec >= num) {
39492 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
39493 return false;
39494 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
39495 goto found;
39496 }
39497
39498 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
39499 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
39500 if (!f) {
39501 ldm_crit ("Out of memory.");
39502 return false;
39503 diff -urNp linux-2.6.39.4/fs/pipe.c linux-2.6.39.4/fs/pipe.c
39504 --- linux-2.6.39.4/fs/pipe.c 2011-05-19 00:06:34.000000000 -0400
39505 +++ linux-2.6.39.4/fs/pipe.c 2011-08-05 19:44:37.000000000 -0400
39506 @@ -420,9 +420,9 @@ redo:
39507 }
39508 if (bufs) /* More to do? */
39509 continue;
39510 - if (!pipe->writers)
39511 + if (!atomic_read(&pipe->writers))
39512 break;
39513 - if (!pipe->waiting_writers) {
39514 + if (!atomic_read(&pipe->waiting_writers)) {
39515 /* syscall merging: Usually we must not sleep
39516 * if O_NONBLOCK is set, or if we got some data.
39517 * But if a writer sleeps in kernel space, then
39518 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
39519 mutex_lock(&inode->i_mutex);
39520 pipe = inode->i_pipe;
39521
39522 - if (!pipe->readers) {
39523 + if (!atomic_read(&pipe->readers)) {
39524 send_sig(SIGPIPE, current, 0);
39525 ret = -EPIPE;
39526 goto out;
39527 @@ -530,7 +530,7 @@ redo1:
39528 for (;;) {
39529 int bufs;
39530
39531 - if (!pipe->readers) {
39532 + if (!atomic_read(&pipe->readers)) {
39533 send_sig(SIGPIPE, current, 0);
39534 if (!ret)
39535 ret = -EPIPE;
39536 @@ -616,9 +616,9 @@ redo2:
39537 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
39538 do_wakeup = 0;
39539 }
39540 - pipe->waiting_writers++;
39541 + atomic_inc(&pipe->waiting_writers);
39542 pipe_wait(pipe);
39543 - pipe->waiting_writers--;
39544 + atomic_dec(&pipe->waiting_writers);
39545 }
39546 out:
39547 mutex_unlock(&inode->i_mutex);
39548 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
39549 mask = 0;
39550 if (filp->f_mode & FMODE_READ) {
39551 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
39552 - if (!pipe->writers && filp->f_version != pipe->w_counter)
39553 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
39554 mask |= POLLHUP;
39555 }
39556
39557 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
39558 * Most Unices do not set POLLERR for FIFOs but on Linux they
39559 * behave exactly like pipes for poll().
39560 */
39561 - if (!pipe->readers)
39562 + if (!atomic_read(&pipe->readers))
39563 mask |= POLLERR;
39564 }
39565
39566 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
39567
39568 mutex_lock(&inode->i_mutex);
39569 pipe = inode->i_pipe;
39570 - pipe->readers -= decr;
39571 - pipe->writers -= decw;
39572 + atomic_sub(decr, &pipe->readers);
39573 + atomic_sub(decw, &pipe->writers);
39574
39575 - if (!pipe->readers && !pipe->writers) {
39576 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
39577 free_pipe_info(inode);
39578 } else {
39579 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
39580 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
39581
39582 if (inode->i_pipe) {
39583 ret = 0;
39584 - inode->i_pipe->readers++;
39585 + atomic_inc(&inode->i_pipe->readers);
39586 }
39587
39588 mutex_unlock(&inode->i_mutex);
39589 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
39590
39591 if (inode->i_pipe) {
39592 ret = 0;
39593 - inode->i_pipe->writers++;
39594 + atomic_inc(&inode->i_pipe->writers);
39595 }
39596
39597 mutex_unlock(&inode->i_mutex);
39598 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
39599 if (inode->i_pipe) {
39600 ret = 0;
39601 if (filp->f_mode & FMODE_READ)
39602 - inode->i_pipe->readers++;
39603 + atomic_inc(&inode->i_pipe->readers);
39604 if (filp->f_mode & FMODE_WRITE)
39605 - inode->i_pipe->writers++;
39606 + atomic_inc(&inode->i_pipe->writers);
39607 }
39608
39609 mutex_unlock(&inode->i_mutex);
39610 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
39611 inode->i_pipe = NULL;
39612 }
39613
39614 -static struct vfsmount *pipe_mnt __read_mostly;
39615 +struct vfsmount *pipe_mnt __read_mostly;
39616
39617 /*
39618 * pipefs_dname() is called from d_path().
39619 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
39620 goto fail_iput;
39621 inode->i_pipe = pipe;
39622
39623 - pipe->readers = pipe->writers = 1;
39624 + atomic_set(&pipe->readers, 1);
39625 + atomic_set(&pipe->writers, 1);
39626 inode->i_fop = &rdwr_pipefifo_fops;
39627
39628 /*
39629 diff -urNp linux-2.6.39.4/fs/proc/array.c linux-2.6.39.4/fs/proc/array.c
39630 --- linux-2.6.39.4/fs/proc/array.c 2011-05-19 00:06:34.000000000 -0400
39631 +++ linux-2.6.39.4/fs/proc/array.c 2011-08-05 19:44:37.000000000 -0400
39632 @@ -60,6 +60,7 @@
39633 #include <linux/tty.h>
39634 #include <linux/string.h>
39635 #include <linux/mman.h>
39636 +#include <linux/grsecurity.h>
39637 #include <linux/proc_fs.h>
39638 #include <linux/ioport.h>
39639 #include <linux/uaccess.h>
39640 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
39641 seq_putc(m, '\n');
39642 }
39643
39644 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39645 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
39646 +{
39647 + if (p->mm)
39648 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
39649 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
39650 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
39651 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
39652 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
39653 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
39654 + else
39655 + seq_printf(m, "PaX:\t-----\n");
39656 +}
39657 +#endif
39658 +
39659 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
39660 struct pid *pid, struct task_struct *task)
39661 {
39662 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
39663 task_cpus_allowed(m, task);
39664 cpuset_task_status_allowed(m, task);
39665 task_context_switch_counts(m, task);
39666 +
39667 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39668 + task_pax(m, task);
39669 +#endif
39670 +
39671 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39672 + task_grsec_rbac(m, task);
39673 +#endif
39674 +
39675 return 0;
39676 }
39677
39678 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39679 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39680 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
39681 + _mm->pax_flags & MF_PAX_SEGMEXEC))
39682 +#endif
39683 +
39684 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
39685 struct pid *pid, struct task_struct *task, int whole)
39686 {
39687 @@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
39688 cputime_t cutime, cstime, utime, stime;
39689 cputime_t cgtime, gtime;
39690 unsigned long rsslim = 0;
39691 - char tcomm[sizeof(task->comm)];
39692 + char tcomm[sizeof(task->comm)] = { 0 };
39693 unsigned long flags;
39694
39695 + pax_track_stack();
39696 +
39697 state = *get_task_state(task);
39698 vsize = eip = esp = 0;
39699 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
39700 @@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
39701 gtime = task->gtime;
39702 }
39703
39704 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39705 + if (PAX_RAND_FLAGS(mm)) {
39706 + eip = 0;
39707 + esp = 0;
39708 + wchan = 0;
39709 + }
39710 +#endif
39711 +#ifdef CONFIG_GRKERNSEC_HIDESYM
39712 + wchan = 0;
39713 + eip =0;
39714 + esp =0;
39715 +#endif
39716 +
39717 /* scale priority and nice values from timeslices to -20..20 */
39718 /* to make it look like a "normal" Unix priority/nice value */
39719 priority = task_prio(task);
39720 @@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
39721 vsize,
39722 mm ? get_mm_rss(mm) : 0,
39723 rsslim,
39724 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39725 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
39726 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
39727 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
39728 +#else
39729 mm ? (permitted ? mm->start_code : 1) : 0,
39730 mm ? (permitted ? mm->end_code : 1) : 0,
39731 (permitted && mm) ? mm->start_stack : 0,
39732 +#endif
39733 esp,
39734 eip,
39735 /* The signal information here is obsolete.
39736 @@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
39737
39738 return 0;
39739 }
39740 +
39741 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
39742 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
39743 +{
39744 + u32 curr_ip = 0;
39745 + unsigned long flags;
39746 +
39747 + if (lock_task_sighand(task, &flags)) {
39748 + curr_ip = task->signal->curr_ip;
39749 + unlock_task_sighand(task, &flags);
39750 + }
39751 +
39752 + return sprintf(buffer, "%pI4\n", &curr_ip);
39753 +}
39754 +#endif
39755 diff -urNp linux-2.6.39.4/fs/proc/base.c linux-2.6.39.4/fs/proc/base.c
39756 --- linux-2.6.39.4/fs/proc/base.c 2011-08-05 21:11:51.000000000 -0400
39757 +++ linux-2.6.39.4/fs/proc/base.c 2011-08-05 21:13:18.000000000 -0400
39758 @@ -104,6 +104,22 @@ struct pid_entry {
39759 union proc_op op;
39760 };
39761
39762 +struct getdents_callback {
39763 + struct linux_dirent __user * current_dir;
39764 + struct linux_dirent __user * previous;
39765 + struct file * file;
39766 + int count;
39767 + int error;
39768 +};
39769 +
39770 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
39771 + loff_t offset, u64 ino, unsigned int d_type)
39772 +{
39773 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
39774 + buf->error = -EINVAL;
39775 + return 0;
39776 +}
39777 +
39778 #define NOD(NAME, MODE, IOP, FOP, OP) { \
39779 .name = (NAME), \
39780 .len = sizeof(NAME) - 1, \
39781 @@ -206,6 +222,9 @@ static struct mm_struct *__check_mem_per
39782 if (task == current)
39783 return mm;
39784
39785 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
39786 + return ERR_PTR(-EPERM);
39787 +
39788 /*
39789 * If current is actively ptrace'ing, and would also be
39790 * permitted to freshly attach with ptrace now, permit it.
39791 @@ -279,6 +298,9 @@ static int proc_pid_cmdline(struct task_
39792 if (!mm->arg_end)
39793 goto out_mm; /* Shh! No looking before we're done */
39794
39795 + if (gr_acl_handle_procpidmem(task))
39796 + goto out_mm;
39797 +
39798 len = mm->arg_end - mm->arg_start;
39799
39800 if (len > PAGE_SIZE)
39801 @@ -306,12 +328,28 @@ out:
39802 return res;
39803 }
39804
39805 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39806 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39807 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
39808 + _mm->pax_flags & MF_PAX_SEGMEXEC))
39809 +#endif
39810 +
39811 static int proc_pid_auxv(struct task_struct *task, char *buffer)
39812 {
39813 struct mm_struct *mm = mm_for_maps(task);
39814 int res = PTR_ERR(mm);
39815 if (mm && !IS_ERR(mm)) {
39816 unsigned int nwords = 0;
39817 +
39818 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39819 + /* allow if we're currently ptracing this task */
39820 + if (PAX_RAND_FLAGS(mm) &&
39821 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
39822 + mmput(mm);
39823 + return res;
39824 + }
39825 +#endif
39826 +
39827 do {
39828 nwords += 2;
39829 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
39830 @@ -325,7 +363,7 @@ static int proc_pid_auxv(struct task_str
39831 }
39832
39833
39834 -#ifdef CONFIG_KALLSYMS
39835 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39836 /*
39837 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
39838 * Returns the resolved symbol. If that fails, simply return the address.
39839 @@ -364,7 +402,7 @@ static void unlock_trace(struct task_str
39840 mutex_unlock(&task->signal->cred_guard_mutex);
39841 }
39842
39843 -#ifdef CONFIG_STACKTRACE
39844 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39845
39846 #define MAX_STACK_TRACE_DEPTH 64
39847
39848 @@ -555,7 +593,7 @@ static int proc_pid_limits(struct task_s
39849 return count;
39850 }
39851
39852 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
39853 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39854 static int proc_pid_syscall(struct task_struct *task, char *buffer)
39855 {
39856 long nr;
39857 @@ -584,7 +622,7 @@ static int proc_pid_syscall(struct task_
39858 /************************************************************************/
39859
39860 /* permission checks */
39861 -static int proc_fd_access_allowed(struct inode *inode)
39862 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
39863 {
39864 struct task_struct *task;
39865 int allowed = 0;
39866 @@ -594,7 +632,10 @@ static int proc_fd_access_allowed(struct
39867 */
39868 task = get_proc_task(inode);
39869 if (task) {
39870 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39871 + if (log)
39872 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
39873 + else
39874 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39875 put_task_struct(task);
39876 }
39877 return allowed;
39878 @@ -973,6 +1014,9 @@ static ssize_t environ_read(struct file
39879 if (!task)
39880 goto out_no_task;
39881
39882 + if (gr_acl_handle_procpidmem(task))
39883 + goto out;
39884 +
39885 ret = -ENOMEM;
39886 page = (char *)__get_free_page(GFP_TEMPORARY);
39887 if (!page)
39888 @@ -1660,7 +1704,7 @@ static void *proc_pid_follow_link(struct
39889 path_put(&nd->path);
39890
39891 /* Are we allowed to snoop on the tasks file descriptors? */
39892 - if (!proc_fd_access_allowed(inode))
39893 + if (!proc_fd_access_allowed(inode,0))
39894 goto out;
39895
39896 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
39897 @@ -1699,8 +1743,18 @@ static int proc_pid_readlink(struct dent
39898 struct path path;
39899
39900 /* Are we allowed to snoop on the tasks file descriptors? */
39901 - if (!proc_fd_access_allowed(inode))
39902 - goto out;
39903 + /* logging this is needed for learning on chromium to work properly,
39904 + but we don't want to flood the logs from 'ps' which does a readlink
39905 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
39906 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
39907 + */
39908 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
39909 + if (!proc_fd_access_allowed(inode,0))
39910 + goto out;
39911 + } else {
39912 + if (!proc_fd_access_allowed(inode,1))
39913 + goto out;
39914 + }
39915
39916 error = PROC_I(inode)->op.proc_get_link(inode, &path);
39917 if (error)
39918 @@ -1766,7 +1820,11 @@ static struct inode *proc_pid_make_inode
39919 rcu_read_lock();
39920 cred = __task_cred(task);
39921 inode->i_uid = cred->euid;
39922 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39923 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39924 +#else
39925 inode->i_gid = cred->egid;
39926 +#endif
39927 rcu_read_unlock();
39928 }
39929 security_task_to_inode(task, inode);
39930 @@ -1784,6 +1842,9 @@ static int pid_getattr(struct vfsmount *
39931 struct inode *inode = dentry->d_inode;
39932 struct task_struct *task;
39933 const struct cred *cred;
39934 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39935 + const struct cred *tmpcred = current_cred();
39936 +#endif
39937
39938 generic_fillattr(inode, stat);
39939
39940 @@ -1791,13 +1852,41 @@ static int pid_getattr(struct vfsmount *
39941 stat->uid = 0;
39942 stat->gid = 0;
39943 task = pid_task(proc_pid(inode), PIDTYPE_PID);
39944 +
39945 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
39946 + rcu_read_unlock();
39947 + return -ENOENT;
39948 + }
39949 +
39950 if (task) {
39951 + cred = __task_cred(task);
39952 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39953 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
39954 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39955 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
39956 +#endif
39957 + ) {
39958 +#endif
39959 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39960 +#ifdef CONFIG_GRKERNSEC_PROC_USER
39961 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39962 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39963 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39964 +#endif
39965 task_dumpable(task)) {
39966 - cred = __task_cred(task);
39967 stat->uid = cred->euid;
39968 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39969 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
39970 +#else
39971 stat->gid = cred->egid;
39972 +#endif
39973 }
39974 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39975 + } else {
39976 + rcu_read_unlock();
39977 + return -ENOENT;
39978 + }
39979 +#endif
39980 }
39981 rcu_read_unlock();
39982 return 0;
39983 @@ -1834,11 +1923,20 @@ static int pid_revalidate(struct dentry
39984
39985 if (task) {
39986 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39987 +#ifdef CONFIG_GRKERNSEC_PROC_USER
39988 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39989 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39990 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39991 +#endif
39992 task_dumpable(task)) {
39993 rcu_read_lock();
39994 cred = __task_cred(task);
39995 inode->i_uid = cred->euid;
39996 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39997 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39998 +#else
39999 inode->i_gid = cred->egid;
40000 +#endif
40001 rcu_read_unlock();
40002 } else {
40003 inode->i_uid = 0;
40004 @@ -1959,7 +2057,8 @@ static int proc_fd_info(struct inode *in
40005 int fd = proc_fd(inode);
40006
40007 if (task) {
40008 - files = get_files_struct(task);
40009 + if (!gr_acl_handle_procpidmem(task))
40010 + files = get_files_struct(task);
40011 put_task_struct(task);
40012 }
40013 if (files) {
40014 @@ -2219,15 +2318,25 @@ static const struct file_operations proc
40015 */
40016 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
40017 {
40018 + struct task_struct *task;
40019 int rv;
40020
40021 if (flags & IPERM_FLAG_RCU)
40022 return -ECHILD;
40023 rv = generic_permission(inode, mask, flags, NULL);
40024 - if (rv == 0)
40025 - return 0;
40026 +
40027 if (task_pid(current) == proc_pid(inode))
40028 rv = 0;
40029 +
40030 + task = get_proc_task(inode);
40031 + if (task == NULL)
40032 + return rv;
40033 +
40034 + if (gr_acl_handle_procpidmem(task))
40035 + rv = -EACCES;
40036 +
40037 + put_task_struct(task);
40038 +
40039 return rv;
40040 }
40041
40042 @@ -2337,6 +2446,9 @@ static struct dentry *proc_pident_lookup
40043 if (!task)
40044 goto out_no_task;
40045
40046 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
40047 + goto out;
40048 +
40049 /*
40050 * Yes, it does not scale. And it should not. Don't add
40051 * new entries into /proc/<tgid>/ without very good reasons.
40052 @@ -2381,6 +2493,9 @@ static int proc_pident_readdir(struct fi
40053 if (!task)
40054 goto out_no_task;
40055
40056 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
40057 + goto out;
40058 +
40059 ret = 0;
40060 i = filp->f_pos;
40061 switch (i) {
40062 @@ -2651,7 +2766,7 @@ static void *proc_self_follow_link(struc
40063 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
40064 void *cookie)
40065 {
40066 - char *s = nd_get_link(nd);
40067 + const char *s = nd_get_link(nd);
40068 if (!IS_ERR(s))
40069 __putname(s);
40070 }
40071 @@ -2838,7 +2953,7 @@ static const struct pid_entry tgid_base_
40072 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
40073 #endif
40074 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
40075 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
40076 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
40077 INF("syscall", S_IRUGO, proc_pid_syscall),
40078 #endif
40079 INF("cmdline", S_IRUGO, proc_pid_cmdline),
40080 @@ -2863,10 +2978,10 @@ static const struct pid_entry tgid_base_
40081 #ifdef CONFIG_SECURITY
40082 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
40083 #endif
40084 -#ifdef CONFIG_KALLSYMS
40085 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40086 INF("wchan", S_IRUGO, proc_pid_wchan),
40087 #endif
40088 -#ifdef CONFIG_STACKTRACE
40089 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40090 ONE("stack", S_IRUGO, proc_pid_stack),
40091 #endif
40092 #ifdef CONFIG_SCHEDSTATS
40093 @@ -2897,6 +3012,9 @@ static const struct pid_entry tgid_base_
40094 #ifdef CONFIG_TASK_IO_ACCOUNTING
40095 INF("io", S_IRUSR, proc_tgid_io_accounting),
40096 #endif
40097 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
40098 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
40099 +#endif
40100 };
40101
40102 static int proc_tgid_base_readdir(struct file * filp,
40103 @@ -3022,7 +3140,14 @@ static struct dentry *proc_pid_instantia
40104 if (!inode)
40105 goto out;
40106
40107 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40108 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
40109 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40110 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
40111 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
40112 +#else
40113 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
40114 +#endif
40115 inode->i_op = &proc_tgid_base_inode_operations;
40116 inode->i_fop = &proc_tgid_base_operations;
40117 inode->i_flags|=S_IMMUTABLE;
40118 @@ -3064,7 +3189,11 @@ struct dentry *proc_pid_lookup(struct in
40119 if (!task)
40120 goto out;
40121
40122 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
40123 + goto out_put_task;
40124 +
40125 result = proc_pid_instantiate(dir, dentry, task, NULL);
40126 +out_put_task:
40127 put_task_struct(task);
40128 out:
40129 return result;
40130 @@ -3129,6 +3258,11 @@ int proc_pid_readdir(struct file * filp,
40131 {
40132 unsigned int nr;
40133 struct task_struct *reaper;
40134 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40135 + const struct cred *tmpcred = current_cred();
40136 + const struct cred *itercred;
40137 +#endif
40138 + filldir_t __filldir = filldir;
40139 struct tgid_iter iter;
40140 struct pid_namespace *ns;
40141
40142 @@ -3152,8 +3286,27 @@ int proc_pid_readdir(struct file * filp,
40143 for (iter = next_tgid(ns, iter);
40144 iter.task;
40145 iter.tgid += 1, iter = next_tgid(ns, iter)) {
40146 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40147 + rcu_read_lock();
40148 + itercred = __task_cred(iter.task);
40149 +#endif
40150 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
40151 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40152 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
40153 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
40154 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
40155 +#endif
40156 + )
40157 +#endif
40158 + )
40159 + __filldir = &gr_fake_filldir;
40160 + else
40161 + __filldir = filldir;
40162 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40163 + rcu_read_unlock();
40164 +#endif
40165 filp->f_pos = iter.tgid + TGID_OFFSET;
40166 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
40167 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
40168 put_task_struct(iter.task);
40169 goto out;
40170 }
40171 @@ -3180,7 +3333,7 @@ static const struct pid_entry tid_base_s
40172 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
40173 #endif
40174 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
40175 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
40176 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
40177 INF("syscall", S_IRUGO, proc_pid_syscall),
40178 #endif
40179 INF("cmdline", S_IRUGO, proc_pid_cmdline),
40180 @@ -3204,10 +3357,10 @@ static const struct pid_entry tid_base_s
40181 #ifdef CONFIG_SECURITY
40182 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
40183 #endif
40184 -#ifdef CONFIG_KALLSYMS
40185 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40186 INF("wchan", S_IRUGO, proc_pid_wchan),
40187 #endif
40188 -#ifdef CONFIG_STACKTRACE
40189 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40190 ONE("stack", S_IRUGO, proc_pid_stack),
40191 #endif
40192 #ifdef CONFIG_SCHEDSTATS
40193 diff -urNp linux-2.6.39.4/fs/proc/cmdline.c linux-2.6.39.4/fs/proc/cmdline.c
40194 --- linux-2.6.39.4/fs/proc/cmdline.c 2011-05-19 00:06:34.000000000 -0400
40195 +++ linux-2.6.39.4/fs/proc/cmdline.c 2011-08-05 19:44:37.000000000 -0400
40196 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
40197
40198 static int __init proc_cmdline_init(void)
40199 {
40200 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40201 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
40202 +#else
40203 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
40204 +#endif
40205 return 0;
40206 }
40207 module_init(proc_cmdline_init);
40208 diff -urNp linux-2.6.39.4/fs/proc/devices.c linux-2.6.39.4/fs/proc/devices.c
40209 --- linux-2.6.39.4/fs/proc/devices.c 2011-05-19 00:06:34.000000000 -0400
40210 +++ linux-2.6.39.4/fs/proc/devices.c 2011-08-05 19:44:37.000000000 -0400
40211 @@ -64,7 +64,11 @@ static const struct file_operations proc
40212
40213 static int __init proc_devices_init(void)
40214 {
40215 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40216 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
40217 +#else
40218 proc_create("devices", 0, NULL, &proc_devinfo_operations);
40219 +#endif
40220 return 0;
40221 }
40222 module_init(proc_devices_init);
40223 diff -urNp linux-2.6.39.4/fs/proc/inode.c linux-2.6.39.4/fs/proc/inode.c
40224 --- linux-2.6.39.4/fs/proc/inode.c 2011-05-19 00:06:34.000000000 -0400
40225 +++ linux-2.6.39.4/fs/proc/inode.c 2011-08-05 19:44:37.000000000 -0400
40226 @@ -433,7 +433,11 @@ struct inode *proc_get_inode(struct supe
40227 if (de->mode) {
40228 inode->i_mode = de->mode;
40229 inode->i_uid = de->uid;
40230 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
40231 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
40232 +#else
40233 inode->i_gid = de->gid;
40234 +#endif
40235 }
40236 if (de->size)
40237 inode->i_size = de->size;
40238 diff -urNp linux-2.6.39.4/fs/proc/internal.h linux-2.6.39.4/fs/proc/internal.h
40239 --- linux-2.6.39.4/fs/proc/internal.h 2011-05-19 00:06:34.000000000 -0400
40240 +++ linux-2.6.39.4/fs/proc/internal.h 2011-08-05 19:44:37.000000000 -0400
40241 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
40242 struct pid *pid, struct task_struct *task);
40243 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
40244 struct pid *pid, struct task_struct *task);
40245 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
40246 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
40247 +#endif
40248 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
40249
40250 extern const struct file_operations proc_maps_operations;
40251 diff -urNp linux-2.6.39.4/fs/proc/Kconfig linux-2.6.39.4/fs/proc/Kconfig
40252 --- linux-2.6.39.4/fs/proc/Kconfig 2011-05-19 00:06:34.000000000 -0400
40253 +++ linux-2.6.39.4/fs/proc/Kconfig 2011-08-05 19:44:37.000000000 -0400
40254 @@ -30,12 +30,12 @@ config PROC_FS
40255
40256 config PROC_KCORE
40257 bool "/proc/kcore support" if !ARM
40258 - depends on PROC_FS && MMU
40259 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
40260
40261 config PROC_VMCORE
40262 bool "/proc/vmcore support"
40263 - depends on PROC_FS && CRASH_DUMP
40264 - default y
40265 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
40266 + default n
40267 help
40268 Exports the dump image of crashed kernel in ELF format.
40269
40270 @@ -59,8 +59,8 @@ config PROC_SYSCTL
40271 limited in memory.
40272
40273 config PROC_PAGE_MONITOR
40274 - default y
40275 - depends on PROC_FS && MMU
40276 + default n
40277 + depends on PROC_FS && MMU && !GRKERNSEC
40278 bool "Enable /proc page monitoring" if EXPERT
40279 help
40280 Various /proc files exist to monitor process memory utilization:
40281 diff -urNp linux-2.6.39.4/fs/proc/kcore.c linux-2.6.39.4/fs/proc/kcore.c
40282 --- linux-2.6.39.4/fs/proc/kcore.c 2011-05-19 00:06:34.000000000 -0400
40283 +++ linux-2.6.39.4/fs/proc/kcore.c 2011-08-05 19:44:37.000000000 -0400
40284 @@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
40285 off_t offset = 0;
40286 struct kcore_list *m;
40287
40288 + pax_track_stack();
40289 +
40290 /* setup ELF header */
40291 elf = (struct elfhdr *) bufp;
40292 bufp += sizeof(struct elfhdr);
40293 @@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
40294 * the addresses in the elf_phdr on our list.
40295 */
40296 start = kc_offset_to_vaddr(*fpos - elf_buflen);
40297 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
40298 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
40299 + if (tsz > buflen)
40300 tsz = buflen;
40301 -
40302 +
40303 while (buflen) {
40304 struct kcore_list *m;
40305
40306 @@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
40307 kfree(elf_buf);
40308 } else {
40309 if (kern_addr_valid(start)) {
40310 - unsigned long n;
40311 + char *elf_buf;
40312 + mm_segment_t oldfs;
40313
40314 - n = copy_to_user(buffer, (char *)start, tsz);
40315 - /*
40316 - * We cannot distingush between fault on source
40317 - * and fault on destination. When this happens
40318 - * we clear too and hope it will trigger the
40319 - * EFAULT again.
40320 - */
40321 - if (n) {
40322 - if (clear_user(buffer + tsz - n,
40323 - n))
40324 + elf_buf = kmalloc(tsz, GFP_KERNEL);
40325 + if (!elf_buf)
40326 + return -ENOMEM;
40327 + oldfs = get_fs();
40328 + set_fs(KERNEL_DS);
40329 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
40330 + set_fs(oldfs);
40331 + if (copy_to_user(buffer, elf_buf, tsz)) {
40332 + kfree(elf_buf);
40333 return -EFAULT;
40334 + }
40335 }
40336 + set_fs(oldfs);
40337 + kfree(elf_buf);
40338 } else {
40339 if (clear_user(buffer, tsz))
40340 return -EFAULT;
40341 @@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
40342
40343 static int open_kcore(struct inode *inode, struct file *filp)
40344 {
40345 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
40346 + return -EPERM;
40347 +#endif
40348 if (!capable(CAP_SYS_RAWIO))
40349 return -EPERM;
40350 if (kcore_need_update)
40351 diff -urNp linux-2.6.39.4/fs/proc/meminfo.c linux-2.6.39.4/fs/proc/meminfo.c
40352 --- linux-2.6.39.4/fs/proc/meminfo.c 2011-05-19 00:06:34.000000000 -0400
40353 +++ linux-2.6.39.4/fs/proc/meminfo.c 2011-08-05 19:44:37.000000000 -0400
40354 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
40355 unsigned long pages[NR_LRU_LISTS];
40356 int lru;
40357
40358 + pax_track_stack();
40359 +
40360 /*
40361 * display in kilobytes.
40362 */
40363 @@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
40364 vmi.used >> 10,
40365 vmi.largest_chunk >> 10
40366 #ifdef CONFIG_MEMORY_FAILURE
40367 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
40368 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
40369 #endif
40370 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
40371 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
40372 diff -urNp linux-2.6.39.4/fs/proc/nommu.c linux-2.6.39.4/fs/proc/nommu.c
40373 --- linux-2.6.39.4/fs/proc/nommu.c 2011-05-19 00:06:34.000000000 -0400
40374 +++ linux-2.6.39.4/fs/proc/nommu.c 2011-08-05 19:44:37.000000000 -0400
40375 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
40376 if (len < 1)
40377 len = 1;
40378 seq_printf(m, "%*c", len, ' ');
40379 - seq_path(m, &file->f_path, "");
40380 + seq_path(m, &file->f_path, "\n\\");
40381 }
40382
40383 seq_putc(m, '\n');
40384 diff -urNp linux-2.6.39.4/fs/proc/proc_net.c linux-2.6.39.4/fs/proc/proc_net.c
40385 --- linux-2.6.39.4/fs/proc/proc_net.c 2011-05-19 00:06:34.000000000 -0400
40386 +++ linux-2.6.39.4/fs/proc/proc_net.c 2011-08-05 19:44:37.000000000 -0400
40387 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
40388 struct task_struct *task;
40389 struct nsproxy *ns;
40390 struct net *net = NULL;
40391 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40392 + const struct cred *cred = current_cred();
40393 +#endif
40394 +
40395 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40396 + if (cred->fsuid)
40397 + return net;
40398 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40399 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
40400 + return net;
40401 +#endif
40402
40403 rcu_read_lock();
40404 task = pid_task(proc_pid(dir), PIDTYPE_PID);
40405 diff -urNp linux-2.6.39.4/fs/proc/proc_sysctl.c linux-2.6.39.4/fs/proc/proc_sysctl.c
40406 --- linux-2.6.39.4/fs/proc/proc_sysctl.c 2011-05-19 00:06:34.000000000 -0400
40407 +++ linux-2.6.39.4/fs/proc/proc_sysctl.c 2011-08-05 19:44:37.000000000 -0400
40408 @@ -8,6 +8,8 @@
40409 #include <linux/namei.h>
40410 #include "internal.h"
40411
40412 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
40413 +
40414 static const struct dentry_operations proc_sys_dentry_operations;
40415 static const struct file_operations proc_sys_file_operations;
40416 static const struct inode_operations proc_sys_inode_operations;
40417 @@ -111,6 +113,9 @@ static struct dentry *proc_sys_lookup(st
40418 if (!p)
40419 goto out;
40420
40421 + if (gr_handle_sysctl(p, MAY_EXEC))
40422 + goto out;
40423 +
40424 err = ERR_PTR(-ENOMEM);
40425 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
40426 if (h)
40427 @@ -230,6 +235,9 @@ static int scan(struct ctl_table_header
40428 if (*pos < file->f_pos)
40429 continue;
40430
40431 + if (gr_handle_sysctl(table, 0))
40432 + continue;
40433 +
40434 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
40435 if (res)
40436 return res;
40437 @@ -358,6 +366,9 @@ static int proc_sys_getattr(struct vfsmo
40438 if (IS_ERR(head))
40439 return PTR_ERR(head);
40440
40441 + if (table && gr_handle_sysctl(table, MAY_EXEC))
40442 + return -ENOENT;
40443 +
40444 generic_fillattr(inode, stat);
40445 if (table)
40446 stat->mode = (stat->mode & S_IFMT) | table->mode;
40447 diff -urNp linux-2.6.39.4/fs/proc/root.c linux-2.6.39.4/fs/proc/root.c
40448 --- linux-2.6.39.4/fs/proc/root.c 2011-05-19 00:06:34.000000000 -0400
40449 +++ linux-2.6.39.4/fs/proc/root.c 2011-08-05 19:44:37.000000000 -0400
40450 @@ -122,7 +122,15 @@ void __init proc_root_init(void)
40451 #ifdef CONFIG_PROC_DEVICETREE
40452 proc_device_tree_init();
40453 #endif
40454 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40455 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40456 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
40457 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40458 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40459 +#endif
40460 +#else
40461 proc_mkdir("bus", NULL);
40462 +#endif
40463 proc_sys_init();
40464 }
40465
40466 diff -urNp linux-2.6.39.4/fs/proc/task_mmu.c linux-2.6.39.4/fs/proc/task_mmu.c
40467 --- linux-2.6.39.4/fs/proc/task_mmu.c 2011-05-19 00:06:34.000000000 -0400
40468 +++ linux-2.6.39.4/fs/proc/task_mmu.c 2011-08-05 19:44:37.000000000 -0400
40469 @@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
40470 "VmExe:\t%8lu kB\n"
40471 "VmLib:\t%8lu kB\n"
40472 "VmPTE:\t%8lu kB\n"
40473 - "VmSwap:\t%8lu kB\n",
40474 - hiwater_vm << (PAGE_SHIFT-10),
40475 + "VmSwap:\t%8lu kB\n"
40476 +
40477 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40478 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
40479 +#endif
40480 +
40481 + ,hiwater_vm << (PAGE_SHIFT-10),
40482 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
40483 mm->locked_vm << (PAGE_SHIFT-10),
40484 hiwater_rss << (PAGE_SHIFT-10),
40485 @@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
40486 data << (PAGE_SHIFT-10),
40487 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
40488 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
40489 - swap << (PAGE_SHIFT-10));
40490 + swap << (PAGE_SHIFT-10)
40491 +
40492 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40493 + , mm->context.user_cs_base, mm->context.user_cs_limit
40494 +#endif
40495 +
40496 + );
40497 }
40498
40499 unsigned long task_vsize(struct mm_struct *mm)
40500 @@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
40501 return ret;
40502 }
40503
40504 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40505 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
40506 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
40507 + _mm->pax_flags & MF_PAX_SEGMEXEC))
40508 +#endif
40509 +
40510 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
40511 {
40512 struct mm_struct *mm = vma->vm_mm;
40513 @@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
40514 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
40515 }
40516
40517 - /* We don't show the stack guard page in /proc/maps */
40518 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40519 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
40520 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
40521 +#else
40522 start = vma->vm_start;
40523 - if (stack_guard_page_start(vma, start))
40524 - start += PAGE_SIZE;
40525 end = vma->vm_end;
40526 - if (stack_guard_page_end(vma, end))
40527 - end -= PAGE_SIZE;
40528 +#endif
40529
40530 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
40531 start,
40532 @@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
40533 flags & VM_WRITE ? 'w' : '-',
40534 flags & VM_EXEC ? 'x' : '-',
40535 flags & VM_MAYSHARE ? 's' : 'p',
40536 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40537 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
40538 +#else
40539 pgoff,
40540 +#endif
40541 MAJOR(dev), MINOR(dev), ino, &len);
40542
40543 /*
40544 @@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
40545 */
40546 if (file) {
40547 pad_len_spaces(m, len);
40548 - seq_path(m, &file->f_path, "\n");
40549 + seq_path(m, &file->f_path, "\n\\");
40550 } else {
40551 const char *name = arch_vma_name(vma);
40552 if (!name) {
40553 @@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
40554 if (vma->vm_start <= mm->brk &&
40555 vma->vm_end >= mm->start_brk) {
40556 name = "[heap]";
40557 - } else if (vma->vm_start <= mm->start_stack &&
40558 - vma->vm_end >= mm->start_stack) {
40559 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
40560 + (vma->vm_start <= mm->start_stack &&
40561 + vma->vm_end >= mm->start_stack)) {
40562 name = "[stack]";
40563 }
40564 } else {
40565 @@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
40566 };
40567
40568 memset(&mss, 0, sizeof mss);
40569 - mss.vma = vma;
40570 - /* mmap_sem is held in m_start */
40571 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40572 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40573 -
40574 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40575 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
40576 +#endif
40577 + mss.vma = vma;
40578 + /* mmap_sem is held in m_start */
40579 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40580 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40581 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40582 + }
40583 +#endif
40584 show_map_vma(m, vma);
40585
40586 seq_printf(m,
40587 @@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
40588 "KernelPageSize: %8lu kB\n"
40589 "MMUPageSize: %8lu kB\n"
40590 "Locked: %8lu kB\n",
40591 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40592 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
40593 +#else
40594 (vma->vm_end - vma->vm_start) >> 10,
40595 +#endif
40596 mss.resident >> 10,
40597 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
40598 mss.shared_clean >> 10,
40599 diff -urNp linux-2.6.39.4/fs/proc/task_nommu.c linux-2.6.39.4/fs/proc/task_nommu.c
40600 --- linux-2.6.39.4/fs/proc/task_nommu.c 2011-05-19 00:06:34.000000000 -0400
40601 +++ linux-2.6.39.4/fs/proc/task_nommu.c 2011-08-05 19:44:37.000000000 -0400
40602 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
40603 else
40604 bytes += kobjsize(mm);
40605
40606 - if (current->fs && current->fs->users > 1)
40607 + if (current->fs && atomic_read(&current->fs->users) > 1)
40608 sbytes += kobjsize(current->fs);
40609 else
40610 bytes += kobjsize(current->fs);
40611 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
40612
40613 if (file) {
40614 pad_len_spaces(m, len);
40615 - seq_path(m, &file->f_path, "");
40616 + seq_path(m, &file->f_path, "\n\\");
40617 } else if (mm) {
40618 if (vma->vm_start <= mm->start_stack &&
40619 vma->vm_end >= mm->start_stack) {
40620 diff -urNp linux-2.6.39.4/fs/quota/netlink.c linux-2.6.39.4/fs/quota/netlink.c
40621 --- linux-2.6.39.4/fs/quota/netlink.c 2011-05-19 00:06:34.000000000 -0400
40622 +++ linux-2.6.39.4/fs/quota/netlink.c 2011-08-05 19:44:37.000000000 -0400
40623 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
40624 void quota_send_warning(short type, unsigned int id, dev_t dev,
40625 const char warntype)
40626 {
40627 - static atomic_t seq;
40628 + static atomic_unchecked_t seq;
40629 struct sk_buff *skb;
40630 void *msg_head;
40631 int ret;
40632 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
40633 "VFS: Not enough memory to send quota warning.\n");
40634 return;
40635 }
40636 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
40637 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
40638 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
40639 if (!msg_head) {
40640 printk(KERN_ERR
40641 diff -urNp linux-2.6.39.4/fs/readdir.c linux-2.6.39.4/fs/readdir.c
40642 --- linux-2.6.39.4/fs/readdir.c 2011-05-19 00:06:34.000000000 -0400
40643 +++ linux-2.6.39.4/fs/readdir.c 2011-08-05 19:44:37.000000000 -0400
40644 @@ -17,6 +17,7 @@
40645 #include <linux/security.h>
40646 #include <linux/syscalls.h>
40647 #include <linux/unistd.h>
40648 +#include <linux/namei.h>
40649
40650 #include <asm/uaccess.h>
40651
40652 @@ -67,6 +68,7 @@ struct old_linux_dirent {
40653
40654 struct readdir_callback {
40655 struct old_linux_dirent __user * dirent;
40656 + struct file * file;
40657 int result;
40658 };
40659
40660 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
40661 buf->result = -EOVERFLOW;
40662 return -EOVERFLOW;
40663 }
40664 +
40665 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40666 + return 0;
40667 +
40668 buf->result++;
40669 dirent = buf->dirent;
40670 if (!access_ok(VERIFY_WRITE, dirent,
40671 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
40672
40673 buf.result = 0;
40674 buf.dirent = dirent;
40675 + buf.file = file;
40676
40677 error = vfs_readdir(file, fillonedir, &buf);
40678 if (buf.result)
40679 @@ -142,6 +149,7 @@ struct linux_dirent {
40680 struct getdents_callback {
40681 struct linux_dirent __user * current_dir;
40682 struct linux_dirent __user * previous;
40683 + struct file * file;
40684 int count;
40685 int error;
40686 };
40687 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
40688 buf->error = -EOVERFLOW;
40689 return -EOVERFLOW;
40690 }
40691 +
40692 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40693 + return 0;
40694 +
40695 dirent = buf->previous;
40696 if (dirent) {
40697 if (__put_user(offset, &dirent->d_off))
40698 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
40699 buf.previous = NULL;
40700 buf.count = count;
40701 buf.error = 0;
40702 + buf.file = file;
40703
40704 error = vfs_readdir(file, filldir, &buf);
40705 if (error >= 0)
40706 @@ -229,6 +242,7 @@ out:
40707 struct getdents_callback64 {
40708 struct linux_dirent64 __user * current_dir;
40709 struct linux_dirent64 __user * previous;
40710 + struct file *file;
40711 int count;
40712 int error;
40713 };
40714 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
40715 buf->error = -EINVAL; /* only used if we fail.. */
40716 if (reclen > buf->count)
40717 return -EINVAL;
40718 +
40719 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40720 + return 0;
40721 +
40722 dirent = buf->previous;
40723 if (dirent) {
40724 if (__put_user(offset, &dirent->d_off))
40725 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
40726
40727 buf.current_dir = dirent;
40728 buf.previous = NULL;
40729 + buf.file = file;
40730 buf.count = count;
40731 buf.error = 0;
40732
40733 diff -urNp linux-2.6.39.4/fs/reiserfs/dir.c linux-2.6.39.4/fs/reiserfs/dir.c
40734 --- linux-2.6.39.4/fs/reiserfs/dir.c 2011-05-19 00:06:34.000000000 -0400
40735 +++ linux-2.6.39.4/fs/reiserfs/dir.c 2011-08-05 19:44:37.000000000 -0400
40736 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
40737 struct reiserfs_dir_entry de;
40738 int ret = 0;
40739
40740 + pax_track_stack();
40741 +
40742 reiserfs_write_lock(inode->i_sb);
40743
40744 reiserfs_check_lock_depth(inode->i_sb, "readdir");
40745 diff -urNp linux-2.6.39.4/fs/reiserfs/do_balan.c linux-2.6.39.4/fs/reiserfs/do_balan.c
40746 --- linux-2.6.39.4/fs/reiserfs/do_balan.c 2011-05-19 00:06:34.000000000 -0400
40747 +++ linux-2.6.39.4/fs/reiserfs/do_balan.c 2011-08-05 19:44:37.000000000 -0400
40748 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
40749 return;
40750 }
40751
40752 - atomic_inc(&(fs_generation(tb->tb_sb)));
40753 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
40754 do_balance_starts(tb);
40755
40756 /* balance leaf returns 0 except if combining L R and S into
40757 diff -urNp linux-2.6.39.4/fs/reiserfs/journal.c linux-2.6.39.4/fs/reiserfs/journal.c
40758 --- linux-2.6.39.4/fs/reiserfs/journal.c 2011-05-19 00:06:34.000000000 -0400
40759 +++ linux-2.6.39.4/fs/reiserfs/journal.c 2011-08-05 19:44:37.000000000 -0400
40760 @@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
40761 struct buffer_head *bh;
40762 int i, j;
40763
40764 + pax_track_stack();
40765 +
40766 bh = __getblk(dev, block, bufsize);
40767 if (buffer_uptodate(bh))
40768 return (bh);
40769 diff -urNp linux-2.6.39.4/fs/reiserfs/namei.c linux-2.6.39.4/fs/reiserfs/namei.c
40770 --- linux-2.6.39.4/fs/reiserfs/namei.c 2011-05-19 00:06:34.000000000 -0400
40771 +++ linux-2.6.39.4/fs/reiserfs/namei.c 2011-08-05 19:44:37.000000000 -0400
40772 @@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
40773 unsigned long savelink = 1;
40774 struct timespec ctime;
40775
40776 + pax_track_stack();
40777 +
40778 /* three balancings: (1) old name removal, (2) new name insertion
40779 and (3) maybe "save" link insertion
40780 stat data updates: (1) old directory,
40781 diff -urNp linux-2.6.39.4/fs/reiserfs/procfs.c linux-2.6.39.4/fs/reiserfs/procfs.c
40782 --- linux-2.6.39.4/fs/reiserfs/procfs.c 2011-05-19 00:06:34.000000000 -0400
40783 +++ linux-2.6.39.4/fs/reiserfs/procfs.c 2011-08-05 19:44:37.000000000 -0400
40784 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
40785 "SMALL_TAILS " : "NO_TAILS ",
40786 replay_only(sb) ? "REPLAY_ONLY " : "",
40787 convert_reiserfs(sb) ? "CONV " : "",
40788 - atomic_read(&r->s_generation_counter),
40789 + atomic_read_unchecked(&r->s_generation_counter),
40790 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
40791 SF(s_do_balance), SF(s_unneeded_left_neighbor),
40792 SF(s_good_search_by_key_reada), SF(s_bmaps),
40793 @@ -299,6 +299,8 @@ static int show_journal(struct seq_file
40794 struct journal_params *jp = &rs->s_v1.s_journal;
40795 char b[BDEVNAME_SIZE];
40796
40797 + pax_track_stack();
40798 +
40799 seq_printf(m, /* on-disk fields */
40800 "jp_journal_1st_block: \t%i\n"
40801 "jp_journal_dev: \t%s[%x]\n"
40802 diff -urNp linux-2.6.39.4/fs/reiserfs/stree.c linux-2.6.39.4/fs/reiserfs/stree.c
40803 --- linux-2.6.39.4/fs/reiserfs/stree.c 2011-05-19 00:06:34.000000000 -0400
40804 +++ linux-2.6.39.4/fs/reiserfs/stree.c 2011-08-05 19:44:37.000000000 -0400
40805 @@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
40806 int iter = 0;
40807 #endif
40808
40809 + pax_track_stack();
40810 +
40811 BUG_ON(!th->t_trans_id);
40812
40813 init_tb_struct(th, &s_del_balance, sb, path,
40814 @@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
40815 int retval;
40816 int quota_cut_bytes = 0;
40817
40818 + pax_track_stack();
40819 +
40820 BUG_ON(!th->t_trans_id);
40821
40822 le_key2cpu_key(&cpu_key, key);
40823 @@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
40824 int quota_cut_bytes;
40825 loff_t tail_pos = 0;
40826
40827 + pax_track_stack();
40828 +
40829 BUG_ON(!th->t_trans_id);
40830
40831 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
40832 @@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
40833 int retval;
40834 int fs_gen;
40835
40836 + pax_track_stack();
40837 +
40838 BUG_ON(!th->t_trans_id);
40839
40840 fs_gen = get_generation(inode->i_sb);
40841 @@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
40842 int fs_gen = 0;
40843 int quota_bytes = 0;
40844
40845 + pax_track_stack();
40846 +
40847 BUG_ON(!th->t_trans_id);
40848
40849 if (inode) { /* Do we count quotas for item? */
40850 diff -urNp linux-2.6.39.4/fs/reiserfs/super.c linux-2.6.39.4/fs/reiserfs/super.c
40851 --- linux-2.6.39.4/fs/reiserfs/super.c 2011-05-19 00:06:34.000000000 -0400
40852 +++ linux-2.6.39.4/fs/reiserfs/super.c 2011-08-05 19:44:37.000000000 -0400
40853 @@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
40854 {.option_name = NULL}
40855 };
40856
40857 + pax_track_stack();
40858 +
40859 *blocks = 0;
40860 if (!options || !*options)
40861 /* use default configuration: create tails, journaling on, no
40862 diff -urNp linux-2.6.39.4/fs/select.c linux-2.6.39.4/fs/select.c
40863 --- linux-2.6.39.4/fs/select.c 2011-05-19 00:06:34.000000000 -0400
40864 +++ linux-2.6.39.4/fs/select.c 2011-08-05 19:44:37.000000000 -0400
40865 @@ -20,6 +20,7 @@
40866 #include <linux/module.h>
40867 #include <linux/slab.h>
40868 #include <linux/poll.h>
40869 +#include <linux/security.h>
40870 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
40871 #include <linux/file.h>
40872 #include <linux/fdtable.h>
40873 @@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
40874 int retval, i, timed_out = 0;
40875 unsigned long slack = 0;
40876
40877 + pax_track_stack();
40878 +
40879 rcu_read_lock();
40880 retval = max_select_fd(n, fds);
40881 rcu_read_unlock();
40882 @@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
40883 /* Allocate small arguments on the stack to save memory and be faster */
40884 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40885
40886 + pax_track_stack();
40887 +
40888 ret = -EINVAL;
40889 if (n < 0)
40890 goto out_nofds;
40891 @@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
40892 struct poll_list *walk = head;
40893 unsigned long todo = nfds;
40894
40895 + pax_track_stack();
40896 +
40897 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
40898 if (nfds > rlimit(RLIMIT_NOFILE))
40899 return -EINVAL;
40900
40901 diff -urNp linux-2.6.39.4/fs/seq_file.c linux-2.6.39.4/fs/seq_file.c
40902 --- linux-2.6.39.4/fs/seq_file.c 2011-05-19 00:06:34.000000000 -0400
40903 +++ linux-2.6.39.4/fs/seq_file.c 2011-08-05 20:34:06.000000000 -0400
40904 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
40905 return 0;
40906 }
40907 if (!m->buf) {
40908 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40909 + m->size = PAGE_SIZE;
40910 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40911 if (!m->buf)
40912 return -ENOMEM;
40913 }
40914 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
40915 Eoverflow:
40916 m->op->stop(m, p);
40917 kfree(m->buf);
40918 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40919 + m->size <<= 1;
40920 + m->buf = kmalloc(m->size, GFP_KERNEL);
40921 return !m->buf ? -ENOMEM : -EAGAIN;
40922 }
40923
40924 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
40925 m->version = file->f_version;
40926 /* grab buffer if we didn't have one */
40927 if (!m->buf) {
40928 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40929 + m->size = PAGE_SIZE;
40930 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40931 if (!m->buf)
40932 goto Enomem;
40933 }
40934 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
40935 goto Fill;
40936 m->op->stop(m, p);
40937 kfree(m->buf);
40938 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40939 + m->size <<= 1;
40940 + m->buf = kmalloc(m->size, GFP_KERNEL);
40941 if (!m->buf)
40942 goto Enomem;
40943 m->count = 0;
40944 @@ -549,7 +553,7 @@ static void single_stop(struct seq_file
40945 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
40946 void *data)
40947 {
40948 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
40949 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
40950 int res = -ENOMEM;
40951
40952 if (op) {
40953 diff -urNp linux-2.6.39.4/fs/splice.c linux-2.6.39.4/fs/splice.c
40954 --- linux-2.6.39.4/fs/splice.c 2011-05-19 00:06:34.000000000 -0400
40955 +++ linux-2.6.39.4/fs/splice.c 2011-08-05 19:44:37.000000000 -0400
40956 @@ -186,7 +186,7 @@ ssize_t splice_to_pipe(struct pipe_inode
40957 pipe_lock(pipe);
40958
40959 for (;;) {
40960 - if (!pipe->readers) {
40961 + if (!atomic_read(&pipe->readers)) {
40962 send_sig(SIGPIPE, current, 0);
40963 if (!ret)
40964 ret = -EPIPE;
40965 @@ -240,9 +240,9 @@ ssize_t splice_to_pipe(struct pipe_inode
40966 do_wakeup = 0;
40967 }
40968
40969 - pipe->waiting_writers++;
40970 + atomic_inc(&pipe->waiting_writers);
40971 pipe_wait(pipe);
40972 - pipe->waiting_writers--;
40973 + atomic_dec(&pipe->waiting_writers);
40974 }
40975
40976 pipe_unlock(pipe);
40977 @@ -316,6 +316,8 @@ __generic_file_splice_read(struct file *
40978 .spd_release = spd_release_page,
40979 };
40980
40981 + pax_track_stack();
40982 +
40983 if (splice_grow_spd(pipe, &spd))
40984 return -ENOMEM;
40985
40986 @@ -556,7 +558,7 @@ static ssize_t kernel_readv(struct file
40987 old_fs = get_fs();
40988 set_fs(get_ds());
40989 /* The cast to a user pointer is valid due to the set_fs() */
40990 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
40991 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
40992 set_fs(old_fs);
40993
40994 return res;
40995 @@ -571,7 +573,7 @@ static ssize_t kernel_write(struct file
40996 old_fs = get_fs();
40997 set_fs(get_ds());
40998 /* The cast to a user pointer is valid due to the set_fs() */
40999 - res = vfs_write(file, (const char __user *)buf, count, &pos);
41000 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
41001 set_fs(old_fs);
41002
41003 return res;
41004 @@ -599,6 +601,8 @@ ssize_t default_file_splice_read(struct
41005 .spd_release = spd_release_page,
41006 };
41007
41008 + pax_track_stack();
41009 +
41010 if (splice_grow_spd(pipe, &spd))
41011 return -ENOMEM;
41012
41013 @@ -622,7 +626,7 @@ ssize_t default_file_splice_read(struct
41014 goto err;
41015
41016 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
41017 - vec[i].iov_base = (void __user *) page_address(page);
41018 + vec[i].iov_base = (__force void __user *) page_address(page);
41019 vec[i].iov_len = this_len;
41020 spd.pages[i] = page;
41021 spd.nr_pages++;
41022 @@ -842,10 +846,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
41023 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
41024 {
41025 while (!pipe->nrbufs) {
41026 - if (!pipe->writers)
41027 + if (!atomic_read(&pipe->writers))
41028 return 0;
41029
41030 - if (!pipe->waiting_writers && sd->num_spliced)
41031 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
41032 return 0;
41033
41034 if (sd->flags & SPLICE_F_NONBLOCK)
41035 @@ -1178,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct fi
41036 * out of the pipe right after the splice_to_pipe(). So set
41037 * PIPE_READERS appropriately.
41038 */
41039 - pipe->readers = 1;
41040 + atomic_set(&pipe->readers, 1);
41041
41042 current->splice_pipe = pipe;
41043 }
41044 @@ -1615,6 +1619,8 @@ static long vmsplice_to_pipe(struct file
41045 };
41046 long ret;
41047
41048 + pax_track_stack();
41049 +
41050 pipe = get_pipe_info(file);
41051 if (!pipe)
41052 return -EBADF;
41053 @@ -1730,9 +1736,9 @@ static int ipipe_prep(struct pipe_inode_
41054 ret = -ERESTARTSYS;
41055 break;
41056 }
41057 - if (!pipe->writers)
41058 + if (!atomic_read(&pipe->writers))
41059 break;
41060 - if (!pipe->waiting_writers) {
41061 + if (!atomic_read(&pipe->waiting_writers)) {
41062 if (flags & SPLICE_F_NONBLOCK) {
41063 ret = -EAGAIN;
41064 break;
41065 @@ -1764,7 +1770,7 @@ static int opipe_prep(struct pipe_inode_
41066 pipe_lock(pipe);
41067
41068 while (pipe->nrbufs >= pipe->buffers) {
41069 - if (!pipe->readers) {
41070 + if (!atomic_read(&pipe->readers)) {
41071 send_sig(SIGPIPE, current, 0);
41072 ret = -EPIPE;
41073 break;
41074 @@ -1777,9 +1783,9 @@ static int opipe_prep(struct pipe_inode_
41075 ret = -ERESTARTSYS;
41076 break;
41077 }
41078 - pipe->waiting_writers++;
41079 + atomic_inc(&pipe->waiting_writers);
41080 pipe_wait(pipe);
41081 - pipe->waiting_writers--;
41082 + atomic_dec(&pipe->waiting_writers);
41083 }
41084
41085 pipe_unlock(pipe);
41086 @@ -1815,14 +1821,14 @@ retry:
41087 pipe_double_lock(ipipe, opipe);
41088
41089 do {
41090 - if (!opipe->readers) {
41091 + if (!atomic_read(&opipe->readers)) {
41092 send_sig(SIGPIPE, current, 0);
41093 if (!ret)
41094 ret = -EPIPE;
41095 break;
41096 }
41097
41098 - if (!ipipe->nrbufs && !ipipe->writers)
41099 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
41100 break;
41101
41102 /*
41103 @@ -1922,7 +1928,7 @@ static int link_pipe(struct pipe_inode_i
41104 pipe_double_lock(ipipe, opipe);
41105
41106 do {
41107 - if (!opipe->readers) {
41108 + if (!atomic_read(&opipe->readers)) {
41109 send_sig(SIGPIPE, current, 0);
41110 if (!ret)
41111 ret = -EPIPE;
41112 @@ -1967,7 +1973,7 @@ static int link_pipe(struct pipe_inode_i
41113 * return EAGAIN if we have the potential of some data in the
41114 * future, otherwise just return 0
41115 */
41116 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
41117 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
41118 ret = -EAGAIN;
41119
41120 pipe_unlock(ipipe);
41121 diff -urNp linux-2.6.39.4/fs/sysfs/file.c linux-2.6.39.4/fs/sysfs/file.c
41122 --- linux-2.6.39.4/fs/sysfs/file.c 2011-05-19 00:06:34.000000000 -0400
41123 +++ linux-2.6.39.4/fs/sysfs/file.c 2011-08-05 19:44:37.000000000 -0400
41124 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
41125
41126 struct sysfs_open_dirent {
41127 atomic_t refcnt;
41128 - atomic_t event;
41129 + atomic_unchecked_t event;
41130 wait_queue_head_t poll;
41131 struct list_head buffers; /* goes through sysfs_buffer.list */
41132 };
41133 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
41134 if (!sysfs_get_active(attr_sd))
41135 return -ENODEV;
41136
41137 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
41138 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
41139 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
41140
41141 sysfs_put_active(attr_sd);
41142 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
41143 return -ENOMEM;
41144
41145 atomic_set(&new_od->refcnt, 0);
41146 - atomic_set(&new_od->event, 1);
41147 + atomic_set_unchecked(&new_od->event, 1);
41148 init_waitqueue_head(&new_od->poll);
41149 INIT_LIST_HEAD(&new_od->buffers);
41150 goto retry;
41151 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
41152
41153 sysfs_put_active(attr_sd);
41154
41155 - if (buffer->event != atomic_read(&od->event))
41156 + if (buffer->event != atomic_read_unchecked(&od->event))
41157 goto trigger;
41158
41159 return DEFAULT_POLLMASK;
41160 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
41161
41162 od = sd->s_attr.open;
41163 if (od) {
41164 - atomic_inc(&od->event);
41165 + atomic_inc_unchecked(&od->event);
41166 wake_up_interruptible(&od->poll);
41167 }
41168
41169 diff -urNp linux-2.6.39.4/fs/sysfs/mount.c linux-2.6.39.4/fs/sysfs/mount.c
41170 --- linux-2.6.39.4/fs/sysfs/mount.c 2011-05-19 00:06:34.000000000 -0400
41171 +++ linux-2.6.39.4/fs/sysfs/mount.c 2011-08-05 19:44:37.000000000 -0400
41172 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
41173 .s_name = "",
41174 .s_count = ATOMIC_INIT(1),
41175 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
41176 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
41177 + .s_mode = S_IFDIR | S_IRWXU,
41178 +#else
41179 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
41180 +#endif
41181 .s_ino = 1,
41182 };
41183
41184 diff -urNp linux-2.6.39.4/fs/sysfs/symlink.c linux-2.6.39.4/fs/sysfs/symlink.c
41185 --- linux-2.6.39.4/fs/sysfs/symlink.c 2011-05-19 00:06:34.000000000 -0400
41186 +++ linux-2.6.39.4/fs/sysfs/symlink.c 2011-08-05 19:44:37.000000000 -0400
41187 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
41188
41189 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
41190 {
41191 - char *page = nd_get_link(nd);
41192 + const char *page = nd_get_link(nd);
41193 if (!IS_ERR(page))
41194 free_page((unsigned long)page);
41195 }
41196 diff -urNp linux-2.6.39.4/fs/udf/inode.c linux-2.6.39.4/fs/udf/inode.c
41197 --- linux-2.6.39.4/fs/udf/inode.c 2011-05-19 00:06:34.000000000 -0400
41198 +++ linux-2.6.39.4/fs/udf/inode.c 2011-08-05 19:44:37.000000000 -0400
41199 @@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
41200 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
41201 int lastblock = 0;
41202
41203 + pax_track_stack();
41204 +
41205 prev_epos.offset = udf_file_entry_alloc_offset(inode);
41206 prev_epos.block = iinfo->i_location;
41207 prev_epos.bh = NULL;
41208 diff -urNp linux-2.6.39.4/fs/udf/misc.c linux-2.6.39.4/fs/udf/misc.c
41209 --- linux-2.6.39.4/fs/udf/misc.c 2011-05-19 00:06:34.000000000 -0400
41210 +++ linux-2.6.39.4/fs/udf/misc.c 2011-08-05 19:44:37.000000000 -0400
41211 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
41212
41213 u8 udf_tag_checksum(const struct tag *t)
41214 {
41215 - u8 *data = (u8 *)t;
41216 + const u8 *data = (const u8 *)t;
41217 u8 checksum = 0;
41218 int i;
41219 for (i = 0; i < sizeof(struct tag); ++i)
41220 diff -urNp linux-2.6.39.4/fs/utimes.c linux-2.6.39.4/fs/utimes.c
41221 --- linux-2.6.39.4/fs/utimes.c 2011-05-19 00:06:34.000000000 -0400
41222 +++ linux-2.6.39.4/fs/utimes.c 2011-08-05 19:44:37.000000000 -0400
41223 @@ -1,6 +1,7 @@
41224 #include <linux/compiler.h>
41225 #include <linux/file.h>
41226 #include <linux/fs.h>
41227 +#include <linux/security.h>
41228 #include <linux/linkage.h>
41229 #include <linux/mount.h>
41230 #include <linux/namei.h>
41231 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
41232 goto mnt_drop_write_and_out;
41233 }
41234 }
41235 +
41236 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
41237 + error = -EACCES;
41238 + goto mnt_drop_write_and_out;
41239 + }
41240 +
41241 mutex_lock(&inode->i_mutex);
41242 error = notify_change(path->dentry, &newattrs);
41243 mutex_unlock(&inode->i_mutex);
41244 diff -urNp linux-2.6.39.4/fs/xattr_acl.c linux-2.6.39.4/fs/xattr_acl.c
41245 --- linux-2.6.39.4/fs/xattr_acl.c 2011-05-19 00:06:34.000000000 -0400
41246 +++ linux-2.6.39.4/fs/xattr_acl.c 2011-08-05 19:44:37.000000000 -0400
41247 @@ -17,8 +17,8 @@
41248 struct posix_acl *
41249 posix_acl_from_xattr(const void *value, size_t size)
41250 {
41251 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
41252 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
41253 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
41254 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
41255 int count;
41256 struct posix_acl *acl;
41257 struct posix_acl_entry *acl_e;
41258 diff -urNp linux-2.6.39.4/fs/xattr.c linux-2.6.39.4/fs/xattr.c
41259 --- linux-2.6.39.4/fs/xattr.c 2011-05-19 00:06:34.000000000 -0400
41260 +++ linux-2.6.39.4/fs/xattr.c 2011-08-05 19:44:37.000000000 -0400
41261 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
41262 * Extended attribute SET operations
41263 */
41264 static long
41265 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
41266 +setxattr(struct path *path, const char __user *name, const void __user *value,
41267 size_t size, int flags)
41268 {
41269 int error;
41270 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
41271 return PTR_ERR(kvalue);
41272 }
41273
41274 - error = vfs_setxattr(d, kname, kvalue, size, flags);
41275 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
41276 + error = -EACCES;
41277 + goto out;
41278 + }
41279 +
41280 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
41281 +out:
41282 kfree(kvalue);
41283 return error;
41284 }
41285 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
41286 return error;
41287 error = mnt_want_write(path.mnt);
41288 if (!error) {
41289 - error = setxattr(path.dentry, name, value, size, flags);
41290 + error = setxattr(&path, name, value, size, flags);
41291 mnt_drop_write(path.mnt);
41292 }
41293 path_put(&path);
41294 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
41295 return error;
41296 error = mnt_want_write(path.mnt);
41297 if (!error) {
41298 - error = setxattr(path.dentry, name, value, size, flags);
41299 + error = setxattr(&path, name, value, size, flags);
41300 mnt_drop_write(path.mnt);
41301 }
41302 path_put(&path);
41303 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
41304 const void __user *,value, size_t, size, int, flags)
41305 {
41306 struct file *f;
41307 - struct dentry *dentry;
41308 int error = -EBADF;
41309
41310 f = fget(fd);
41311 if (!f)
41312 return error;
41313 - dentry = f->f_path.dentry;
41314 - audit_inode(NULL, dentry);
41315 + audit_inode(NULL, f->f_path.dentry);
41316 error = mnt_want_write_file(f);
41317 if (!error) {
41318 - error = setxattr(dentry, name, value, size, flags);
41319 + error = setxattr(&f->f_path, name, value, size, flags);
41320 mnt_drop_write(f->f_path.mnt);
41321 }
41322 fput(f);
41323 diff -urNp linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c
41324 --- linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-05-19 00:06:34.000000000 -0400
41325 +++ linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-05 19:44:37.000000000 -0400
41326 @@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
41327 xfs_fsop_geom_t fsgeo;
41328 int error;
41329
41330 + memset(&fsgeo, 0, sizeof(fsgeo));
41331 error = xfs_fs_geometry(mp, &fsgeo, 3);
41332 if (error)
41333 return -error;
41334 diff -urNp linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c
41335 --- linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-05-19 00:06:34.000000000 -0400
41336 +++ linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-05 19:44:37.000000000 -0400
41337 @@ -128,7 +128,7 @@ xfs_find_handle(
41338 }
41339
41340 error = -EFAULT;
41341 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
41342 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
41343 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
41344 goto out_put;
41345
41346 diff -urNp linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c
41347 --- linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c 2011-05-19 00:06:34.000000000 -0400
41348 +++ linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c 2011-08-05 19:44:37.000000000 -0400
41349 @@ -437,7 +437,7 @@ xfs_vn_put_link(
41350 struct nameidata *nd,
41351 void *p)
41352 {
41353 - char *s = nd_get_link(nd);
41354 + const char *s = nd_get_link(nd);
41355
41356 if (!IS_ERR(s))
41357 kfree(s);
41358 diff -urNp linux-2.6.39.4/fs/xfs/xfs_bmap.c linux-2.6.39.4/fs/xfs/xfs_bmap.c
41359 --- linux-2.6.39.4/fs/xfs/xfs_bmap.c 2011-05-19 00:06:34.000000000 -0400
41360 +++ linux-2.6.39.4/fs/xfs/xfs_bmap.c 2011-08-05 19:44:37.000000000 -0400
41361 @@ -287,7 +287,7 @@ xfs_bmap_validate_ret(
41362 int nmap,
41363 int ret_nmap);
41364 #else
41365 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
41366 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
41367 #endif /* DEBUG */
41368
41369 STATIC int
41370 diff -urNp linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c
41371 --- linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c 2011-05-19 00:06:34.000000000 -0400
41372 +++ linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c 2011-08-05 19:44:37.000000000 -0400
41373 @@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
41374 }
41375
41376 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
41377 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
41378 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
41379 + char name[sfep->namelen];
41380 + memcpy(name, sfep->name, sfep->namelen);
41381 + if (filldir(dirent, name, sfep->namelen,
41382 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
41383 + *offset = off & 0x7fffffff;
41384 + return 0;
41385 + }
41386 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
41387 off & 0x7fffffff, ino, DT_UNKNOWN)) {
41388 *offset = off & 0x7fffffff;
41389 return 0;
41390 diff -urNp linux-2.6.39.4/grsecurity/gracl_alloc.c linux-2.6.39.4/grsecurity/gracl_alloc.c
41391 --- linux-2.6.39.4/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
41392 +++ linux-2.6.39.4/grsecurity/gracl_alloc.c 2011-08-05 19:44:37.000000000 -0400
41393 @@ -0,0 +1,105 @@
41394 +#include <linux/kernel.h>
41395 +#include <linux/mm.h>
41396 +#include <linux/slab.h>
41397 +#include <linux/vmalloc.h>
41398 +#include <linux/gracl.h>
41399 +#include <linux/grsecurity.h>
41400 +
41401 +static unsigned long alloc_stack_next = 1;
41402 +static unsigned long alloc_stack_size = 1;
41403 +static void **alloc_stack;
41404 +
41405 +static __inline__ int
41406 +alloc_pop(void)
41407 +{
41408 + if (alloc_stack_next == 1)
41409 + return 0;
41410 +
41411 + kfree(alloc_stack[alloc_stack_next - 2]);
41412 +
41413 + alloc_stack_next--;
41414 +
41415 + return 1;
41416 +}
41417 +
41418 +static __inline__ int
41419 +alloc_push(void *buf)
41420 +{
41421 + if (alloc_stack_next >= alloc_stack_size)
41422 + return 1;
41423 +
41424 + alloc_stack[alloc_stack_next - 1] = buf;
41425 +
41426 + alloc_stack_next++;
41427 +
41428 + return 0;
41429 +}
41430 +
41431 +void *
41432 +acl_alloc(unsigned long len)
41433 +{
41434 + void *ret = NULL;
41435 +
41436 + if (!len || len > PAGE_SIZE)
41437 + goto out;
41438 +
41439 + ret = kmalloc(len, GFP_KERNEL);
41440 +
41441 + if (ret) {
41442 + if (alloc_push(ret)) {
41443 + kfree(ret);
41444 + ret = NULL;
41445 + }
41446 + }
41447 +
41448 +out:
41449 + return ret;
41450 +}
41451 +
41452 +void *
41453 +acl_alloc_num(unsigned long num, unsigned long len)
41454 +{
41455 + if (!len || (num > (PAGE_SIZE / len)))
41456 + return NULL;
41457 +
41458 + return acl_alloc(num * len);
41459 +}
41460 +
41461 +void
41462 +acl_free_all(void)
41463 +{
41464 + if (gr_acl_is_enabled() || !alloc_stack)
41465 + return;
41466 +
41467 + while (alloc_pop()) ;
41468 +
41469 + if (alloc_stack) {
41470 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
41471 + kfree(alloc_stack);
41472 + else
41473 + vfree(alloc_stack);
41474 + }
41475 +
41476 + alloc_stack = NULL;
41477 + alloc_stack_size = 1;
41478 + alloc_stack_next = 1;
41479 +
41480 + return;
41481 +}
41482 +
41483 +int
41484 +acl_alloc_stack_init(unsigned long size)
41485 +{
41486 + if ((size * sizeof (void *)) <= PAGE_SIZE)
41487 + alloc_stack =
41488 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
41489 + else
41490 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
41491 +
41492 + alloc_stack_size = size;
41493 +
41494 + if (!alloc_stack)
41495 + return 0;
41496 + else
41497 + return 1;
41498 +}
41499 diff -urNp linux-2.6.39.4/grsecurity/gracl.c linux-2.6.39.4/grsecurity/gracl.c
41500 --- linux-2.6.39.4/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
41501 +++ linux-2.6.39.4/grsecurity/gracl.c 2011-08-05 19:44:37.000000000 -0400
41502 @@ -0,0 +1,4106 @@
41503 +#include <linux/kernel.h>
41504 +#include <linux/module.h>
41505 +#include <linux/sched.h>
41506 +#include <linux/mm.h>
41507 +#include <linux/file.h>
41508 +#include <linux/fs.h>
41509 +#include <linux/namei.h>
41510 +#include <linux/mount.h>
41511 +#include <linux/tty.h>
41512 +#include <linux/proc_fs.h>
41513 +#include <linux/lglock.h>
41514 +#include <linux/slab.h>
41515 +#include <linux/vmalloc.h>
41516 +#include <linux/types.h>
41517 +#include <linux/sysctl.h>
41518 +#include <linux/netdevice.h>
41519 +#include <linux/ptrace.h>
41520 +#include <linux/gracl.h>
41521 +#include <linux/gralloc.h>
41522 +#include <linux/grsecurity.h>
41523 +#include <linux/grinternal.h>
41524 +#include <linux/pid_namespace.h>
41525 +#include <linux/fdtable.h>
41526 +#include <linux/percpu.h>
41527 +
41528 +#include <asm/uaccess.h>
41529 +#include <asm/errno.h>
41530 +#include <asm/mman.h>
41531 +
41532 +static struct acl_role_db acl_role_set;
41533 +static struct name_db name_set;
41534 +static struct inodev_db inodev_set;
41535 +
41536 +/* for keeping track of userspace pointers used for subjects, so we
41537 + can share references in the kernel as well
41538 +*/
41539 +
41540 +static struct path real_root;
41541 +
41542 +static struct acl_subj_map_db subj_map_set;
41543 +
41544 +static struct acl_role_label *default_role;
41545 +
41546 +static struct acl_role_label *role_list;
41547 +
41548 +static u16 acl_sp_role_value;
41549 +
41550 +extern char *gr_shared_page[4];
41551 +static DEFINE_MUTEX(gr_dev_mutex);
41552 +DEFINE_RWLOCK(gr_inode_lock);
41553 +
41554 +struct gr_arg *gr_usermode;
41555 +
41556 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
41557 +
41558 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
41559 +extern void gr_clear_learn_entries(void);
41560 +
41561 +#ifdef CONFIG_GRKERNSEC_RESLOG
41562 +extern void gr_log_resource(const struct task_struct *task,
41563 + const int res, const unsigned long wanted, const int gt);
41564 +#endif
41565 +
41566 +unsigned char *gr_system_salt;
41567 +unsigned char *gr_system_sum;
41568 +
41569 +static struct sprole_pw **acl_special_roles = NULL;
41570 +static __u16 num_sprole_pws = 0;
41571 +
41572 +static struct acl_role_label *kernel_role = NULL;
41573 +
41574 +static unsigned int gr_auth_attempts = 0;
41575 +static unsigned long gr_auth_expires = 0UL;
41576 +
41577 +#ifdef CONFIG_NET
41578 +extern struct vfsmount *sock_mnt;
41579 +#endif
41580 +
41581 +extern struct vfsmount *pipe_mnt;
41582 +extern struct vfsmount *shm_mnt;
41583 +#ifdef CONFIG_HUGETLBFS
41584 +extern struct vfsmount *hugetlbfs_vfsmount;
41585 +#endif
41586 +
41587 +static struct acl_object_label *fakefs_obj_rw;
41588 +static struct acl_object_label *fakefs_obj_rwx;
41589 +
41590 +extern int gr_init_uidset(void);
41591 +extern void gr_free_uidset(void);
41592 +extern void gr_remove_uid(uid_t uid);
41593 +extern int gr_find_uid(uid_t uid);
41594 +
41595 +DECLARE_BRLOCK(vfsmount_lock);
41596 +
41597 +__inline__ int
41598 +gr_acl_is_enabled(void)
41599 +{
41600 + return (gr_status & GR_READY);
41601 +}
41602 +
41603 +#ifdef CONFIG_BTRFS_FS
41604 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
41605 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
41606 +#endif
41607 +
41608 +static inline dev_t __get_dev(const struct dentry *dentry)
41609 +{
41610 +#ifdef CONFIG_BTRFS_FS
41611 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
41612 + return get_btrfs_dev_from_inode(dentry->d_inode);
41613 + else
41614 +#endif
41615 + return dentry->d_inode->i_sb->s_dev;
41616 +}
41617 +
41618 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
41619 +{
41620 + return __get_dev(dentry);
41621 +}
41622 +
41623 +static char gr_task_roletype_to_char(struct task_struct *task)
41624 +{
41625 + switch (task->role->roletype &
41626 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
41627 + GR_ROLE_SPECIAL)) {
41628 + case GR_ROLE_DEFAULT:
41629 + return 'D';
41630 + case GR_ROLE_USER:
41631 + return 'U';
41632 + case GR_ROLE_GROUP:
41633 + return 'G';
41634 + case GR_ROLE_SPECIAL:
41635 + return 'S';
41636 + }
41637 +
41638 + return 'X';
41639 +}
41640 +
41641 +char gr_roletype_to_char(void)
41642 +{
41643 + return gr_task_roletype_to_char(current);
41644 +}
41645 +
41646 +__inline__ int
41647 +gr_acl_tpe_check(void)
41648 +{
41649 + if (unlikely(!(gr_status & GR_READY)))
41650 + return 0;
41651 + if (current->role->roletype & GR_ROLE_TPE)
41652 + return 1;
41653 + else
41654 + return 0;
41655 +}
41656 +
41657 +int
41658 +gr_handle_rawio(const struct inode *inode)
41659 +{
41660 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
41661 + if (inode && S_ISBLK(inode->i_mode) &&
41662 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
41663 + !capable(CAP_SYS_RAWIO))
41664 + return 1;
41665 +#endif
41666 + return 0;
41667 +}
41668 +
41669 +static int
41670 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
41671 +{
41672 + if (likely(lena != lenb))
41673 + return 0;
41674 +
41675 + return !memcmp(a, b, lena);
41676 +}
41677 +
41678 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
41679 +{
41680 + *buflen -= namelen;
41681 + if (*buflen < 0)
41682 + return -ENAMETOOLONG;
41683 + *buffer -= namelen;
41684 + memcpy(*buffer, str, namelen);
41685 + return 0;
41686 +}
41687 +
41688 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
41689 +{
41690 + return prepend(buffer, buflen, name->name, name->len);
41691 +}
41692 +
41693 +static int prepend_path(const struct path *path, struct path *root,
41694 + char **buffer, int *buflen)
41695 +{
41696 + struct dentry *dentry = path->dentry;
41697 + struct vfsmount *vfsmnt = path->mnt;
41698 + bool slash = false;
41699 + int error = 0;
41700 +
41701 + while (dentry != root->dentry || vfsmnt != root->mnt) {
41702 + struct dentry * parent;
41703 +
41704 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
41705 + /* Global root? */
41706 + if (vfsmnt->mnt_parent == vfsmnt) {
41707 + goto out;
41708 + }
41709 + dentry = vfsmnt->mnt_mountpoint;
41710 + vfsmnt = vfsmnt->mnt_parent;
41711 + continue;
41712 + }
41713 + parent = dentry->d_parent;
41714 + prefetch(parent);
41715 + spin_lock(&dentry->d_lock);
41716 + error = prepend_name(buffer, buflen, &dentry->d_name);
41717 + spin_unlock(&dentry->d_lock);
41718 + if (!error)
41719 + error = prepend(buffer, buflen, "/", 1);
41720 + if (error)
41721 + break;
41722 +
41723 + slash = true;
41724 + dentry = parent;
41725 + }
41726 +
41727 +out:
41728 + if (!error && !slash)
41729 + error = prepend(buffer, buflen, "/", 1);
41730 +
41731 + return error;
41732 +}
41733 +
41734 +/* this must be called with vfsmount_lock and rename_lock held */
41735 +
41736 +static char *__our_d_path(const struct path *path, struct path *root,
41737 + char *buf, int buflen)
41738 +{
41739 + char *res = buf + buflen;
41740 + int error;
41741 +
41742 + prepend(&res, &buflen, "\0", 1);
41743 + error = prepend_path(path, root, &res, &buflen);
41744 + if (error)
41745 + return ERR_PTR(error);
41746 +
41747 + return res;
41748 +}
41749 +
41750 +static char *
41751 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
41752 +{
41753 + char *retval;
41754 +
41755 + retval = __our_d_path(path, root, buf, buflen);
41756 + if (unlikely(IS_ERR(retval)))
41757 + retval = strcpy(buf, "<path too long>");
41758 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
41759 + retval[1] = '\0';
41760 +
41761 + return retval;
41762 +}
41763 +
41764 +static char *
41765 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41766 + char *buf, int buflen)
41767 +{
41768 + struct path path;
41769 + char *res;
41770 +
41771 + path.dentry = (struct dentry *)dentry;
41772 + path.mnt = (struct vfsmount *)vfsmnt;
41773 +
41774 + /* we can use real_root.dentry, real_root.mnt, because this is only called
41775 + by the RBAC system */
41776 + res = gen_full_path(&path, &real_root, buf, buflen);
41777 +
41778 + return res;
41779 +}
41780 +
41781 +static char *
41782 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41783 + char *buf, int buflen)
41784 +{
41785 + char *res;
41786 + struct path path;
41787 + struct path root;
41788 + struct task_struct *reaper = &init_task;
41789 +
41790 + path.dentry = (struct dentry *)dentry;
41791 + path.mnt = (struct vfsmount *)vfsmnt;
41792 +
41793 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
41794 + get_fs_root(reaper->fs, &root);
41795 +
41796 + write_seqlock(&rename_lock);
41797 + br_read_lock(vfsmount_lock);
41798 + res = gen_full_path(&path, &root, buf, buflen);
41799 + br_read_unlock(vfsmount_lock);
41800 + write_sequnlock(&rename_lock);
41801 +
41802 + path_put(&root);
41803 + return res;
41804 +}
41805 +
41806 +static char *
41807 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
41808 +{
41809 + char *ret;
41810 + write_seqlock(&rename_lock);
41811 + br_read_lock(vfsmount_lock);
41812 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41813 + PAGE_SIZE);
41814 + br_read_unlock(vfsmount_lock);
41815 + write_sequnlock(&rename_lock);
41816 + return ret;
41817 +}
41818 +
41819 +char *
41820 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
41821 +{
41822 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41823 + PAGE_SIZE);
41824 +}
41825 +
41826 +char *
41827 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
41828 +{
41829 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
41830 + PAGE_SIZE);
41831 +}
41832 +
41833 +char *
41834 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
41835 +{
41836 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
41837 + PAGE_SIZE);
41838 +}
41839 +
41840 +char *
41841 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
41842 +{
41843 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
41844 + PAGE_SIZE);
41845 +}
41846 +
41847 +char *
41848 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
41849 +{
41850 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
41851 + PAGE_SIZE);
41852 +}
41853 +
41854 +__inline__ __u32
41855 +to_gr_audit(const __u32 reqmode)
41856 +{
41857 + /* masks off auditable permission flags, then shifts them to create
41858 + auditing flags, and adds the special case of append auditing if
41859 + we're requesting write */
41860 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
41861 +}
41862 +
41863 +struct acl_subject_label *
41864 +lookup_subject_map(const struct acl_subject_label *userp)
41865 +{
41866 + unsigned int index = shash(userp, subj_map_set.s_size);
41867 + struct subject_map *match;
41868 +
41869 + match = subj_map_set.s_hash[index];
41870 +
41871 + while (match && match->user != userp)
41872 + match = match->next;
41873 +
41874 + if (match != NULL)
41875 + return match->kernel;
41876 + else
41877 + return NULL;
41878 +}
41879 +
41880 +static void
41881 +insert_subj_map_entry(struct subject_map *subjmap)
41882 +{
41883 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
41884 + struct subject_map **curr;
41885 +
41886 + subjmap->prev = NULL;
41887 +
41888 + curr = &subj_map_set.s_hash[index];
41889 + if (*curr != NULL)
41890 + (*curr)->prev = subjmap;
41891 +
41892 + subjmap->next = *curr;
41893 + *curr = subjmap;
41894 +
41895 + return;
41896 +}
41897 +
41898 +static struct acl_role_label *
41899 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
41900 + const gid_t gid)
41901 +{
41902 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
41903 + struct acl_role_label *match;
41904 + struct role_allowed_ip *ipp;
41905 + unsigned int x;
41906 + u32 curr_ip = task->signal->curr_ip;
41907 +
41908 + task->signal->saved_ip = curr_ip;
41909 +
41910 + match = acl_role_set.r_hash[index];
41911 +
41912 + while (match) {
41913 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
41914 + for (x = 0; x < match->domain_child_num; x++) {
41915 + if (match->domain_children[x] == uid)
41916 + goto found;
41917 + }
41918 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
41919 + break;
41920 + match = match->next;
41921 + }
41922 +found:
41923 + if (match == NULL) {
41924 + try_group:
41925 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
41926 + match = acl_role_set.r_hash[index];
41927 +
41928 + while (match) {
41929 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
41930 + for (x = 0; x < match->domain_child_num; x++) {
41931 + if (match->domain_children[x] == gid)
41932 + goto found2;
41933 + }
41934 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
41935 + break;
41936 + match = match->next;
41937 + }
41938 +found2:
41939 + if (match == NULL)
41940 + match = default_role;
41941 + if (match->allowed_ips == NULL)
41942 + return match;
41943 + else {
41944 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41945 + if (likely
41946 + ((ntohl(curr_ip) & ipp->netmask) ==
41947 + (ntohl(ipp->addr) & ipp->netmask)))
41948 + return match;
41949 + }
41950 + match = default_role;
41951 + }
41952 + } else if (match->allowed_ips == NULL) {
41953 + return match;
41954 + } else {
41955 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41956 + if (likely
41957 + ((ntohl(curr_ip) & ipp->netmask) ==
41958 + (ntohl(ipp->addr) & ipp->netmask)))
41959 + return match;
41960 + }
41961 + goto try_group;
41962 + }
41963 +
41964 + return match;
41965 +}
41966 +
41967 +struct acl_subject_label *
41968 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
41969 + const struct acl_role_label *role)
41970 +{
41971 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
41972 + struct acl_subject_label *match;
41973 +
41974 + match = role->subj_hash[index];
41975 +
41976 + while (match && (match->inode != ino || match->device != dev ||
41977 + (match->mode & GR_DELETED))) {
41978 + match = match->next;
41979 + }
41980 +
41981 + if (match && !(match->mode & GR_DELETED))
41982 + return match;
41983 + else
41984 + return NULL;
41985 +}
41986 +
41987 +struct acl_subject_label *
41988 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
41989 + const struct acl_role_label *role)
41990 +{
41991 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
41992 + struct acl_subject_label *match;
41993 +
41994 + match = role->subj_hash[index];
41995 +
41996 + while (match && (match->inode != ino || match->device != dev ||
41997 + !(match->mode & GR_DELETED))) {
41998 + match = match->next;
41999 + }
42000 +
42001 + if (match && (match->mode & GR_DELETED))
42002 + return match;
42003 + else
42004 + return NULL;
42005 +}
42006 +
42007 +static struct acl_object_label *
42008 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
42009 + const struct acl_subject_label *subj)
42010 +{
42011 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
42012 + struct acl_object_label *match;
42013 +
42014 + match = subj->obj_hash[index];
42015 +
42016 + while (match && (match->inode != ino || match->device != dev ||
42017 + (match->mode & GR_DELETED))) {
42018 + match = match->next;
42019 + }
42020 +
42021 + if (match && !(match->mode & GR_DELETED))
42022 + return match;
42023 + else
42024 + return NULL;
42025 +}
42026 +
42027 +static struct acl_object_label *
42028 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
42029 + const struct acl_subject_label *subj)
42030 +{
42031 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
42032 + struct acl_object_label *match;
42033 +
42034 + match = subj->obj_hash[index];
42035 +
42036 + while (match && (match->inode != ino || match->device != dev ||
42037 + !(match->mode & GR_DELETED))) {
42038 + match = match->next;
42039 + }
42040 +
42041 + if (match && (match->mode & GR_DELETED))
42042 + return match;
42043 +
42044 + match = subj->obj_hash[index];
42045 +
42046 + while (match && (match->inode != ino || match->device != dev ||
42047 + (match->mode & GR_DELETED))) {
42048 + match = match->next;
42049 + }
42050 +
42051 + if (match && !(match->mode & GR_DELETED))
42052 + return match;
42053 + else
42054 + return NULL;
42055 +}
42056 +
42057 +static struct name_entry *
42058 +lookup_name_entry(const char *name)
42059 +{
42060 + unsigned int len = strlen(name);
42061 + unsigned int key = full_name_hash(name, len);
42062 + unsigned int index = key % name_set.n_size;
42063 + struct name_entry *match;
42064 +
42065 + match = name_set.n_hash[index];
42066 +
42067 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
42068 + match = match->next;
42069 +
42070 + return match;
42071 +}
42072 +
42073 +static struct name_entry *
42074 +lookup_name_entry_create(const char *name)
42075 +{
42076 + unsigned int len = strlen(name);
42077 + unsigned int key = full_name_hash(name, len);
42078 + unsigned int index = key % name_set.n_size;
42079 + struct name_entry *match;
42080 +
42081 + match = name_set.n_hash[index];
42082 +
42083 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
42084 + !match->deleted))
42085 + match = match->next;
42086 +
42087 + if (match && match->deleted)
42088 + return match;
42089 +
42090 + match = name_set.n_hash[index];
42091 +
42092 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
42093 + match->deleted))
42094 + match = match->next;
42095 +
42096 + if (match && !match->deleted)
42097 + return match;
42098 + else
42099 + return NULL;
42100 +}
42101 +
42102 +static struct inodev_entry *
42103 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
42104 +{
42105 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
42106 + struct inodev_entry *match;
42107 +
42108 + match = inodev_set.i_hash[index];
42109 +
42110 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
42111 + match = match->next;
42112 +
42113 + return match;
42114 +}
42115 +
42116 +static void
42117 +insert_inodev_entry(struct inodev_entry *entry)
42118 +{
42119 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
42120 + inodev_set.i_size);
42121 + struct inodev_entry **curr;
42122 +
42123 + entry->prev = NULL;
42124 +
42125 + curr = &inodev_set.i_hash[index];
42126 + if (*curr != NULL)
42127 + (*curr)->prev = entry;
42128 +
42129 + entry->next = *curr;
42130 + *curr = entry;
42131 +
42132 + return;
42133 +}
42134 +
42135 +static void
42136 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
42137 +{
42138 + unsigned int index =
42139 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
42140 + struct acl_role_label **curr;
42141 + struct acl_role_label *tmp;
42142 +
42143 + curr = &acl_role_set.r_hash[index];
42144 +
42145 + /* if role was already inserted due to domains and already has
42146 + a role in the same bucket as it attached, then we need to
42147 + combine these two buckets
42148 + */
42149 + if (role->next) {
42150 + tmp = role->next;
42151 + while (tmp->next)
42152 + tmp = tmp->next;
42153 + tmp->next = *curr;
42154 + } else
42155 + role->next = *curr;
42156 + *curr = role;
42157 +
42158 + return;
42159 +}
42160 +
42161 +static void
42162 +insert_acl_role_label(struct acl_role_label *role)
42163 +{
42164 + int i;
42165 +
42166 + if (role_list == NULL) {
42167 + role_list = role;
42168 + role->prev = NULL;
42169 + } else {
42170 + role->prev = role_list;
42171 + role_list = role;
42172 + }
42173 +
42174 + /* used for hash chains */
42175 + role->next = NULL;
42176 +
42177 + if (role->roletype & GR_ROLE_DOMAIN) {
42178 + for (i = 0; i < role->domain_child_num; i++)
42179 + __insert_acl_role_label(role, role->domain_children[i]);
42180 + } else
42181 + __insert_acl_role_label(role, role->uidgid);
42182 +}
42183 +
42184 +static int
42185 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
42186 +{
42187 + struct name_entry **curr, *nentry;
42188 + struct inodev_entry *ientry;
42189 + unsigned int len = strlen(name);
42190 + unsigned int key = full_name_hash(name, len);
42191 + unsigned int index = key % name_set.n_size;
42192 +
42193 + curr = &name_set.n_hash[index];
42194 +
42195 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
42196 + curr = &((*curr)->next);
42197 +
42198 + if (*curr != NULL)
42199 + return 1;
42200 +
42201 + nentry = acl_alloc(sizeof (struct name_entry));
42202 + if (nentry == NULL)
42203 + return 0;
42204 + ientry = acl_alloc(sizeof (struct inodev_entry));
42205 + if (ientry == NULL)
42206 + return 0;
42207 + ientry->nentry = nentry;
42208 +
42209 + nentry->key = key;
42210 + nentry->name = name;
42211 + nentry->inode = inode;
42212 + nentry->device = device;
42213 + nentry->len = len;
42214 + nentry->deleted = deleted;
42215 +
42216 + nentry->prev = NULL;
42217 + curr = &name_set.n_hash[index];
42218 + if (*curr != NULL)
42219 + (*curr)->prev = nentry;
42220 + nentry->next = *curr;
42221 + *curr = nentry;
42222 +
42223 + /* insert us into the table searchable by inode/dev */
42224 + insert_inodev_entry(ientry);
42225 +
42226 + return 1;
42227 +}
42228 +
42229 +static void
42230 +insert_acl_obj_label(struct acl_object_label *obj,
42231 + struct acl_subject_label *subj)
42232 +{
42233 + unsigned int index =
42234 + fhash(obj->inode, obj->device, subj->obj_hash_size);
42235 + struct acl_object_label **curr;
42236 +
42237 +
42238 + obj->prev = NULL;
42239 +
42240 + curr = &subj->obj_hash[index];
42241 + if (*curr != NULL)
42242 + (*curr)->prev = obj;
42243 +
42244 + obj->next = *curr;
42245 + *curr = obj;
42246 +
42247 + return;
42248 +}
42249 +
42250 +static void
42251 +insert_acl_subj_label(struct acl_subject_label *obj,
42252 + struct acl_role_label *role)
42253 +{
42254 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
42255 + struct acl_subject_label **curr;
42256 +
42257 + obj->prev = NULL;
42258 +
42259 + curr = &role->subj_hash[index];
42260 + if (*curr != NULL)
42261 + (*curr)->prev = obj;
42262 +
42263 + obj->next = *curr;
42264 + *curr = obj;
42265 +
42266 + return;
42267 +}
42268 +
42269 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
42270 +
42271 +static void *
42272 +create_table(__u32 * len, int elementsize)
42273 +{
42274 + unsigned int table_sizes[] = {
42275 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
42276 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
42277 + 4194301, 8388593, 16777213, 33554393, 67108859
42278 + };
42279 + void *newtable = NULL;
42280 + unsigned int pwr = 0;
42281 +
42282 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
42283 + table_sizes[pwr] <= *len)
42284 + pwr++;
42285 +
42286 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
42287 + return newtable;
42288 +
42289 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
42290 + newtable =
42291 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
42292 + else
42293 + newtable = vmalloc(table_sizes[pwr] * elementsize);
42294 +
42295 + *len = table_sizes[pwr];
42296 +
42297 + return newtable;
42298 +}
42299 +
42300 +static int
42301 +init_variables(const struct gr_arg *arg)
42302 +{
42303 + struct task_struct *reaper = &init_task;
42304 + unsigned int stacksize;
42305 +
42306 + subj_map_set.s_size = arg->role_db.num_subjects;
42307 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
42308 + name_set.n_size = arg->role_db.num_objects;
42309 + inodev_set.i_size = arg->role_db.num_objects;
42310 +
42311 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
42312 + !name_set.n_size || !inodev_set.i_size)
42313 + return 1;
42314 +
42315 + if (!gr_init_uidset())
42316 + return 1;
42317 +
42318 + /* set up the stack that holds allocation info */
42319 +
42320 + stacksize = arg->role_db.num_pointers + 5;
42321 +
42322 + if (!acl_alloc_stack_init(stacksize))
42323 + return 1;
42324 +
42325 + /* grab reference for the real root dentry and vfsmount */
42326 + get_fs_root(reaper->fs, &real_root);
42327 +
42328 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42329 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
42330 +#endif
42331 +
42332 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
42333 + if (fakefs_obj_rw == NULL)
42334 + return 1;
42335 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
42336 +
42337 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
42338 + if (fakefs_obj_rwx == NULL)
42339 + return 1;
42340 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
42341 +
42342 + subj_map_set.s_hash =
42343 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
42344 + acl_role_set.r_hash =
42345 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
42346 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
42347 + inodev_set.i_hash =
42348 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
42349 +
42350 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
42351 + !name_set.n_hash || !inodev_set.i_hash)
42352 + return 1;
42353 +
42354 + memset(subj_map_set.s_hash, 0,
42355 + sizeof(struct subject_map *) * subj_map_set.s_size);
42356 + memset(acl_role_set.r_hash, 0,
42357 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
42358 + memset(name_set.n_hash, 0,
42359 + sizeof (struct name_entry *) * name_set.n_size);
42360 + memset(inodev_set.i_hash, 0,
42361 + sizeof (struct inodev_entry *) * inodev_set.i_size);
42362 +
42363 + return 0;
42364 +}
42365 +
42366 +/* free information not needed after startup
42367 + currently contains user->kernel pointer mappings for subjects
42368 +*/
42369 +
42370 +static void
42371 +free_init_variables(void)
42372 +{
42373 + __u32 i;
42374 +
42375 + if (subj_map_set.s_hash) {
42376 + for (i = 0; i < subj_map_set.s_size; i++) {
42377 + if (subj_map_set.s_hash[i]) {
42378 + kfree(subj_map_set.s_hash[i]);
42379 + subj_map_set.s_hash[i] = NULL;
42380 + }
42381 + }
42382 +
42383 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
42384 + PAGE_SIZE)
42385 + kfree(subj_map_set.s_hash);
42386 + else
42387 + vfree(subj_map_set.s_hash);
42388 + }
42389 +
42390 + return;
42391 +}
42392 +
42393 +static void
42394 +free_variables(void)
42395 +{
42396 + struct acl_subject_label *s;
42397 + struct acl_role_label *r;
42398 + struct task_struct *task, *task2;
42399 + unsigned int x;
42400 +
42401 + gr_clear_learn_entries();
42402 +
42403 + read_lock(&tasklist_lock);
42404 + do_each_thread(task2, task) {
42405 + task->acl_sp_role = 0;
42406 + task->acl_role_id = 0;
42407 + task->acl = NULL;
42408 + task->role = NULL;
42409 + } while_each_thread(task2, task);
42410 + read_unlock(&tasklist_lock);
42411 +
42412 + /* release the reference to the real root dentry and vfsmount */
42413 + path_put(&real_root);
42414 +
42415 + /* free all object hash tables */
42416 +
42417 + FOR_EACH_ROLE_START(r)
42418 + if (r->subj_hash == NULL)
42419 + goto next_role;
42420 + FOR_EACH_SUBJECT_START(r, s, x)
42421 + if (s->obj_hash == NULL)
42422 + break;
42423 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42424 + kfree(s->obj_hash);
42425 + else
42426 + vfree(s->obj_hash);
42427 + FOR_EACH_SUBJECT_END(s, x)
42428 + FOR_EACH_NESTED_SUBJECT_START(r, s)
42429 + if (s->obj_hash == NULL)
42430 + break;
42431 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42432 + kfree(s->obj_hash);
42433 + else
42434 + vfree(s->obj_hash);
42435 + FOR_EACH_NESTED_SUBJECT_END(s)
42436 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
42437 + kfree(r->subj_hash);
42438 + else
42439 + vfree(r->subj_hash);
42440 + r->subj_hash = NULL;
42441 +next_role:
42442 + FOR_EACH_ROLE_END(r)
42443 +
42444 + acl_free_all();
42445 +
42446 + if (acl_role_set.r_hash) {
42447 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
42448 + PAGE_SIZE)
42449 + kfree(acl_role_set.r_hash);
42450 + else
42451 + vfree(acl_role_set.r_hash);
42452 + }
42453 + if (name_set.n_hash) {
42454 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
42455 + PAGE_SIZE)
42456 + kfree(name_set.n_hash);
42457 + else
42458 + vfree(name_set.n_hash);
42459 + }
42460 +
42461 + if (inodev_set.i_hash) {
42462 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
42463 + PAGE_SIZE)
42464 + kfree(inodev_set.i_hash);
42465 + else
42466 + vfree(inodev_set.i_hash);
42467 + }
42468 +
42469 + gr_free_uidset();
42470 +
42471 + memset(&name_set, 0, sizeof (struct name_db));
42472 + memset(&inodev_set, 0, sizeof (struct inodev_db));
42473 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
42474 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
42475 +
42476 + default_role = NULL;
42477 + role_list = NULL;
42478 +
42479 + return;
42480 +}
42481 +
42482 +static __u32
42483 +count_user_objs(struct acl_object_label *userp)
42484 +{
42485 + struct acl_object_label o_tmp;
42486 + __u32 num = 0;
42487 +
42488 + while (userp) {
42489 + if (copy_from_user(&o_tmp, userp,
42490 + sizeof (struct acl_object_label)))
42491 + break;
42492 +
42493 + userp = o_tmp.prev;
42494 + num++;
42495 + }
42496 +
42497 + return num;
42498 +}
42499 +
42500 +static struct acl_subject_label *
42501 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
42502 +
42503 +static int
42504 +copy_user_glob(struct acl_object_label *obj)
42505 +{
42506 + struct acl_object_label *g_tmp, **guser;
42507 + unsigned int len;
42508 + char *tmp;
42509 +
42510 + if (obj->globbed == NULL)
42511 + return 0;
42512 +
42513 + guser = &obj->globbed;
42514 + while (*guser) {
42515 + g_tmp = (struct acl_object_label *)
42516 + acl_alloc(sizeof (struct acl_object_label));
42517 + if (g_tmp == NULL)
42518 + return -ENOMEM;
42519 +
42520 + if (copy_from_user(g_tmp, *guser,
42521 + sizeof (struct acl_object_label)))
42522 + return -EFAULT;
42523 +
42524 + len = strnlen_user(g_tmp->filename, PATH_MAX);
42525 +
42526 + if (!len || len >= PATH_MAX)
42527 + return -EINVAL;
42528 +
42529 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42530 + return -ENOMEM;
42531 +
42532 + if (copy_from_user(tmp, g_tmp->filename, len))
42533 + return -EFAULT;
42534 + tmp[len-1] = '\0';
42535 + g_tmp->filename = tmp;
42536 +
42537 + *guser = g_tmp;
42538 + guser = &(g_tmp->next);
42539 + }
42540 +
42541 + return 0;
42542 +}
42543 +
42544 +static int
42545 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
42546 + struct acl_role_label *role)
42547 +{
42548 + struct acl_object_label *o_tmp;
42549 + unsigned int len;
42550 + int ret;
42551 + char *tmp;
42552 +
42553 + while (userp) {
42554 + if ((o_tmp = (struct acl_object_label *)
42555 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
42556 + return -ENOMEM;
42557 +
42558 + if (copy_from_user(o_tmp, userp,
42559 + sizeof (struct acl_object_label)))
42560 + return -EFAULT;
42561 +
42562 + userp = o_tmp->prev;
42563 +
42564 + len = strnlen_user(o_tmp->filename, PATH_MAX);
42565 +
42566 + if (!len || len >= PATH_MAX)
42567 + return -EINVAL;
42568 +
42569 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42570 + return -ENOMEM;
42571 +
42572 + if (copy_from_user(tmp, o_tmp->filename, len))
42573 + return -EFAULT;
42574 + tmp[len-1] = '\0';
42575 + o_tmp->filename = tmp;
42576 +
42577 + insert_acl_obj_label(o_tmp, subj);
42578 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
42579 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
42580 + return -ENOMEM;
42581 +
42582 + ret = copy_user_glob(o_tmp);
42583 + if (ret)
42584 + return ret;
42585 +
42586 + if (o_tmp->nested) {
42587 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
42588 + if (IS_ERR(o_tmp->nested))
42589 + return PTR_ERR(o_tmp->nested);
42590 +
42591 + /* insert into nested subject list */
42592 + o_tmp->nested->next = role->hash->first;
42593 + role->hash->first = o_tmp->nested;
42594 + }
42595 + }
42596 +
42597 + return 0;
42598 +}
42599 +
42600 +static __u32
42601 +count_user_subjs(struct acl_subject_label *userp)
42602 +{
42603 + struct acl_subject_label s_tmp;
42604 + __u32 num = 0;
42605 +
42606 + while (userp) {
42607 + if (copy_from_user(&s_tmp, userp,
42608 + sizeof (struct acl_subject_label)))
42609 + break;
42610 +
42611 + userp = s_tmp.prev;
42612 + /* do not count nested subjects against this count, since
42613 + they are not included in the hash table, but are
42614 + attached to objects. We have already counted
42615 + the subjects in userspace for the allocation
42616 + stack
42617 + */
42618 + if (!(s_tmp.mode & GR_NESTED))
42619 + num++;
42620 + }
42621 +
42622 + return num;
42623 +}
42624 +
42625 +static int
42626 +copy_user_allowedips(struct acl_role_label *rolep)
42627 +{
42628 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
42629 +
42630 + ruserip = rolep->allowed_ips;
42631 +
42632 + while (ruserip) {
42633 + rlast = rtmp;
42634 +
42635 + if ((rtmp = (struct role_allowed_ip *)
42636 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
42637 + return -ENOMEM;
42638 +
42639 + if (copy_from_user(rtmp, ruserip,
42640 + sizeof (struct role_allowed_ip)))
42641 + return -EFAULT;
42642 +
42643 + ruserip = rtmp->prev;
42644 +
42645 + if (!rlast) {
42646 + rtmp->prev = NULL;
42647 + rolep->allowed_ips = rtmp;
42648 + } else {
42649 + rlast->next = rtmp;
42650 + rtmp->prev = rlast;
42651 + }
42652 +
42653 + if (!ruserip)
42654 + rtmp->next = NULL;
42655 + }
42656 +
42657 + return 0;
42658 +}
42659 +
42660 +static int
42661 +copy_user_transitions(struct acl_role_label *rolep)
42662 +{
42663 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
42664 +
42665 + unsigned int len;
42666 + char *tmp;
42667 +
42668 + rusertp = rolep->transitions;
42669 +
42670 + while (rusertp) {
42671 + rlast = rtmp;
42672 +
42673 + if ((rtmp = (struct role_transition *)
42674 + acl_alloc(sizeof (struct role_transition))) == NULL)
42675 + return -ENOMEM;
42676 +
42677 + if (copy_from_user(rtmp, rusertp,
42678 + sizeof (struct role_transition)))
42679 + return -EFAULT;
42680 +
42681 + rusertp = rtmp->prev;
42682 +
42683 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
42684 +
42685 + if (!len || len >= GR_SPROLE_LEN)
42686 + return -EINVAL;
42687 +
42688 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42689 + return -ENOMEM;
42690 +
42691 + if (copy_from_user(tmp, rtmp->rolename, len))
42692 + return -EFAULT;
42693 + tmp[len-1] = '\0';
42694 + rtmp->rolename = tmp;
42695 +
42696 + if (!rlast) {
42697 + rtmp->prev = NULL;
42698 + rolep->transitions = rtmp;
42699 + } else {
42700 + rlast->next = rtmp;
42701 + rtmp->prev = rlast;
42702 + }
42703 +
42704 + if (!rusertp)
42705 + rtmp->next = NULL;
42706 + }
42707 +
42708 + return 0;
42709 +}
42710 +
42711 +static struct acl_subject_label *
42712 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
42713 +{
42714 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
42715 + unsigned int len;
42716 + char *tmp;
42717 + __u32 num_objs;
42718 + struct acl_ip_label **i_tmp, *i_utmp2;
42719 + struct gr_hash_struct ghash;
42720 + struct subject_map *subjmap;
42721 + unsigned int i_num;
42722 + int err;
42723 +
42724 + s_tmp = lookup_subject_map(userp);
42725 +
42726 + /* we've already copied this subject into the kernel, just return
42727 + the reference to it, and don't copy it over again
42728 + */
42729 + if (s_tmp)
42730 + return(s_tmp);
42731 +
42732 + if ((s_tmp = (struct acl_subject_label *)
42733 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
42734 + return ERR_PTR(-ENOMEM);
42735 +
42736 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
42737 + if (subjmap == NULL)
42738 + return ERR_PTR(-ENOMEM);
42739 +
42740 + subjmap->user = userp;
42741 + subjmap->kernel = s_tmp;
42742 + insert_subj_map_entry(subjmap);
42743 +
42744 + if (copy_from_user(s_tmp, userp,
42745 + sizeof (struct acl_subject_label)))
42746 + return ERR_PTR(-EFAULT);
42747 +
42748 + len = strnlen_user(s_tmp->filename, PATH_MAX);
42749 +
42750 + if (!len || len >= PATH_MAX)
42751 + return ERR_PTR(-EINVAL);
42752 +
42753 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42754 + return ERR_PTR(-ENOMEM);
42755 +
42756 + if (copy_from_user(tmp, s_tmp->filename, len))
42757 + return ERR_PTR(-EFAULT);
42758 + tmp[len-1] = '\0';
42759 + s_tmp->filename = tmp;
42760 +
42761 + if (!strcmp(s_tmp->filename, "/"))
42762 + role->root_label = s_tmp;
42763 +
42764 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
42765 + return ERR_PTR(-EFAULT);
42766 +
42767 + /* copy user and group transition tables */
42768 +
42769 + if (s_tmp->user_trans_num) {
42770 + uid_t *uidlist;
42771 +
42772 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
42773 + if (uidlist == NULL)
42774 + return ERR_PTR(-ENOMEM);
42775 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
42776 + return ERR_PTR(-EFAULT);
42777 +
42778 + s_tmp->user_transitions = uidlist;
42779 + }
42780 +
42781 + if (s_tmp->group_trans_num) {
42782 + gid_t *gidlist;
42783 +
42784 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
42785 + if (gidlist == NULL)
42786 + return ERR_PTR(-ENOMEM);
42787 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
42788 + return ERR_PTR(-EFAULT);
42789 +
42790 + s_tmp->group_transitions = gidlist;
42791 + }
42792 +
42793 + /* set up object hash table */
42794 + num_objs = count_user_objs(ghash.first);
42795 +
42796 + s_tmp->obj_hash_size = num_objs;
42797 + s_tmp->obj_hash =
42798 + (struct acl_object_label **)
42799 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
42800 +
42801 + if (!s_tmp->obj_hash)
42802 + return ERR_PTR(-ENOMEM);
42803 +
42804 + memset(s_tmp->obj_hash, 0,
42805 + s_tmp->obj_hash_size *
42806 + sizeof (struct acl_object_label *));
42807 +
42808 + /* add in objects */
42809 + err = copy_user_objs(ghash.first, s_tmp, role);
42810 +
42811 + if (err)
42812 + return ERR_PTR(err);
42813 +
42814 + /* set pointer for parent subject */
42815 + if (s_tmp->parent_subject) {
42816 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
42817 +
42818 + if (IS_ERR(s_tmp2))
42819 + return s_tmp2;
42820 +
42821 + s_tmp->parent_subject = s_tmp2;
42822 + }
42823 +
42824 + /* add in ip acls */
42825 +
42826 + if (!s_tmp->ip_num) {
42827 + s_tmp->ips = NULL;
42828 + goto insert;
42829 + }
42830 +
42831 + i_tmp =
42832 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
42833 + sizeof (struct acl_ip_label *));
42834 +
42835 + if (!i_tmp)
42836 + return ERR_PTR(-ENOMEM);
42837 +
42838 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
42839 + *(i_tmp + i_num) =
42840 + (struct acl_ip_label *)
42841 + acl_alloc(sizeof (struct acl_ip_label));
42842 + if (!*(i_tmp + i_num))
42843 + return ERR_PTR(-ENOMEM);
42844 +
42845 + if (copy_from_user
42846 + (&i_utmp2, s_tmp->ips + i_num,
42847 + sizeof (struct acl_ip_label *)))
42848 + return ERR_PTR(-EFAULT);
42849 +
42850 + if (copy_from_user
42851 + (*(i_tmp + i_num), i_utmp2,
42852 + sizeof (struct acl_ip_label)))
42853 + return ERR_PTR(-EFAULT);
42854 +
42855 + if ((*(i_tmp + i_num))->iface == NULL)
42856 + continue;
42857 +
42858 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
42859 + if (!len || len >= IFNAMSIZ)
42860 + return ERR_PTR(-EINVAL);
42861 + tmp = acl_alloc(len);
42862 + if (tmp == NULL)
42863 + return ERR_PTR(-ENOMEM);
42864 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
42865 + return ERR_PTR(-EFAULT);
42866 + (*(i_tmp + i_num))->iface = tmp;
42867 + }
42868 +
42869 + s_tmp->ips = i_tmp;
42870 +
42871 +insert:
42872 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
42873 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
42874 + return ERR_PTR(-ENOMEM);
42875 +
42876 + return s_tmp;
42877 +}
42878 +
42879 +static int
42880 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
42881 +{
42882 + struct acl_subject_label s_pre;
42883 + struct acl_subject_label * ret;
42884 + int err;
42885 +
42886 + while (userp) {
42887 + if (copy_from_user(&s_pre, userp,
42888 + sizeof (struct acl_subject_label)))
42889 + return -EFAULT;
42890 +
42891 + /* do not add nested subjects here, add
42892 + while parsing objects
42893 + */
42894 +
42895 + if (s_pre.mode & GR_NESTED) {
42896 + userp = s_pre.prev;
42897 + continue;
42898 + }
42899 +
42900 + ret = do_copy_user_subj(userp, role);
42901 +
42902 + err = PTR_ERR(ret);
42903 + if (IS_ERR(ret))
42904 + return err;
42905 +
42906 + insert_acl_subj_label(ret, role);
42907 +
42908 + userp = s_pre.prev;
42909 + }
42910 +
42911 + return 0;
42912 +}
42913 +
42914 +static int
42915 +copy_user_acl(struct gr_arg *arg)
42916 +{
42917 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
42918 + struct sprole_pw *sptmp;
42919 + struct gr_hash_struct *ghash;
42920 + uid_t *domainlist;
42921 + unsigned int r_num;
42922 + unsigned int len;
42923 + char *tmp;
42924 + int err = 0;
42925 + __u16 i;
42926 + __u32 num_subjs;
42927 +
42928 + /* we need a default and kernel role */
42929 + if (arg->role_db.num_roles < 2)
42930 + return -EINVAL;
42931 +
42932 + /* copy special role authentication info from userspace */
42933 +
42934 + num_sprole_pws = arg->num_sprole_pws;
42935 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
42936 +
42937 + if (!acl_special_roles) {
42938 + err = -ENOMEM;
42939 + goto cleanup;
42940 + }
42941 +
42942 + for (i = 0; i < num_sprole_pws; i++) {
42943 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
42944 + if (!sptmp) {
42945 + err = -ENOMEM;
42946 + goto cleanup;
42947 + }
42948 + if (copy_from_user(sptmp, arg->sprole_pws + i,
42949 + sizeof (struct sprole_pw))) {
42950 + err = -EFAULT;
42951 + goto cleanup;
42952 + }
42953 +
42954 + len =
42955 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
42956 +
42957 + if (!len || len >= GR_SPROLE_LEN) {
42958 + err = -EINVAL;
42959 + goto cleanup;
42960 + }
42961 +
42962 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
42963 + err = -ENOMEM;
42964 + goto cleanup;
42965 + }
42966 +
42967 + if (copy_from_user(tmp, sptmp->rolename, len)) {
42968 + err = -EFAULT;
42969 + goto cleanup;
42970 + }
42971 + tmp[len-1] = '\0';
42972 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42973 + printk(KERN_ALERT "Copying special role %s\n", tmp);
42974 +#endif
42975 + sptmp->rolename = tmp;
42976 + acl_special_roles[i] = sptmp;
42977 + }
42978 +
42979 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
42980 +
42981 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
42982 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
42983 +
42984 + if (!r_tmp) {
42985 + err = -ENOMEM;
42986 + goto cleanup;
42987 + }
42988 +
42989 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
42990 + sizeof (struct acl_role_label *))) {
42991 + err = -EFAULT;
42992 + goto cleanup;
42993 + }
42994 +
42995 + if (copy_from_user(r_tmp, r_utmp2,
42996 + sizeof (struct acl_role_label))) {
42997 + err = -EFAULT;
42998 + goto cleanup;
42999 + }
43000 +
43001 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
43002 +
43003 + if (!len || len >= PATH_MAX) {
43004 + err = -EINVAL;
43005 + goto cleanup;
43006 + }
43007 +
43008 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
43009 + err = -ENOMEM;
43010 + goto cleanup;
43011 + }
43012 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
43013 + err = -EFAULT;
43014 + goto cleanup;
43015 + }
43016 + tmp[len-1] = '\0';
43017 + r_tmp->rolename = tmp;
43018 +
43019 + if (!strcmp(r_tmp->rolename, "default")
43020 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
43021 + default_role = r_tmp;
43022 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
43023 + kernel_role = r_tmp;
43024 + }
43025 +
43026 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
43027 + err = -ENOMEM;
43028 + goto cleanup;
43029 + }
43030 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
43031 + err = -EFAULT;
43032 + goto cleanup;
43033 + }
43034 +
43035 + r_tmp->hash = ghash;
43036 +
43037 + num_subjs = count_user_subjs(r_tmp->hash->first);
43038 +
43039 + r_tmp->subj_hash_size = num_subjs;
43040 + r_tmp->subj_hash =
43041 + (struct acl_subject_label **)
43042 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
43043 +
43044 + if (!r_tmp->subj_hash) {
43045 + err = -ENOMEM;
43046 + goto cleanup;
43047 + }
43048 +
43049 + err = copy_user_allowedips(r_tmp);
43050 + if (err)
43051 + goto cleanup;
43052 +
43053 + /* copy domain info */
43054 + if (r_tmp->domain_children != NULL) {
43055 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
43056 + if (domainlist == NULL) {
43057 + err = -ENOMEM;
43058 + goto cleanup;
43059 + }
43060 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
43061 + err = -EFAULT;
43062 + goto cleanup;
43063 + }
43064 + r_tmp->domain_children = domainlist;
43065 + }
43066 +
43067 + err = copy_user_transitions(r_tmp);
43068 + if (err)
43069 + goto cleanup;
43070 +
43071 + memset(r_tmp->subj_hash, 0,
43072 + r_tmp->subj_hash_size *
43073 + sizeof (struct acl_subject_label *));
43074 +
43075 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
43076 +
43077 + if (err)
43078 + goto cleanup;
43079 +
43080 + /* set nested subject list to null */
43081 + r_tmp->hash->first = NULL;
43082 +
43083 + insert_acl_role_label(r_tmp);
43084 + }
43085 +
43086 + goto return_err;
43087 + cleanup:
43088 + free_variables();
43089 + return_err:
43090 + return err;
43091 +
43092 +}
43093 +
43094 +static int
43095 +gracl_init(struct gr_arg *args)
43096 +{
43097 + int error = 0;
43098 +
43099 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
43100 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
43101 +
43102 + if (init_variables(args)) {
43103 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
43104 + error = -ENOMEM;
43105 + free_variables();
43106 + goto out;
43107 + }
43108 +
43109 + error = copy_user_acl(args);
43110 + free_init_variables();
43111 + if (error) {
43112 + free_variables();
43113 + goto out;
43114 + }
43115 +
43116 + if ((error = gr_set_acls(0))) {
43117 + free_variables();
43118 + goto out;
43119 + }
43120 +
43121 + pax_open_kernel();
43122 + gr_status |= GR_READY;
43123 + pax_close_kernel();
43124 +
43125 + out:
43126 + return error;
43127 +}
43128 +
43129 +/* derived from glibc fnmatch() 0: match, 1: no match*/
43130 +
43131 +static int
43132 +glob_match(const char *p, const char *n)
43133 +{
43134 + char c;
43135 +
43136 + while ((c = *p++) != '\0') {
43137 + switch (c) {
43138 + case '?':
43139 + if (*n == '\0')
43140 + return 1;
43141 + else if (*n == '/')
43142 + return 1;
43143 + break;
43144 + case '\\':
43145 + if (*n != c)
43146 + return 1;
43147 + break;
43148 + case '*':
43149 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
43150 + if (*n == '/')
43151 + return 1;
43152 + else if (c == '?') {
43153 + if (*n == '\0')
43154 + return 1;
43155 + else
43156 + ++n;
43157 + }
43158 + }
43159 + if (c == '\0') {
43160 + return 0;
43161 + } else {
43162 + const char *endp;
43163 +
43164 + if ((endp = strchr(n, '/')) == NULL)
43165 + endp = n + strlen(n);
43166 +
43167 + if (c == '[') {
43168 + for (--p; n < endp; ++n)
43169 + if (!glob_match(p, n))
43170 + return 0;
43171 + } else if (c == '/') {
43172 + while (*n != '\0' && *n != '/')
43173 + ++n;
43174 + if (*n == '/' && !glob_match(p, n + 1))
43175 + return 0;
43176 + } else {
43177 + for (--p; n < endp; ++n)
43178 + if (*n == c && !glob_match(p, n))
43179 + return 0;
43180 + }
43181 +
43182 + return 1;
43183 + }
43184 + case '[':
43185 + {
43186 + int not;
43187 + char cold;
43188 +
43189 + if (*n == '\0' || *n == '/')
43190 + return 1;
43191 +
43192 + not = (*p == '!' || *p == '^');
43193 + if (not)
43194 + ++p;
43195 +
43196 + c = *p++;
43197 + for (;;) {
43198 + unsigned char fn = (unsigned char)*n;
43199 +
43200 + if (c == '\0')
43201 + return 1;
43202 + else {
43203 + if (c == fn)
43204 + goto matched;
43205 + cold = c;
43206 + c = *p++;
43207 +
43208 + if (c == '-' && *p != ']') {
43209 + unsigned char cend = *p++;
43210 +
43211 + if (cend == '\0')
43212 + return 1;
43213 +
43214 + if (cold <= fn && fn <= cend)
43215 + goto matched;
43216 +
43217 + c = *p++;
43218 + }
43219 + }
43220 +
43221 + if (c == ']')
43222 + break;
43223 + }
43224 + if (!not)
43225 + return 1;
43226 + break;
43227 + matched:
43228 + while (c != ']') {
43229 + if (c == '\0')
43230 + return 1;
43231 +
43232 + c = *p++;
43233 + }
43234 + if (not)
43235 + return 1;
43236 + }
43237 + break;
43238 + default:
43239 + if (c != *n)
43240 + return 1;
43241 + }
43242 +
43243 + ++n;
43244 + }
43245 +
43246 + if (*n == '\0')
43247 + return 0;
43248 +
43249 + if (*n == '/')
43250 + return 0;
43251 +
43252 + return 1;
43253 +}
43254 +
43255 +static struct acl_object_label *
43256 +chk_glob_label(struct acl_object_label *globbed,
43257 + struct dentry *dentry, struct vfsmount *mnt, char **path)
43258 +{
43259 + struct acl_object_label *tmp;
43260 +
43261 + if (*path == NULL)
43262 + *path = gr_to_filename_nolock(dentry, mnt);
43263 +
43264 + tmp = globbed;
43265 +
43266 + while (tmp) {
43267 + if (!glob_match(tmp->filename, *path))
43268 + return tmp;
43269 + tmp = tmp->next;
43270 + }
43271 +
43272 + return NULL;
43273 +}
43274 +
43275 +static struct acl_object_label *
43276 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
43277 + const ino_t curr_ino, const dev_t curr_dev,
43278 + const struct acl_subject_label *subj, char **path, const int checkglob)
43279 +{
43280 + struct acl_subject_label *tmpsubj;
43281 + struct acl_object_label *retval;
43282 + struct acl_object_label *retval2;
43283 +
43284 + tmpsubj = (struct acl_subject_label *) subj;
43285 + read_lock(&gr_inode_lock);
43286 + do {
43287 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
43288 + if (retval) {
43289 + if (checkglob && retval->globbed) {
43290 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
43291 + (struct vfsmount *)orig_mnt, path);
43292 + if (retval2)
43293 + retval = retval2;
43294 + }
43295 + break;
43296 + }
43297 + } while ((tmpsubj = tmpsubj->parent_subject));
43298 + read_unlock(&gr_inode_lock);
43299 +
43300 + return retval;
43301 +}
43302 +
43303 +static __inline__ struct acl_object_label *
43304 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
43305 + struct dentry *curr_dentry,
43306 + const struct acl_subject_label *subj, char **path, const int checkglob)
43307 +{
43308 + int newglob = checkglob;
43309 + ino_t inode;
43310 + dev_t device;
43311 +
43312 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
43313 + as we don't want a / * rule to match instead of the / object
43314 + don't do this for create lookups that call this function though, since they're looking up
43315 + on the parent and thus need globbing checks on all paths
43316 + */
43317 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
43318 + newglob = GR_NO_GLOB;
43319 +
43320 + spin_lock(&curr_dentry->d_lock);
43321 + inode = curr_dentry->d_inode->i_ino;
43322 + device = __get_dev(curr_dentry);
43323 + spin_unlock(&curr_dentry->d_lock);
43324 +
43325 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
43326 +}
43327 +
43328 +static struct acl_object_label *
43329 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43330 + const struct acl_subject_label *subj, char *path, const int checkglob)
43331 +{
43332 + struct dentry *dentry = (struct dentry *) l_dentry;
43333 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43334 + struct acl_object_label *retval;
43335 + struct dentry *parent;
43336 +
43337 + write_seqlock(&rename_lock);
43338 + br_read_lock(vfsmount_lock);
43339 +
43340 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
43341 +#ifdef CONFIG_NET
43342 + mnt == sock_mnt ||
43343 +#endif
43344 +#ifdef CONFIG_HUGETLBFS
43345 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
43346 +#endif
43347 + /* ignore Eric Biederman */
43348 + IS_PRIVATE(l_dentry->d_inode))) {
43349 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
43350 + goto out;
43351 + }
43352 +
43353 + for (;;) {
43354 + if (dentry == real_root.dentry && mnt == real_root.mnt)
43355 + break;
43356 +
43357 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43358 + if (mnt->mnt_parent == mnt)
43359 + break;
43360 +
43361 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43362 + if (retval != NULL)
43363 + goto out;
43364 +
43365 + dentry = mnt->mnt_mountpoint;
43366 + mnt = mnt->mnt_parent;
43367 + continue;
43368 + }
43369 +
43370 + parent = dentry->d_parent;
43371 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43372 + if (retval != NULL)
43373 + goto out;
43374 +
43375 + dentry = parent;
43376 + }
43377 +
43378 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43379 +
43380 + /* real_root is pinned so we don't have to hold a reference */
43381 + if (retval == NULL)
43382 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
43383 +out:
43384 + br_read_unlock(vfsmount_lock);
43385 + write_sequnlock(&rename_lock);
43386 +
43387 + BUG_ON(retval == NULL);
43388 +
43389 + return retval;
43390 +}
43391 +
43392 +static __inline__ struct acl_object_label *
43393 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43394 + const struct acl_subject_label *subj)
43395 +{
43396 + char *path = NULL;
43397 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
43398 +}
43399 +
43400 +static __inline__ struct acl_object_label *
43401 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43402 + const struct acl_subject_label *subj)
43403 +{
43404 + char *path = NULL;
43405 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
43406 +}
43407 +
43408 +static __inline__ struct acl_object_label *
43409 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43410 + const struct acl_subject_label *subj, char *path)
43411 +{
43412 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
43413 +}
43414 +
43415 +static struct acl_subject_label *
43416 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43417 + const struct acl_role_label *role)
43418 +{
43419 + struct dentry *dentry = (struct dentry *) l_dentry;
43420 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43421 + struct acl_subject_label *retval;
43422 + struct dentry *parent;
43423 +
43424 + write_seqlock(&rename_lock);
43425 + br_read_lock(vfsmount_lock);
43426 +
43427 + for (;;) {
43428 + if (dentry == real_root.dentry && mnt == real_root.mnt)
43429 + break;
43430 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43431 + if (mnt->mnt_parent == mnt)
43432 + break;
43433 +
43434 + spin_lock(&dentry->d_lock);
43435 + read_lock(&gr_inode_lock);
43436 + retval =
43437 + lookup_acl_subj_label(dentry->d_inode->i_ino,
43438 + __get_dev(dentry), role);
43439 + read_unlock(&gr_inode_lock);
43440 + spin_unlock(&dentry->d_lock);
43441 + if (retval != NULL)
43442 + goto out;
43443 +
43444 + dentry = mnt->mnt_mountpoint;
43445 + mnt = mnt->mnt_parent;
43446 + continue;
43447 + }
43448 +
43449 + spin_lock(&dentry->d_lock);
43450 + read_lock(&gr_inode_lock);
43451 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43452 + __get_dev(dentry), role);
43453 + read_unlock(&gr_inode_lock);
43454 + parent = dentry->d_parent;
43455 + spin_unlock(&dentry->d_lock);
43456 +
43457 + if (retval != NULL)
43458 + goto out;
43459 +
43460 + dentry = parent;
43461 + }
43462 +
43463 + spin_lock(&dentry->d_lock);
43464 + read_lock(&gr_inode_lock);
43465 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43466 + __get_dev(dentry), role);
43467 + read_unlock(&gr_inode_lock);
43468 + spin_unlock(&dentry->d_lock);
43469 +
43470 + if (unlikely(retval == NULL)) {
43471 + /* real_root is pinned, we don't need to hold a reference */
43472 + read_lock(&gr_inode_lock);
43473 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
43474 + __get_dev(real_root.dentry), role);
43475 + read_unlock(&gr_inode_lock);
43476 + }
43477 +out:
43478 + br_read_unlock(vfsmount_lock);
43479 + write_sequnlock(&rename_lock);
43480 +
43481 + BUG_ON(retval == NULL);
43482 +
43483 + return retval;
43484 +}
43485 +
43486 +static void
43487 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
43488 +{
43489 + struct task_struct *task = current;
43490 + const struct cred *cred = current_cred();
43491 +
43492 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43493 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43494 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43495 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
43496 +
43497 + return;
43498 +}
43499 +
43500 +static void
43501 +gr_log_learn_sysctl(const char *path, const __u32 mode)
43502 +{
43503 + struct task_struct *task = current;
43504 + const struct cred *cred = current_cred();
43505 +
43506 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43507 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43508 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43509 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
43510 +
43511 + return;
43512 +}
43513 +
43514 +static void
43515 +gr_log_learn_id_change(const char type, const unsigned int real,
43516 + const unsigned int effective, const unsigned int fs)
43517 +{
43518 + struct task_struct *task = current;
43519 + const struct cred *cred = current_cred();
43520 +
43521 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
43522 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43523 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43524 + type, real, effective, fs, &task->signal->saved_ip);
43525 +
43526 + return;
43527 +}
43528 +
43529 +__u32
43530 +gr_check_link(const struct dentry * new_dentry,
43531 + const struct dentry * parent_dentry,
43532 + const struct vfsmount * parent_mnt,
43533 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
43534 +{
43535 + struct acl_object_label *obj;
43536 + __u32 oldmode, newmode;
43537 + __u32 needmode;
43538 +
43539 + if (unlikely(!(gr_status & GR_READY)))
43540 + return (GR_CREATE | GR_LINK);
43541 +
43542 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
43543 + oldmode = obj->mode;
43544 +
43545 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43546 + oldmode |= (GR_CREATE | GR_LINK);
43547 +
43548 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
43549 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43550 + needmode |= GR_SETID | GR_AUDIT_SETID;
43551 +
43552 + newmode =
43553 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
43554 + oldmode | needmode);
43555 +
43556 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
43557 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
43558 + GR_INHERIT | GR_AUDIT_INHERIT);
43559 +
43560 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
43561 + goto bad;
43562 +
43563 + if ((oldmode & needmode) != needmode)
43564 + goto bad;
43565 +
43566 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
43567 + if ((newmode & needmode) != needmode)
43568 + goto bad;
43569 +
43570 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
43571 + return newmode;
43572 +bad:
43573 + needmode = oldmode;
43574 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43575 + needmode |= GR_SETID;
43576 +
43577 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43578 + gr_log_learn(old_dentry, old_mnt, needmode);
43579 + return (GR_CREATE | GR_LINK);
43580 + } else if (newmode & GR_SUPPRESS)
43581 + return GR_SUPPRESS;
43582 + else
43583 + return 0;
43584 +}
43585 +
43586 +__u32
43587 +gr_search_file(const struct dentry * dentry, const __u32 mode,
43588 + const struct vfsmount * mnt)
43589 +{
43590 + __u32 retval = mode;
43591 + struct acl_subject_label *curracl;
43592 + struct acl_object_label *currobj;
43593 +
43594 + if (unlikely(!(gr_status & GR_READY)))
43595 + return (mode & ~GR_AUDITS);
43596 +
43597 + curracl = current->acl;
43598 +
43599 + currobj = chk_obj_label(dentry, mnt, curracl);
43600 + retval = currobj->mode & mode;
43601 +
43602 + /* if we're opening a specified transfer file for writing
43603 + (e.g. /dev/initctl), then transfer our role to init
43604 + */
43605 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
43606 + current->role->roletype & GR_ROLE_PERSIST)) {
43607 + struct task_struct *task = init_pid_ns.child_reaper;
43608 +
43609 + if (task->role != current->role) {
43610 + task->acl_sp_role = 0;
43611 + task->acl_role_id = current->acl_role_id;
43612 + task->role = current->role;
43613 + rcu_read_lock();
43614 + read_lock(&grsec_exec_file_lock);
43615 + gr_apply_subject_to_task(task);
43616 + read_unlock(&grsec_exec_file_lock);
43617 + rcu_read_unlock();
43618 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
43619 + }
43620 + }
43621 +
43622 + if (unlikely
43623 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
43624 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
43625 + __u32 new_mode = mode;
43626 +
43627 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43628 +
43629 + retval = new_mode;
43630 +
43631 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
43632 + new_mode |= GR_INHERIT;
43633 +
43634 + if (!(mode & GR_NOLEARN))
43635 + gr_log_learn(dentry, mnt, new_mode);
43636 + }
43637 +
43638 + return retval;
43639 +}
43640 +
43641 +__u32
43642 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
43643 + const struct vfsmount * mnt, const __u32 mode)
43644 +{
43645 + struct name_entry *match;
43646 + struct acl_object_label *matchpo;
43647 + struct acl_subject_label *curracl;
43648 + char *path;
43649 + __u32 retval;
43650 +
43651 + if (unlikely(!(gr_status & GR_READY)))
43652 + return (mode & ~GR_AUDITS);
43653 +
43654 + preempt_disable();
43655 + path = gr_to_filename_rbac(new_dentry, mnt);
43656 + match = lookup_name_entry_create(path);
43657 +
43658 + if (!match)
43659 + goto check_parent;
43660 +
43661 + curracl = current->acl;
43662 +
43663 + read_lock(&gr_inode_lock);
43664 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
43665 + read_unlock(&gr_inode_lock);
43666 +
43667 + if (matchpo) {
43668 + if ((matchpo->mode & mode) !=
43669 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
43670 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43671 + __u32 new_mode = mode;
43672 +
43673 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43674 +
43675 + gr_log_learn(new_dentry, mnt, new_mode);
43676 +
43677 + preempt_enable();
43678 + return new_mode;
43679 + }
43680 + preempt_enable();
43681 + return (matchpo->mode & mode);
43682 + }
43683 +
43684 + check_parent:
43685 + curracl = current->acl;
43686 +
43687 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
43688 + retval = matchpo->mode & mode;
43689 +
43690 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
43691 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
43692 + __u32 new_mode = mode;
43693 +
43694 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43695 +
43696 + gr_log_learn(new_dentry, mnt, new_mode);
43697 + preempt_enable();
43698 + return new_mode;
43699 + }
43700 +
43701 + preempt_enable();
43702 + return retval;
43703 +}
43704 +
43705 +int
43706 +gr_check_hidden_task(const struct task_struct *task)
43707 +{
43708 + if (unlikely(!(gr_status & GR_READY)))
43709 + return 0;
43710 +
43711 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
43712 + return 1;
43713 +
43714 + return 0;
43715 +}
43716 +
43717 +int
43718 +gr_check_protected_task(const struct task_struct *task)
43719 +{
43720 + if (unlikely(!(gr_status & GR_READY) || !task))
43721 + return 0;
43722 +
43723 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43724 + task->acl != current->acl)
43725 + return 1;
43726 +
43727 + return 0;
43728 +}
43729 +
43730 +int
43731 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
43732 +{
43733 + struct task_struct *p;
43734 + int ret = 0;
43735 +
43736 + if (unlikely(!(gr_status & GR_READY) || !pid))
43737 + return ret;
43738 +
43739 + read_lock(&tasklist_lock);
43740 + do_each_pid_task(pid, type, p) {
43741 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43742 + p->acl != current->acl) {
43743 + ret = 1;
43744 + goto out;
43745 + }
43746 + } while_each_pid_task(pid, type, p);
43747 +out:
43748 + read_unlock(&tasklist_lock);
43749 +
43750 + return ret;
43751 +}
43752 +
43753 +void
43754 +gr_copy_label(struct task_struct *tsk)
43755 +{
43756 + tsk->signal->used_accept = 0;
43757 + tsk->acl_sp_role = 0;
43758 + tsk->acl_role_id = current->acl_role_id;
43759 + tsk->acl = current->acl;
43760 + tsk->role = current->role;
43761 + tsk->signal->curr_ip = current->signal->curr_ip;
43762 + tsk->signal->saved_ip = current->signal->saved_ip;
43763 + if (current->exec_file)
43764 + get_file(current->exec_file);
43765 + tsk->exec_file = current->exec_file;
43766 + tsk->is_writable = current->is_writable;
43767 + if (unlikely(current->signal->used_accept)) {
43768 + current->signal->curr_ip = 0;
43769 + current->signal->saved_ip = 0;
43770 + }
43771 +
43772 + return;
43773 +}
43774 +
43775 +static void
43776 +gr_set_proc_res(struct task_struct *task)
43777 +{
43778 + struct acl_subject_label *proc;
43779 + unsigned short i;
43780 +
43781 + proc = task->acl;
43782 +
43783 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
43784 + return;
43785 +
43786 + for (i = 0; i < RLIM_NLIMITS; i++) {
43787 + if (!(proc->resmask & (1 << i)))
43788 + continue;
43789 +
43790 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
43791 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
43792 + }
43793 +
43794 + return;
43795 +}
43796 +
43797 +extern int __gr_process_user_ban(struct user_struct *user);
43798 +
43799 +int
43800 +gr_check_user_change(int real, int effective, int fs)
43801 +{
43802 + unsigned int i;
43803 + __u16 num;
43804 + uid_t *uidlist;
43805 + int curuid;
43806 + int realok = 0;
43807 + int effectiveok = 0;
43808 + int fsok = 0;
43809 +
43810 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
43811 + struct user_struct *user;
43812 +
43813 + if (real == -1)
43814 + goto skipit;
43815 +
43816 + user = find_user(real);
43817 + if (user == NULL)
43818 + goto skipit;
43819 +
43820 + if (__gr_process_user_ban(user)) {
43821 + /* for find_user */
43822 + free_uid(user);
43823 + return 1;
43824 + }
43825 +
43826 + /* for find_user */
43827 + free_uid(user);
43828 +
43829 +skipit:
43830 +#endif
43831 +
43832 + if (unlikely(!(gr_status & GR_READY)))
43833 + return 0;
43834 +
43835 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43836 + gr_log_learn_id_change('u', real, effective, fs);
43837 +
43838 + num = current->acl->user_trans_num;
43839 + uidlist = current->acl->user_transitions;
43840 +
43841 + if (uidlist == NULL)
43842 + return 0;
43843 +
43844 + if (real == -1)
43845 + realok = 1;
43846 + if (effective == -1)
43847 + effectiveok = 1;
43848 + if (fs == -1)
43849 + fsok = 1;
43850 +
43851 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
43852 + for (i = 0; i < num; i++) {
43853 + curuid = (int)uidlist[i];
43854 + if (real == curuid)
43855 + realok = 1;
43856 + if (effective == curuid)
43857 + effectiveok = 1;
43858 + if (fs == curuid)
43859 + fsok = 1;
43860 + }
43861 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
43862 + for (i = 0; i < num; i++) {
43863 + curuid = (int)uidlist[i];
43864 + if (real == curuid)
43865 + break;
43866 + if (effective == curuid)
43867 + break;
43868 + if (fs == curuid)
43869 + break;
43870 + }
43871 + /* not in deny list */
43872 + if (i == num) {
43873 + realok = 1;
43874 + effectiveok = 1;
43875 + fsok = 1;
43876 + }
43877 + }
43878 +
43879 + if (realok && effectiveok && fsok)
43880 + return 0;
43881 + else {
43882 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43883 + return 1;
43884 + }
43885 +}
43886 +
43887 +int
43888 +gr_check_group_change(int real, int effective, int fs)
43889 +{
43890 + unsigned int i;
43891 + __u16 num;
43892 + gid_t *gidlist;
43893 + int curgid;
43894 + int realok = 0;
43895 + int effectiveok = 0;
43896 + int fsok = 0;
43897 +
43898 + if (unlikely(!(gr_status & GR_READY)))
43899 + return 0;
43900 +
43901 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43902 + gr_log_learn_id_change('g', real, effective, fs);
43903 +
43904 + num = current->acl->group_trans_num;
43905 + gidlist = current->acl->group_transitions;
43906 +
43907 + if (gidlist == NULL)
43908 + return 0;
43909 +
43910 + if (real == -1)
43911 + realok = 1;
43912 + if (effective == -1)
43913 + effectiveok = 1;
43914 + if (fs == -1)
43915 + fsok = 1;
43916 +
43917 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
43918 + for (i = 0; i < num; i++) {
43919 + curgid = (int)gidlist[i];
43920 + if (real == curgid)
43921 + realok = 1;
43922 + if (effective == curgid)
43923 + effectiveok = 1;
43924 + if (fs == curgid)
43925 + fsok = 1;
43926 + }
43927 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
43928 + for (i = 0; i < num; i++) {
43929 + curgid = (int)gidlist[i];
43930 + if (real == curgid)
43931 + break;
43932 + if (effective == curgid)
43933 + break;
43934 + if (fs == curgid)
43935 + break;
43936 + }
43937 + /* not in deny list */
43938 + if (i == num) {
43939 + realok = 1;
43940 + effectiveok = 1;
43941 + fsok = 1;
43942 + }
43943 + }
43944 +
43945 + if (realok && effectiveok && fsok)
43946 + return 0;
43947 + else {
43948 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43949 + return 1;
43950 + }
43951 +}
43952 +
43953 +void
43954 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
43955 +{
43956 + struct acl_role_label *role = task->role;
43957 + struct acl_subject_label *subj = NULL;
43958 + struct acl_object_label *obj;
43959 + struct file *filp;
43960 +
43961 + if (unlikely(!(gr_status & GR_READY)))
43962 + return;
43963 +
43964 + filp = task->exec_file;
43965 +
43966 + /* kernel process, we'll give them the kernel role */
43967 + if (unlikely(!filp)) {
43968 + task->role = kernel_role;
43969 + task->acl = kernel_role->root_label;
43970 + return;
43971 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
43972 + role = lookup_acl_role_label(task, uid, gid);
43973 +
43974 + /* perform subject lookup in possibly new role
43975 + we can use this result below in the case where role == task->role
43976 + */
43977 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
43978 +
43979 + /* if we changed uid/gid, but result in the same role
43980 + and are using inheritance, don't lose the inherited subject
43981 + if current subject is other than what normal lookup
43982 + would result in, we arrived via inheritance, don't
43983 + lose subject
43984 + */
43985 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
43986 + (subj == task->acl)))
43987 + task->acl = subj;
43988 +
43989 + task->role = role;
43990 +
43991 + task->is_writable = 0;
43992 +
43993 + /* ignore additional mmap checks for processes that are writable
43994 + by the default ACL */
43995 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
43996 + if (unlikely(obj->mode & GR_WRITE))
43997 + task->is_writable = 1;
43998 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
43999 + if (unlikely(obj->mode & GR_WRITE))
44000 + task->is_writable = 1;
44001 +
44002 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44003 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
44004 +#endif
44005 +
44006 + gr_set_proc_res(task);
44007 +
44008 + return;
44009 +}
44010 +
44011 +int
44012 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
44013 + const int unsafe_share)
44014 +{
44015 + struct task_struct *task = current;
44016 + struct acl_subject_label *newacl;
44017 + struct acl_object_label *obj;
44018 + __u32 retmode;
44019 +
44020 + if (unlikely(!(gr_status & GR_READY)))
44021 + return 0;
44022 +
44023 + newacl = chk_subj_label(dentry, mnt, task->role);
44024 +
44025 + task_lock(task);
44026 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
44027 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
44028 + !(task->role->roletype & GR_ROLE_GOD) &&
44029 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
44030 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
44031 + task_unlock(task);
44032 + if (unsafe_share)
44033 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
44034 + else
44035 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
44036 + return -EACCES;
44037 + }
44038 + task_unlock(task);
44039 +
44040 + obj = chk_obj_label(dentry, mnt, task->acl);
44041 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
44042 +
44043 + if (!(task->acl->mode & GR_INHERITLEARN) &&
44044 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
44045 + if (obj->nested)
44046 + task->acl = obj->nested;
44047 + else
44048 + task->acl = newacl;
44049 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
44050 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
44051 +
44052 + task->is_writable = 0;
44053 +
44054 + /* ignore additional mmap checks for processes that are writable
44055 + by the default ACL */
44056 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
44057 + if (unlikely(obj->mode & GR_WRITE))
44058 + task->is_writable = 1;
44059 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
44060 + if (unlikely(obj->mode & GR_WRITE))
44061 + task->is_writable = 1;
44062 +
44063 + gr_set_proc_res(task);
44064 +
44065 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44066 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
44067 +#endif
44068 + return 0;
44069 +}
44070 +
44071 +/* always called with valid inodev ptr */
44072 +static void
44073 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
44074 +{
44075 + struct acl_object_label *matchpo;
44076 + struct acl_subject_label *matchps;
44077 + struct acl_subject_label *subj;
44078 + struct acl_role_label *role;
44079 + unsigned int x;
44080 +
44081 + FOR_EACH_ROLE_START(role)
44082 + FOR_EACH_SUBJECT_START(role, subj, x)
44083 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
44084 + matchpo->mode |= GR_DELETED;
44085 + FOR_EACH_SUBJECT_END(subj,x)
44086 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
44087 + if (subj->inode == ino && subj->device == dev)
44088 + subj->mode |= GR_DELETED;
44089 + FOR_EACH_NESTED_SUBJECT_END(subj)
44090 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
44091 + matchps->mode |= GR_DELETED;
44092 + FOR_EACH_ROLE_END(role)
44093 +
44094 + inodev->nentry->deleted = 1;
44095 +
44096 + return;
44097 +}
44098 +
44099 +void
44100 +gr_handle_delete(const ino_t ino, const dev_t dev)
44101 +{
44102 + struct inodev_entry *inodev;
44103 +
44104 + if (unlikely(!(gr_status & GR_READY)))
44105 + return;
44106 +
44107 + write_lock(&gr_inode_lock);
44108 + inodev = lookup_inodev_entry(ino, dev);
44109 + if (inodev != NULL)
44110 + do_handle_delete(inodev, ino, dev);
44111 + write_unlock(&gr_inode_lock);
44112 +
44113 + return;
44114 +}
44115 +
44116 +static void
44117 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
44118 + const ino_t newinode, const dev_t newdevice,
44119 + struct acl_subject_label *subj)
44120 +{
44121 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
44122 + struct acl_object_label *match;
44123 +
44124 + match = subj->obj_hash[index];
44125 +
44126 + while (match && (match->inode != oldinode ||
44127 + match->device != olddevice ||
44128 + !(match->mode & GR_DELETED)))
44129 + match = match->next;
44130 +
44131 + if (match && (match->inode == oldinode)
44132 + && (match->device == olddevice)
44133 + && (match->mode & GR_DELETED)) {
44134 + if (match->prev == NULL) {
44135 + subj->obj_hash[index] = match->next;
44136 + if (match->next != NULL)
44137 + match->next->prev = NULL;
44138 + } else {
44139 + match->prev->next = match->next;
44140 + if (match->next != NULL)
44141 + match->next->prev = match->prev;
44142 + }
44143 + match->prev = NULL;
44144 + match->next = NULL;
44145 + match->inode = newinode;
44146 + match->device = newdevice;
44147 + match->mode &= ~GR_DELETED;
44148 +
44149 + insert_acl_obj_label(match, subj);
44150 + }
44151 +
44152 + return;
44153 +}
44154 +
44155 +static void
44156 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
44157 + const ino_t newinode, const dev_t newdevice,
44158 + struct acl_role_label *role)
44159 +{
44160 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
44161 + struct acl_subject_label *match;
44162 +
44163 + match = role->subj_hash[index];
44164 +
44165 + while (match && (match->inode != oldinode ||
44166 + match->device != olddevice ||
44167 + !(match->mode & GR_DELETED)))
44168 + match = match->next;
44169 +
44170 + if (match && (match->inode == oldinode)
44171 + && (match->device == olddevice)
44172 + && (match->mode & GR_DELETED)) {
44173 + if (match->prev == NULL) {
44174 + role->subj_hash[index] = match->next;
44175 + if (match->next != NULL)
44176 + match->next->prev = NULL;
44177 + } else {
44178 + match->prev->next = match->next;
44179 + if (match->next != NULL)
44180 + match->next->prev = match->prev;
44181 + }
44182 + match->prev = NULL;
44183 + match->next = NULL;
44184 + match->inode = newinode;
44185 + match->device = newdevice;
44186 + match->mode &= ~GR_DELETED;
44187 +
44188 + insert_acl_subj_label(match, role);
44189 + }
44190 +
44191 + return;
44192 +}
44193 +
44194 +static void
44195 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
44196 + const ino_t newinode, const dev_t newdevice)
44197 +{
44198 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
44199 + struct inodev_entry *match;
44200 +
44201 + match = inodev_set.i_hash[index];
44202 +
44203 + while (match && (match->nentry->inode != oldinode ||
44204 + match->nentry->device != olddevice || !match->nentry->deleted))
44205 + match = match->next;
44206 +
44207 + if (match && (match->nentry->inode == oldinode)
44208 + && (match->nentry->device == olddevice) &&
44209 + match->nentry->deleted) {
44210 + if (match->prev == NULL) {
44211 + inodev_set.i_hash[index] = match->next;
44212 + if (match->next != NULL)
44213 + match->next->prev = NULL;
44214 + } else {
44215 + match->prev->next = match->next;
44216 + if (match->next != NULL)
44217 + match->next->prev = match->prev;
44218 + }
44219 + match->prev = NULL;
44220 + match->next = NULL;
44221 + match->nentry->inode = newinode;
44222 + match->nentry->device = newdevice;
44223 + match->nentry->deleted = 0;
44224 +
44225 + insert_inodev_entry(match);
44226 + }
44227 +
44228 + return;
44229 +}
44230 +
44231 +static void
44232 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
44233 + const struct vfsmount *mnt)
44234 +{
44235 + struct acl_subject_label *subj;
44236 + struct acl_role_label *role;
44237 + unsigned int x;
44238 + ino_t ino = dentry->d_inode->i_ino;
44239 + dev_t dev = __get_dev(dentry);
44240 +
44241 + FOR_EACH_ROLE_START(role)
44242 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
44243 +
44244 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
44245 + if ((subj->inode == ino) && (subj->device == dev)) {
44246 + subj->inode = ino;
44247 + subj->device = dev;
44248 + }
44249 + FOR_EACH_NESTED_SUBJECT_END(subj)
44250 + FOR_EACH_SUBJECT_START(role, subj, x)
44251 + update_acl_obj_label(matchn->inode, matchn->device,
44252 + ino, dev, subj);
44253 + FOR_EACH_SUBJECT_END(subj,x)
44254 + FOR_EACH_ROLE_END(role)
44255 +
44256 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
44257 +
44258 + return;
44259 +}
44260 +
44261 +void
44262 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
44263 +{
44264 + struct name_entry *matchn;
44265 +
44266 + if (unlikely(!(gr_status & GR_READY)))
44267 + return;
44268 +
44269 + preempt_disable();
44270 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
44271 +
44272 + if (unlikely((unsigned long)matchn)) {
44273 + write_lock(&gr_inode_lock);
44274 + do_handle_create(matchn, dentry, mnt);
44275 + write_unlock(&gr_inode_lock);
44276 + }
44277 + preempt_enable();
44278 +
44279 + return;
44280 +}
44281 +
44282 +void
44283 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
44284 + struct dentry *old_dentry,
44285 + struct dentry *new_dentry,
44286 + struct vfsmount *mnt, const __u8 replace)
44287 +{
44288 + struct name_entry *matchn;
44289 + struct inodev_entry *inodev;
44290 + ino_t old_ino = old_dentry->d_inode->i_ino;
44291 + dev_t old_dev = __get_dev(old_dentry);
44292 +
44293 + /* vfs_rename swaps the name and parent link for old_dentry and
44294 + new_dentry
44295 + at this point, old_dentry has the new name, parent link, and inode
44296 + for the renamed file
44297 + if a file is being replaced by a rename, new_dentry has the inode
44298 + and name for the replaced file
44299 + */
44300 +
44301 + if (unlikely(!(gr_status & GR_READY)))
44302 + return;
44303 +
44304 + preempt_disable();
44305 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
44306 +
44307 + /* we wouldn't have to check d_inode if it weren't for
44308 + NFS silly-renaming
44309 + */
44310 +
44311 + write_lock(&gr_inode_lock);
44312 + if (unlikely(replace && new_dentry->d_inode)) {
44313 + ino_t new_ino = new_dentry->d_inode->i_ino;
44314 + dev_t new_dev = __get_dev(new_dentry);
44315 +
44316 + inodev = lookup_inodev_entry(new_ino, new_dev);
44317 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
44318 + do_handle_delete(inodev, new_ino, new_dev);
44319 + }
44320 +
44321 + inodev = lookup_inodev_entry(old_ino, old_dev);
44322 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
44323 + do_handle_delete(inodev, old_ino, old_dev);
44324 +
44325 + if (unlikely((unsigned long)matchn))
44326 + do_handle_create(matchn, old_dentry, mnt);
44327 +
44328 + write_unlock(&gr_inode_lock);
44329 + preempt_enable();
44330 +
44331 + return;
44332 +}
44333 +
44334 +static int
44335 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
44336 + unsigned char **sum)
44337 +{
44338 + struct acl_role_label *r;
44339 + struct role_allowed_ip *ipp;
44340 + struct role_transition *trans;
44341 + unsigned int i;
44342 + int found = 0;
44343 + u32 curr_ip = current->signal->curr_ip;
44344 +
44345 + current->signal->saved_ip = curr_ip;
44346 +
44347 + /* check transition table */
44348 +
44349 + for (trans = current->role->transitions; trans; trans = trans->next) {
44350 + if (!strcmp(rolename, trans->rolename)) {
44351 + found = 1;
44352 + break;
44353 + }
44354 + }
44355 +
44356 + if (!found)
44357 + return 0;
44358 +
44359 + /* handle special roles that do not require authentication
44360 + and check ip */
44361 +
44362 + FOR_EACH_ROLE_START(r)
44363 + if (!strcmp(rolename, r->rolename) &&
44364 + (r->roletype & GR_ROLE_SPECIAL)) {
44365 + found = 0;
44366 + if (r->allowed_ips != NULL) {
44367 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
44368 + if ((ntohl(curr_ip) & ipp->netmask) ==
44369 + (ntohl(ipp->addr) & ipp->netmask))
44370 + found = 1;
44371 + }
44372 + } else
44373 + found = 2;
44374 + if (!found)
44375 + return 0;
44376 +
44377 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
44378 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
44379 + *salt = NULL;
44380 + *sum = NULL;
44381 + return 1;
44382 + }
44383 + }
44384 + FOR_EACH_ROLE_END(r)
44385 +
44386 + for (i = 0; i < num_sprole_pws; i++) {
44387 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
44388 + *salt = acl_special_roles[i]->salt;
44389 + *sum = acl_special_roles[i]->sum;
44390 + return 1;
44391 + }
44392 + }
44393 +
44394 + return 0;
44395 +}
44396 +
44397 +static void
44398 +assign_special_role(char *rolename)
44399 +{
44400 + struct acl_object_label *obj;
44401 + struct acl_role_label *r;
44402 + struct acl_role_label *assigned = NULL;
44403 + struct task_struct *tsk;
44404 + struct file *filp;
44405 +
44406 + FOR_EACH_ROLE_START(r)
44407 + if (!strcmp(rolename, r->rolename) &&
44408 + (r->roletype & GR_ROLE_SPECIAL)) {
44409 + assigned = r;
44410 + break;
44411 + }
44412 + FOR_EACH_ROLE_END(r)
44413 +
44414 + if (!assigned)
44415 + return;
44416 +
44417 + read_lock(&tasklist_lock);
44418 + read_lock(&grsec_exec_file_lock);
44419 +
44420 + tsk = current->real_parent;
44421 + if (tsk == NULL)
44422 + goto out_unlock;
44423 +
44424 + filp = tsk->exec_file;
44425 + if (filp == NULL)
44426 + goto out_unlock;
44427 +
44428 + tsk->is_writable = 0;
44429 +
44430 + tsk->acl_sp_role = 1;
44431 + tsk->acl_role_id = ++acl_sp_role_value;
44432 + tsk->role = assigned;
44433 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
44434 +
44435 + /* ignore additional mmap checks for processes that are writable
44436 + by the default ACL */
44437 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44438 + if (unlikely(obj->mode & GR_WRITE))
44439 + tsk->is_writable = 1;
44440 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
44441 + if (unlikely(obj->mode & GR_WRITE))
44442 + tsk->is_writable = 1;
44443 +
44444 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44445 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
44446 +#endif
44447 +
44448 +out_unlock:
44449 + read_unlock(&grsec_exec_file_lock);
44450 + read_unlock(&tasklist_lock);
44451 + return;
44452 +}
44453 +
44454 +int gr_check_secure_terminal(struct task_struct *task)
44455 +{
44456 + struct task_struct *p, *p2, *p3;
44457 + struct files_struct *files;
44458 + struct fdtable *fdt;
44459 + struct file *our_file = NULL, *file;
44460 + int i;
44461 +
44462 + if (task->signal->tty == NULL)
44463 + return 1;
44464 +
44465 + files = get_files_struct(task);
44466 + if (files != NULL) {
44467 + rcu_read_lock();
44468 + fdt = files_fdtable(files);
44469 + for (i=0; i < fdt->max_fds; i++) {
44470 + file = fcheck_files(files, i);
44471 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
44472 + get_file(file);
44473 + our_file = file;
44474 + }
44475 + }
44476 + rcu_read_unlock();
44477 + put_files_struct(files);
44478 + }
44479 +
44480 + if (our_file == NULL)
44481 + return 1;
44482 +
44483 + read_lock(&tasklist_lock);
44484 + do_each_thread(p2, p) {
44485 + files = get_files_struct(p);
44486 + if (files == NULL ||
44487 + (p->signal && p->signal->tty == task->signal->tty)) {
44488 + if (files != NULL)
44489 + put_files_struct(files);
44490 + continue;
44491 + }
44492 + rcu_read_lock();
44493 + fdt = files_fdtable(files);
44494 + for (i=0; i < fdt->max_fds; i++) {
44495 + file = fcheck_files(files, i);
44496 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
44497 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
44498 + p3 = task;
44499 + while (p3->pid > 0) {
44500 + if (p3 == p)
44501 + break;
44502 + p3 = p3->real_parent;
44503 + }
44504 + if (p3 == p)
44505 + break;
44506 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
44507 + gr_handle_alertkill(p);
44508 + rcu_read_unlock();
44509 + put_files_struct(files);
44510 + read_unlock(&tasklist_lock);
44511 + fput(our_file);
44512 + return 0;
44513 + }
44514 + }
44515 + rcu_read_unlock();
44516 + put_files_struct(files);
44517 + } while_each_thread(p2, p);
44518 + read_unlock(&tasklist_lock);
44519 +
44520 + fput(our_file);
44521 + return 1;
44522 +}
44523 +
44524 +ssize_t
44525 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
44526 +{
44527 + struct gr_arg_wrapper uwrap;
44528 + unsigned char *sprole_salt = NULL;
44529 + unsigned char *sprole_sum = NULL;
44530 + int error = sizeof (struct gr_arg_wrapper);
44531 + int error2 = 0;
44532 +
44533 + mutex_lock(&gr_dev_mutex);
44534 +
44535 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
44536 + error = -EPERM;
44537 + goto out;
44538 + }
44539 +
44540 + if (count != sizeof (struct gr_arg_wrapper)) {
44541 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
44542 + error = -EINVAL;
44543 + goto out;
44544 + }
44545 +
44546 +
44547 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
44548 + gr_auth_expires = 0;
44549 + gr_auth_attempts = 0;
44550 + }
44551 +
44552 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
44553 + error = -EFAULT;
44554 + goto out;
44555 + }
44556 +
44557 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
44558 + error = -EINVAL;
44559 + goto out;
44560 + }
44561 +
44562 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
44563 + error = -EFAULT;
44564 + goto out;
44565 + }
44566 +
44567 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44568 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44569 + time_after(gr_auth_expires, get_seconds())) {
44570 + error = -EBUSY;
44571 + goto out;
44572 + }
44573 +
44574 + /* if non-root trying to do anything other than use a special role,
44575 + do not attempt authentication, do not count towards authentication
44576 + locking
44577 + */
44578 +
44579 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
44580 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44581 + current_uid()) {
44582 + error = -EPERM;
44583 + goto out;
44584 + }
44585 +
44586 + /* ensure pw and special role name are null terminated */
44587 +
44588 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
44589 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
44590 +
44591 + /* Okay.
44592 + * We have our enough of the argument structure..(we have yet
44593 + * to copy_from_user the tables themselves) . Copy the tables
44594 + * only if we need them, i.e. for loading operations. */
44595 +
44596 + switch (gr_usermode->mode) {
44597 + case GR_STATUS:
44598 + if (gr_status & GR_READY) {
44599 + error = 1;
44600 + if (!gr_check_secure_terminal(current))
44601 + error = 3;
44602 + } else
44603 + error = 2;
44604 + goto out;
44605 + case GR_SHUTDOWN:
44606 + if ((gr_status & GR_READY)
44607 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44608 + pax_open_kernel();
44609 + gr_status &= ~GR_READY;
44610 + pax_close_kernel();
44611 +
44612 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
44613 + free_variables();
44614 + memset(gr_usermode, 0, sizeof (struct gr_arg));
44615 + memset(gr_system_salt, 0, GR_SALT_LEN);
44616 + memset(gr_system_sum, 0, GR_SHA_LEN);
44617 + } else if (gr_status & GR_READY) {
44618 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
44619 + error = -EPERM;
44620 + } else {
44621 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
44622 + error = -EAGAIN;
44623 + }
44624 + break;
44625 + case GR_ENABLE:
44626 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
44627 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
44628 + else {
44629 + if (gr_status & GR_READY)
44630 + error = -EAGAIN;
44631 + else
44632 + error = error2;
44633 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
44634 + }
44635 + break;
44636 + case GR_RELOAD:
44637 + if (!(gr_status & GR_READY)) {
44638 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
44639 + error = -EAGAIN;
44640 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44641 + preempt_disable();
44642 +
44643 + pax_open_kernel();
44644 + gr_status &= ~GR_READY;
44645 + pax_close_kernel();
44646 +
44647 + free_variables();
44648 + if (!(error2 = gracl_init(gr_usermode))) {
44649 + preempt_enable();
44650 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
44651 + } else {
44652 + preempt_enable();
44653 + error = error2;
44654 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44655 + }
44656 + } else {
44657 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44658 + error = -EPERM;
44659 + }
44660 + break;
44661 + case GR_SEGVMOD:
44662 + if (unlikely(!(gr_status & GR_READY))) {
44663 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
44664 + error = -EAGAIN;
44665 + break;
44666 + }
44667 +
44668 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44669 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
44670 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
44671 + struct acl_subject_label *segvacl;
44672 + segvacl =
44673 + lookup_acl_subj_label(gr_usermode->segv_inode,
44674 + gr_usermode->segv_device,
44675 + current->role);
44676 + if (segvacl) {
44677 + segvacl->crashes = 0;
44678 + segvacl->expires = 0;
44679 + }
44680 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
44681 + gr_remove_uid(gr_usermode->segv_uid);
44682 + }
44683 + } else {
44684 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
44685 + error = -EPERM;
44686 + }
44687 + break;
44688 + case GR_SPROLE:
44689 + case GR_SPROLEPAM:
44690 + if (unlikely(!(gr_status & GR_READY))) {
44691 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
44692 + error = -EAGAIN;
44693 + break;
44694 + }
44695 +
44696 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
44697 + current->role->expires = 0;
44698 + current->role->auth_attempts = 0;
44699 + }
44700 +
44701 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44702 + time_after(current->role->expires, get_seconds())) {
44703 + error = -EBUSY;
44704 + goto out;
44705 + }
44706 +
44707 + if (lookup_special_role_auth
44708 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
44709 + && ((!sprole_salt && !sprole_sum)
44710 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
44711 + char *p = "";
44712 + assign_special_role(gr_usermode->sp_role);
44713 + read_lock(&tasklist_lock);
44714 + if (current->real_parent)
44715 + p = current->real_parent->role->rolename;
44716 + read_unlock(&tasklist_lock);
44717 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
44718 + p, acl_sp_role_value);
44719 + } else {
44720 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
44721 + error = -EPERM;
44722 + if(!(current->role->auth_attempts++))
44723 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44724 +
44725 + goto out;
44726 + }
44727 + break;
44728 + case GR_UNSPROLE:
44729 + if (unlikely(!(gr_status & GR_READY))) {
44730 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
44731 + error = -EAGAIN;
44732 + break;
44733 + }
44734 +
44735 + if (current->role->roletype & GR_ROLE_SPECIAL) {
44736 + char *p = "";
44737 + int i = 0;
44738 +
44739 + read_lock(&tasklist_lock);
44740 + if (current->real_parent) {
44741 + p = current->real_parent->role->rolename;
44742 + i = current->real_parent->acl_role_id;
44743 + }
44744 + read_unlock(&tasklist_lock);
44745 +
44746 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
44747 + gr_set_acls(1);
44748 + } else {
44749 + error = -EPERM;
44750 + goto out;
44751 + }
44752 + break;
44753 + default:
44754 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
44755 + error = -EINVAL;
44756 + break;
44757 + }
44758 +
44759 + if (error != -EPERM)
44760 + goto out;
44761 +
44762 + if(!(gr_auth_attempts++))
44763 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44764 +
44765 + out:
44766 + mutex_unlock(&gr_dev_mutex);
44767 + return error;
44768 +}
44769 +
44770 +/* must be called with
44771 + rcu_read_lock();
44772 + read_lock(&tasklist_lock);
44773 + read_lock(&grsec_exec_file_lock);
44774 +*/
44775 +int gr_apply_subject_to_task(struct task_struct *task)
44776 +{
44777 + struct acl_object_label *obj;
44778 + char *tmpname;
44779 + struct acl_subject_label *tmpsubj;
44780 + struct file *filp;
44781 + struct name_entry *nmatch;
44782 +
44783 + filp = task->exec_file;
44784 + if (filp == NULL)
44785 + return 0;
44786 +
44787 + /* the following is to apply the correct subject
44788 + on binaries running when the RBAC system
44789 + is enabled, when the binaries have been
44790 + replaced or deleted since their execution
44791 + -----
44792 + when the RBAC system starts, the inode/dev
44793 + from exec_file will be one the RBAC system
44794 + is unaware of. It only knows the inode/dev
44795 + of the present file on disk, or the absence
44796 + of it.
44797 + */
44798 + preempt_disable();
44799 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
44800 +
44801 + nmatch = lookup_name_entry(tmpname);
44802 + preempt_enable();
44803 + tmpsubj = NULL;
44804 + if (nmatch) {
44805 + if (nmatch->deleted)
44806 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
44807 + else
44808 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
44809 + if (tmpsubj != NULL)
44810 + task->acl = tmpsubj;
44811 + }
44812 + if (tmpsubj == NULL)
44813 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
44814 + task->role);
44815 + if (task->acl) {
44816 + task->is_writable = 0;
44817 + /* ignore additional mmap checks for processes that are writable
44818 + by the default ACL */
44819 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44820 + if (unlikely(obj->mode & GR_WRITE))
44821 + task->is_writable = 1;
44822 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
44823 + if (unlikely(obj->mode & GR_WRITE))
44824 + task->is_writable = 1;
44825 +
44826 + gr_set_proc_res(task);
44827 +
44828 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44829 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
44830 +#endif
44831 + } else {
44832 + return 1;
44833 + }
44834 +
44835 + return 0;
44836 +}
44837 +
44838 +int
44839 +gr_set_acls(const int type)
44840 +{
44841 + struct task_struct *task, *task2;
44842 + struct acl_role_label *role = current->role;
44843 + __u16 acl_role_id = current->acl_role_id;
44844 + const struct cred *cred;
44845 + int ret;
44846 +
44847 + rcu_read_lock();
44848 + read_lock(&tasklist_lock);
44849 + read_lock(&grsec_exec_file_lock);
44850 + do_each_thread(task2, task) {
44851 + /* check to see if we're called from the exit handler,
44852 + if so, only replace ACLs that have inherited the admin
44853 + ACL */
44854 +
44855 + if (type && (task->role != role ||
44856 + task->acl_role_id != acl_role_id))
44857 + continue;
44858 +
44859 + task->acl_role_id = 0;
44860 + task->acl_sp_role = 0;
44861 +
44862 + if (task->exec_file) {
44863 + cred = __task_cred(task);
44864 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
44865 + ret = gr_apply_subject_to_task(task);
44866 + if (ret) {
44867 + read_unlock(&grsec_exec_file_lock);
44868 + read_unlock(&tasklist_lock);
44869 + rcu_read_unlock();
44870 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
44871 + return ret;
44872 + }
44873 + } else {
44874 + // it's a kernel process
44875 + task->role = kernel_role;
44876 + task->acl = kernel_role->root_label;
44877 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
44878 + task->acl->mode &= ~GR_PROCFIND;
44879 +#endif
44880 + }
44881 + } while_each_thread(task2, task);
44882 + read_unlock(&grsec_exec_file_lock);
44883 + read_unlock(&tasklist_lock);
44884 + rcu_read_unlock();
44885 +
44886 + return 0;
44887 +}
44888 +
44889 +void
44890 +gr_learn_resource(const struct task_struct *task,
44891 + const int res, const unsigned long wanted, const int gt)
44892 +{
44893 + struct acl_subject_label *acl;
44894 + const struct cred *cred;
44895 +
44896 + if (unlikely((gr_status & GR_READY) &&
44897 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
44898 + goto skip_reslog;
44899 +
44900 +#ifdef CONFIG_GRKERNSEC_RESLOG
44901 + gr_log_resource(task, res, wanted, gt);
44902 +#endif
44903 + skip_reslog:
44904 +
44905 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
44906 + return;
44907 +
44908 + acl = task->acl;
44909 +
44910 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
44911 + !(acl->resmask & (1 << (unsigned short) res))))
44912 + return;
44913 +
44914 + if (wanted >= acl->res[res].rlim_cur) {
44915 + unsigned long res_add;
44916 +
44917 + res_add = wanted;
44918 + switch (res) {
44919 + case RLIMIT_CPU:
44920 + res_add += GR_RLIM_CPU_BUMP;
44921 + break;
44922 + case RLIMIT_FSIZE:
44923 + res_add += GR_RLIM_FSIZE_BUMP;
44924 + break;
44925 + case RLIMIT_DATA:
44926 + res_add += GR_RLIM_DATA_BUMP;
44927 + break;
44928 + case RLIMIT_STACK:
44929 + res_add += GR_RLIM_STACK_BUMP;
44930 + break;
44931 + case RLIMIT_CORE:
44932 + res_add += GR_RLIM_CORE_BUMP;
44933 + break;
44934 + case RLIMIT_RSS:
44935 + res_add += GR_RLIM_RSS_BUMP;
44936 + break;
44937 + case RLIMIT_NPROC:
44938 + res_add += GR_RLIM_NPROC_BUMP;
44939 + break;
44940 + case RLIMIT_NOFILE:
44941 + res_add += GR_RLIM_NOFILE_BUMP;
44942 + break;
44943 + case RLIMIT_MEMLOCK:
44944 + res_add += GR_RLIM_MEMLOCK_BUMP;
44945 + break;
44946 + case RLIMIT_AS:
44947 + res_add += GR_RLIM_AS_BUMP;
44948 + break;
44949 + case RLIMIT_LOCKS:
44950 + res_add += GR_RLIM_LOCKS_BUMP;
44951 + break;
44952 + case RLIMIT_SIGPENDING:
44953 + res_add += GR_RLIM_SIGPENDING_BUMP;
44954 + break;
44955 + case RLIMIT_MSGQUEUE:
44956 + res_add += GR_RLIM_MSGQUEUE_BUMP;
44957 + break;
44958 + case RLIMIT_NICE:
44959 + res_add += GR_RLIM_NICE_BUMP;
44960 + break;
44961 + case RLIMIT_RTPRIO:
44962 + res_add += GR_RLIM_RTPRIO_BUMP;
44963 + break;
44964 + case RLIMIT_RTTIME:
44965 + res_add += GR_RLIM_RTTIME_BUMP;
44966 + break;
44967 + }
44968 +
44969 + acl->res[res].rlim_cur = res_add;
44970 +
44971 + if (wanted > acl->res[res].rlim_max)
44972 + acl->res[res].rlim_max = res_add;
44973 +
44974 + /* only log the subject filename, since resource logging is supported for
44975 + single-subject learning only */
44976 + rcu_read_lock();
44977 + cred = __task_cred(task);
44978 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
44979 + task->role->roletype, cred->uid, cred->gid, acl->filename,
44980 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
44981 + "", (unsigned long) res, &task->signal->saved_ip);
44982 + rcu_read_unlock();
44983 + }
44984 +
44985 + return;
44986 +}
44987 +
44988 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
44989 +void
44990 +pax_set_initial_flags(struct linux_binprm *bprm)
44991 +{
44992 + struct task_struct *task = current;
44993 + struct acl_subject_label *proc;
44994 + unsigned long flags;
44995 +
44996 + if (unlikely(!(gr_status & GR_READY)))
44997 + return;
44998 +
44999 + flags = pax_get_flags(task);
45000 +
45001 + proc = task->acl;
45002 +
45003 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
45004 + flags &= ~MF_PAX_PAGEEXEC;
45005 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
45006 + flags &= ~MF_PAX_SEGMEXEC;
45007 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
45008 + flags &= ~MF_PAX_RANDMMAP;
45009 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
45010 + flags &= ~MF_PAX_EMUTRAMP;
45011 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
45012 + flags &= ~MF_PAX_MPROTECT;
45013 +
45014 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
45015 + flags |= MF_PAX_PAGEEXEC;
45016 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
45017 + flags |= MF_PAX_SEGMEXEC;
45018 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
45019 + flags |= MF_PAX_RANDMMAP;
45020 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
45021 + flags |= MF_PAX_EMUTRAMP;
45022 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
45023 + flags |= MF_PAX_MPROTECT;
45024 +
45025 + pax_set_flags(task, flags);
45026 +
45027 + return;
45028 +}
45029 +#endif
45030 +
45031 +#ifdef CONFIG_SYSCTL
45032 +/* Eric Biederman likes breaking userland ABI and every inode-based security
45033 + system to save 35kb of memory */
45034 +
45035 +/* we modify the passed in filename, but adjust it back before returning */
45036 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
45037 +{
45038 + struct name_entry *nmatch;
45039 + char *p, *lastp = NULL;
45040 + struct acl_object_label *obj = NULL, *tmp;
45041 + struct acl_subject_label *tmpsubj;
45042 + char c = '\0';
45043 +
45044 + read_lock(&gr_inode_lock);
45045 +
45046 + p = name + len - 1;
45047 + do {
45048 + nmatch = lookup_name_entry(name);
45049 + if (lastp != NULL)
45050 + *lastp = c;
45051 +
45052 + if (nmatch == NULL)
45053 + goto next_component;
45054 + tmpsubj = current->acl;
45055 + do {
45056 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
45057 + if (obj != NULL) {
45058 + tmp = obj->globbed;
45059 + while (tmp) {
45060 + if (!glob_match(tmp->filename, name)) {
45061 + obj = tmp;
45062 + goto found_obj;
45063 + }
45064 + tmp = tmp->next;
45065 + }
45066 + goto found_obj;
45067 + }
45068 + } while ((tmpsubj = tmpsubj->parent_subject));
45069 +next_component:
45070 + /* end case */
45071 + if (p == name)
45072 + break;
45073 +
45074 + while (*p != '/')
45075 + p--;
45076 + if (p == name)
45077 + lastp = p + 1;
45078 + else {
45079 + lastp = p;
45080 + p--;
45081 + }
45082 + c = *lastp;
45083 + *lastp = '\0';
45084 + } while (1);
45085 +found_obj:
45086 + read_unlock(&gr_inode_lock);
45087 + /* obj returned will always be non-null */
45088 + return obj;
45089 +}
45090 +
45091 +/* returns 0 when allowing, non-zero on error
45092 + op of 0 is used for readdir, so we don't log the names of hidden files
45093 +*/
45094 +__u32
45095 +gr_handle_sysctl(const struct ctl_table *table, const int op)
45096 +{
45097 + struct ctl_table *tmp;
45098 + const char *proc_sys = "/proc/sys";
45099 + char *path;
45100 + struct acl_object_label *obj;
45101 + unsigned short len = 0, pos = 0, depth = 0, i;
45102 + __u32 err = 0;
45103 + __u32 mode = 0;
45104 +
45105 + if (unlikely(!(gr_status & GR_READY)))
45106 + return 0;
45107 +
45108 + /* for now, ignore operations on non-sysctl entries if it's not a
45109 + readdir*/
45110 + if (table->child != NULL && op != 0)
45111 + return 0;
45112 +
45113 + mode |= GR_FIND;
45114 + /* it's only a read if it's an entry, read on dirs is for readdir */
45115 + if (op & MAY_READ)
45116 + mode |= GR_READ;
45117 + if (op & MAY_WRITE)
45118 + mode |= GR_WRITE;
45119 +
45120 + preempt_disable();
45121 +
45122 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
45123 +
45124 + /* it's only a read/write if it's an actual entry, not a dir
45125 + (which are opened for readdir)
45126 + */
45127 +
45128 + /* convert the requested sysctl entry into a pathname */
45129 +
45130 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
45131 + len += strlen(tmp->procname);
45132 + len++;
45133 + depth++;
45134 + }
45135 +
45136 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
45137 + /* deny */
45138 + goto out;
45139 + }
45140 +
45141 + memset(path, 0, PAGE_SIZE);
45142 +
45143 + memcpy(path, proc_sys, strlen(proc_sys));
45144 +
45145 + pos += strlen(proc_sys);
45146 +
45147 + for (; depth > 0; depth--) {
45148 + path[pos] = '/';
45149 + pos++;
45150 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
45151 + if (depth == i) {
45152 + memcpy(path + pos, tmp->procname,
45153 + strlen(tmp->procname));
45154 + pos += strlen(tmp->procname);
45155 + }
45156 + i++;
45157 + }
45158 + }
45159 +
45160 + obj = gr_lookup_by_name(path, pos);
45161 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
45162 +
45163 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
45164 + ((err & mode) != mode))) {
45165 + __u32 new_mode = mode;
45166 +
45167 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45168 +
45169 + err = 0;
45170 + gr_log_learn_sysctl(path, new_mode);
45171 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
45172 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
45173 + err = -ENOENT;
45174 + } else if (!(err & GR_FIND)) {
45175 + err = -ENOENT;
45176 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
45177 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
45178 + path, (mode & GR_READ) ? " reading" : "",
45179 + (mode & GR_WRITE) ? " writing" : "");
45180 + err = -EACCES;
45181 + } else if ((err & mode) != mode) {
45182 + err = -EACCES;
45183 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
45184 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
45185 + path, (mode & GR_READ) ? " reading" : "",
45186 + (mode & GR_WRITE) ? " writing" : "");
45187 + err = 0;
45188 + } else
45189 + err = 0;
45190 +
45191 + out:
45192 + preempt_enable();
45193 +
45194 + return err;
45195 +}
45196 +#endif
45197 +
45198 +int
45199 +gr_handle_proc_ptrace(struct task_struct *task)
45200 +{
45201 + struct file *filp;
45202 + struct task_struct *tmp = task;
45203 + struct task_struct *curtemp = current;
45204 + __u32 retmode;
45205 +
45206 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
45207 + if (unlikely(!(gr_status & GR_READY)))
45208 + return 0;
45209 +#endif
45210 +
45211 + read_lock(&tasklist_lock);
45212 + read_lock(&grsec_exec_file_lock);
45213 + filp = task->exec_file;
45214 +
45215 + while (tmp->pid > 0) {
45216 + if (tmp == curtemp)
45217 + break;
45218 + tmp = tmp->real_parent;
45219 + }
45220 +
45221 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
45222 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
45223 + read_unlock(&grsec_exec_file_lock);
45224 + read_unlock(&tasklist_lock);
45225 + return 1;
45226 + }
45227 +
45228 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
45229 + if (!(gr_status & GR_READY)) {
45230 + read_unlock(&grsec_exec_file_lock);
45231 + read_unlock(&tasklist_lock);
45232 + return 0;
45233 + }
45234 +#endif
45235 +
45236 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
45237 + read_unlock(&grsec_exec_file_lock);
45238 + read_unlock(&tasklist_lock);
45239 +
45240 + if (retmode & GR_NOPTRACE)
45241 + return 1;
45242 +
45243 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
45244 + && (current->acl != task->acl || (current->acl != current->role->root_label
45245 + && current->pid != task->pid)))
45246 + return 1;
45247 +
45248 + return 0;
45249 +}
45250 +
45251 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
45252 +{
45253 + if (unlikely(!(gr_status & GR_READY)))
45254 + return;
45255 +
45256 + if (!(current->role->roletype & GR_ROLE_GOD))
45257 + return;
45258 +
45259 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
45260 + p->role->rolename, gr_task_roletype_to_char(p),
45261 + p->acl->filename);
45262 +}
45263 +
45264 +int
45265 +gr_handle_ptrace(struct task_struct *task, const long request)
45266 +{
45267 + struct task_struct *tmp = task;
45268 + struct task_struct *curtemp = current;
45269 + __u32 retmode;
45270 +
45271 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
45272 + if (unlikely(!(gr_status & GR_READY)))
45273 + return 0;
45274 +#endif
45275 +
45276 + read_lock(&tasklist_lock);
45277 + while (tmp->pid > 0) {
45278 + if (tmp == curtemp)
45279 + break;
45280 + tmp = tmp->real_parent;
45281 + }
45282 +
45283 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
45284 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
45285 + read_unlock(&tasklist_lock);
45286 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45287 + return 1;
45288 + }
45289 + read_unlock(&tasklist_lock);
45290 +
45291 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
45292 + if (!(gr_status & GR_READY))
45293 + return 0;
45294 +#endif
45295 +
45296 + read_lock(&grsec_exec_file_lock);
45297 + if (unlikely(!task->exec_file)) {
45298 + read_unlock(&grsec_exec_file_lock);
45299 + return 0;
45300 + }
45301 +
45302 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
45303 + read_unlock(&grsec_exec_file_lock);
45304 +
45305 + if (retmode & GR_NOPTRACE) {
45306 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45307 + return 1;
45308 + }
45309 +
45310 + if (retmode & GR_PTRACERD) {
45311 + switch (request) {
45312 + case PTRACE_POKETEXT:
45313 + case PTRACE_POKEDATA:
45314 + case PTRACE_POKEUSR:
45315 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
45316 + case PTRACE_SETREGS:
45317 + case PTRACE_SETFPREGS:
45318 +#endif
45319 +#ifdef CONFIG_X86
45320 + case PTRACE_SETFPXREGS:
45321 +#endif
45322 +#ifdef CONFIG_ALTIVEC
45323 + case PTRACE_SETVRREGS:
45324 +#endif
45325 + return 1;
45326 + default:
45327 + return 0;
45328 + }
45329 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
45330 + !(current->role->roletype & GR_ROLE_GOD) &&
45331 + (current->acl != task->acl)) {
45332 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45333 + return 1;
45334 + }
45335 +
45336 + return 0;
45337 +}
45338 +
45339 +static int is_writable_mmap(const struct file *filp)
45340 +{
45341 + struct task_struct *task = current;
45342 + struct acl_object_label *obj, *obj2;
45343 +
45344 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
45345 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
45346 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
45347 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
45348 + task->role->root_label);
45349 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
45350 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
45351 + return 1;
45352 + }
45353 + }
45354 + return 0;
45355 +}
45356 +
45357 +int
45358 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
45359 +{
45360 + __u32 mode;
45361 +
45362 + if (unlikely(!file || !(prot & PROT_EXEC)))
45363 + return 1;
45364 +
45365 + if (is_writable_mmap(file))
45366 + return 0;
45367 +
45368 + mode =
45369 + gr_search_file(file->f_path.dentry,
45370 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
45371 + file->f_path.mnt);
45372 +
45373 + if (!gr_tpe_allow(file))
45374 + return 0;
45375 +
45376 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
45377 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45378 + return 0;
45379 + } else if (unlikely(!(mode & GR_EXEC))) {
45380 + return 0;
45381 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
45382 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45383 + return 1;
45384 + }
45385 +
45386 + return 1;
45387 +}
45388 +
45389 +int
45390 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
45391 +{
45392 + __u32 mode;
45393 +
45394 + if (unlikely(!file || !(prot & PROT_EXEC)))
45395 + return 1;
45396 +
45397 + if (is_writable_mmap(file))
45398 + return 0;
45399 +
45400 + mode =
45401 + gr_search_file(file->f_path.dentry,
45402 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
45403 + file->f_path.mnt);
45404 +
45405 + if (!gr_tpe_allow(file))
45406 + return 0;
45407 +
45408 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
45409 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45410 + return 0;
45411 + } else if (unlikely(!(mode & GR_EXEC))) {
45412 + return 0;
45413 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
45414 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45415 + return 1;
45416 + }
45417 +
45418 + return 1;
45419 +}
45420 +
45421 +void
45422 +gr_acl_handle_psacct(struct task_struct *task, const long code)
45423 +{
45424 + unsigned long runtime;
45425 + unsigned long cputime;
45426 + unsigned int wday, cday;
45427 + __u8 whr, chr;
45428 + __u8 wmin, cmin;
45429 + __u8 wsec, csec;
45430 + struct timespec timeval;
45431 +
45432 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
45433 + !(task->acl->mode & GR_PROCACCT)))
45434 + return;
45435 +
45436 + do_posix_clock_monotonic_gettime(&timeval);
45437 + runtime = timeval.tv_sec - task->start_time.tv_sec;
45438 + wday = runtime / (3600 * 24);
45439 + runtime -= wday * (3600 * 24);
45440 + whr = runtime / 3600;
45441 + runtime -= whr * 3600;
45442 + wmin = runtime / 60;
45443 + runtime -= wmin * 60;
45444 + wsec = runtime;
45445 +
45446 + cputime = (task->utime + task->stime) / HZ;
45447 + cday = cputime / (3600 * 24);
45448 + cputime -= cday * (3600 * 24);
45449 + chr = cputime / 3600;
45450 + cputime -= chr * 3600;
45451 + cmin = cputime / 60;
45452 + cputime -= cmin * 60;
45453 + csec = cputime;
45454 +
45455 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
45456 +
45457 + return;
45458 +}
45459 +
45460 +void gr_set_kernel_label(struct task_struct *task)
45461 +{
45462 + if (gr_status & GR_READY) {
45463 + task->role = kernel_role;
45464 + task->acl = kernel_role->root_label;
45465 + }
45466 + return;
45467 +}
45468 +
45469 +#ifdef CONFIG_TASKSTATS
45470 +int gr_is_taskstats_denied(int pid)
45471 +{
45472 + struct task_struct *task;
45473 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45474 + const struct cred *cred;
45475 +#endif
45476 + int ret = 0;
45477 +
45478 + /* restrict taskstats viewing to un-chrooted root users
45479 + who have the 'view' subject flag if the RBAC system is enabled
45480 + */
45481 +
45482 + rcu_read_lock();
45483 + read_lock(&tasklist_lock);
45484 + task = find_task_by_vpid(pid);
45485 + if (task) {
45486 +#ifdef CONFIG_GRKERNSEC_CHROOT
45487 + if (proc_is_chrooted(task))
45488 + ret = -EACCES;
45489 +#endif
45490 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45491 + cred = __task_cred(task);
45492 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45493 + if (cred->uid != 0)
45494 + ret = -EACCES;
45495 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45496 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
45497 + ret = -EACCES;
45498 +#endif
45499 +#endif
45500 + if (gr_status & GR_READY) {
45501 + if (!(task->acl->mode & GR_VIEW))
45502 + ret = -EACCES;
45503 + }
45504 + } else
45505 + ret = -ENOENT;
45506 +
45507 + read_unlock(&tasklist_lock);
45508 + rcu_read_unlock();
45509 +
45510 + return ret;
45511 +}
45512 +#endif
45513 +
45514 +/* AUXV entries are filled via a descendant of search_binary_handler
45515 + after we've already applied the subject for the target
45516 +*/
45517 +int gr_acl_enable_at_secure(void)
45518 +{
45519 + if (unlikely(!(gr_status & GR_READY)))
45520 + return 0;
45521 +
45522 + if (current->acl->mode & GR_ATSECURE)
45523 + return 1;
45524 +
45525 + return 0;
45526 +}
45527 +
45528 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
45529 +{
45530 + struct task_struct *task = current;
45531 + struct dentry *dentry = file->f_path.dentry;
45532 + struct vfsmount *mnt = file->f_path.mnt;
45533 + struct acl_object_label *obj, *tmp;
45534 + struct acl_subject_label *subj;
45535 + unsigned int bufsize;
45536 + int is_not_root;
45537 + char *path;
45538 + dev_t dev = __get_dev(dentry);
45539 +
45540 + if (unlikely(!(gr_status & GR_READY)))
45541 + return 1;
45542 +
45543 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45544 + return 1;
45545 +
45546 + /* ignore Eric Biederman */
45547 + if (IS_PRIVATE(dentry->d_inode))
45548 + return 1;
45549 +
45550 + subj = task->acl;
45551 + do {
45552 + obj = lookup_acl_obj_label(ino, dev, subj);
45553 + if (obj != NULL)
45554 + return (obj->mode & GR_FIND) ? 1 : 0;
45555 + } while ((subj = subj->parent_subject));
45556 +
45557 + /* this is purely an optimization since we're looking for an object
45558 + for the directory we're doing a readdir on
45559 + if it's possible for any globbed object to match the entry we're
45560 + filling into the directory, then the object we find here will be
45561 + an anchor point with attached globbed objects
45562 + */
45563 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
45564 + if (obj->globbed == NULL)
45565 + return (obj->mode & GR_FIND) ? 1 : 0;
45566 +
45567 + is_not_root = ((obj->filename[0] == '/') &&
45568 + (obj->filename[1] == '\0')) ? 0 : 1;
45569 + bufsize = PAGE_SIZE - namelen - is_not_root;
45570 +
45571 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
45572 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
45573 + return 1;
45574 +
45575 + preempt_disable();
45576 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
45577 + bufsize);
45578 +
45579 + bufsize = strlen(path);
45580 +
45581 + /* if base is "/", don't append an additional slash */
45582 + if (is_not_root)
45583 + *(path + bufsize) = '/';
45584 + memcpy(path + bufsize + is_not_root, name, namelen);
45585 + *(path + bufsize + namelen + is_not_root) = '\0';
45586 +
45587 + tmp = obj->globbed;
45588 + while (tmp) {
45589 + if (!glob_match(tmp->filename, path)) {
45590 + preempt_enable();
45591 + return (tmp->mode & GR_FIND) ? 1 : 0;
45592 + }
45593 + tmp = tmp->next;
45594 + }
45595 + preempt_enable();
45596 + return (obj->mode & GR_FIND) ? 1 : 0;
45597 +}
45598 +
45599 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
45600 +EXPORT_SYMBOL(gr_acl_is_enabled);
45601 +#endif
45602 +EXPORT_SYMBOL(gr_learn_resource);
45603 +EXPORT_SYMBOL(gr_set_kernel_label);
45604 +#ifdef CONFIG_SECURITY
45605 +EXPORT_SYMBOL(gr_check_user_change);
45606 +EXPORT_SYMBOL(gr_check_group_change);
45607 +#endif
45608 +
45609 diff -urNp linux-2.6.39.4/grsecurity/gracl_cap.c linux-2.6.39.4/grsecurity/gracl_cap.c
45610 --- linux-2.6.39.4/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
45611 +++ linux-2.6.39.4/grsecurity/gracl_cap.c 2011-08-05 19:44:37.000000000 -0400
45612 @@ -0,0 +1,139 @@
45613 +#include <linux/kernel.h>
45614 +#include <linux/module.h>
45615 +#include <linux/sched.h>
45616 +#include <linux/gracl.h>
45617 +#include <linux/grsecurity.h>
45618 +#include <linux/grinternal.h>
45619 +
45620 +static const char *captab_log[] = {
45621 + "CAP_CHOWN",
45622 + "CAP_DAC_OVERRIDE",
45623 + "CAP_DAC_READ_SEARCH",
45624 + "CAP_FOWNER",
45625 + "CAP_FSETID",
45626 + "CAP_KILL",
45627 + "CAP_SETGID",
45628 + "CAP_SETUID",
45629 + "CAP_SETPCAP",
45630 + "CAP_LINUX_IMMUTABLE",
45631 + "CAP_NET_BIND_SERVICE",
45632 + "CAP_NET_BROADCAST",
45633 + "CAP_NET_ADMIN",
45634 + "CAP_NET_RAW",
45635 + "CAP_IPC_LOCK",
45636 + "CAP_IPC_OWNER",
45637 + "CAP_SYS_MODULE",
45638 + "CAP_SYS_RAWIO",
45639 + "CAP_SYS_CHROOT",
45640 + "CAP_SYS_PTRACE",
45641 + "CAP_SYS_PACCT",
45642 + "CAP_SYS_ADMIN",
45643 + "CAP_SYS_BOOT",
45644 + "CAP_SYS_NICE",
45645 + "CAP_SYS_RESOURCE",
45646 + "CAP_SYS_TIME",
45647 + "CAP_SYS_TTY_CONFIG",
45648 + "CAP_MKNOD",
45649 + "CAP_LEASE",
45650 + "CAP_AUDIT_WRITE",
45651 + "CAP_AUDIT_CONTROL",
45652 + "CAP_SETFCAP",
45653 + "CAP_MAC_OVERRIDE",
45654 + "CAP_MAC_ADMIN",
45655 + "CAP_SYSLOG"
45656 +};
45657 +
45658 +EXPORT_SYMBOL(gr_is_capable);
45659 +EXPORT_SYMBOL(gr_is_capable_nolog);
45660 +
45661 +int
45662 +gr_is_capable(const int cap)
45663 +{
45664 + struct task_struct *task = current;
45665 + const struct cred *cred = current_cred();
45666 + struct acl_subject_label *curracl;
45667 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45668 + kernel_cap_t cap_audit = __cap_empty_set;
45669 +
45670 + if (!gr_acl_is_enabled())
45671 + return 1;
45672 +
45673 + curracl = task->acl;
45674 +
45675 + cap_drop = curracl->cap_lower;
45676 + cap_mask = curracl->cap_mask;
45677 + cap_audit = curracl->cap_invert_audit;
45678 +
45679 + while ((curracl = curracl->parent_subject)) {
45680 + /* if the cap isn't specified in the current computed mask but is specified in the
45681 + current level subject, and is lowered in the current level subject, then add
45682 + it to the set of dropped capabilities
45683 + otherwise, add the current level subject's mask to the current computed mask
45684 + */
45685 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45686 + cap_raise(cap_mask, cap);
45687 + if (cap_raised(curracl->cap_lower, cap))
45688 + cap_raise(cap_drop, cap);
45689 + if (cap_raised(curracl->cap_invert_audit, cap))
45690 + cap_raise(cap_audit, cap);
45691 + }
45692 + }
45693 +
45694 + if (!cap_raised(cap_drop, cap)) {
45695 + if (cap_raised(cap_audit, cap))
45696 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
45697 + return 1;
45698 + }
45699 +
45700 + curracl = task->acl;
45701 +
45702 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
45703 + && cap_raised(cred->cap_effective, cap)) {
45704 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
45705 + task->role->roletype, cred->uid,
45706 + cred->gid, task->exec_file ?
45707 + gr_to_filename(task->exec_file->f_path.dentry,
45708 + task->exec_file->f_path.mnt) : curracl->filename,
45709 + curracl->filename, 0UL,
45710 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
45711 + return 1;
45712 + }
45713 +
45714 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
45715 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
45716 + return 0;
45717 +}
45718 +
45719 +int
45720 +gr_is_capable_nolog(const int cap)
45721 +{
45722 + struct acl_subject_label *curracl;
45723 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45724 +
45725 + if (!gr_acl_is_enabled())
45726 + return 1;
45727 +
45728 + curracl = current->acl;
45729 +
45730 + cap_drop = curracl->cap_lower;
45731 + cap_mask = curracl->cap_mask;
45732 +
45733 + while ((curracl = curracl->parent_subject)) {
45734 + /* if the cap isn't specified in the current computed mask but is specified in the
45735 + current level subject, and is lowered in the current level subject, then add
45736 + it to the set of dropped capabilities
45737 + otherwise, add the current level subject's mask to the current computed mask
45738 + */
45739 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45740 + cap_raise(cap_mask, cap);
45741 + if (cap_raised(curracl->cap_lower, cap))
45742 + cap_raise(cap_drop, cap);
45743 + }
45744 + }
45745 +
45746 + if (!cap_raised(cap_drop, cap))
45747 + return 1;
45748 +
45749 + return 0;
45750 +}
45751 +
45752 diff -urNp linux-2.6.39.4/grsecurity/gracl_fs.c linux-2.6.39.4/grsecurity/gracl_fs.c
45753 --- linux-2.6.39.4/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
45754 +++ linux-2.6.39.4/grsecurity/gracl_fs.c 2011-08-05 19:44:37.000000000 -0400
45755 @@ -0,0 +1,431 @@
45756 +#include <linux/kernel.h>
45757 +#include <linux/sched.h>
45758 +#include <linux/types.h>
45759 +#include <linux/fs.h>
45760 +#include <linux/file.h>
45761 +#include <linux/stat.h>
45762 +#include <linux/grsecurity.h>
45763 +#include <linux/grinternal.h>
45764 +#include <linux/gracl.h>
45765 +
45766 +__u32
45767 +gr_acl_handle_hidden_file(const struct dentry * dentry,
45768 + const struct vfsmount * mnt)
45769 +{
45770 + __u32 mode;
45771 +
45772 + if (unlikely(!dentry->d_inode))
45773 + return GR_FIND;
45774 +
45775 + mode =
45776 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
45777 +
45778 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
45779 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45780 + return mode;
45781 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
45782 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45783 + return 0;
45784 + } else if (unlikely(!(mode & GR_FIND)))
45785 + return 0;
45786 +
45787 + return GR_FIND;
45788 +}
45789 +
45790 +__u32
45791 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
45792 + const int fmode)
45793 +{
45794 + __u32 reqmode = GR_FIND;
45795 + __u32 mode;
45796 +
45797 + if (unlikely(!dentry->d_inode))
45798 + return reqmode;
45799 +
45800 + if (unlikely(fmode & O_APPEND))
45801 + reqmode |= GR_APPEND;
45802 + else if (unlikely(fmode & FMODE_WRITE))
45803 + reqmode |= GR_WRITE;
45804 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45805 + reqmode |= GR_READ;
45806 + if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
45807 + reqmode &= ~GR_READ;
45808 + mode =
45809 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45810 + mnt);
45811 +
45812 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45813 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45814 + reqmode & GR_READ ? " reading" : "",
45815 + reqmode & GR_WRITE ? " writing" : reqmode &
45816 + GR_APPEND ? " appending" : "");
45817 + return reqmode;
45818 + } else
45819 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45820 + {
45821 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45822 + reqmode & GR_READ ? " reading" : "",
45823 + reqmode & GR_WRITE ? " writing" : reqmode &
45824 + GR_APPEND ? " appending" : "");
45825 + return 0;
45826 + } else if (unlikely((mode & reqmode) != reqmode))
45827 + return 0;
45828 +
45829 + return reqmode;
45830 +}
45831 +
45832 +__u32
45833 +gr_acl_handle_creat(const struct dentry * dentry,
45834 + const struct dentry * p_dentry,
45835 + const struct vfsmount * p_mnt, const int fmode,
45836 + const int imode)
45837 +{
45838 + __u32 reqmode = GR_WRITE | GR_CREATE;
45839 + __u32 mode;
45840 +
45841 + if (unlikely(fmode & O_APPEND))
45842 + reqmode |= GR_APPEND;
45843 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45844 + reqmode |= GR_READ;
45845 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
45846 + reqmode |= GR_SETID;
45847 +
45848 + mode =
45849 + gr_check_create(dentry, p_dentry, p_mnt,
45850 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
45851 +
45852 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45853 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45854 + reqmode & GR_READ ? " reading" : "",
45855 + reqmode & GR_WRITE ? " writing" : reqmode &
45856 + GR_APPEND ? " appending" : "");
45857 + return reqmode;
45858 + } else
45859 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45860 + {
45861 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45862 + reqmode & GR_READ ? " reading" : "",
45863 + reqmode & GR_WRITE ? " writing" : reqmode &
45864 + GR_APPEND ? " appending" : "");
45865 + return 0;
45866 + } else if (unlikely((mode & reqmode) != reqmode))
45867 + return 0;
45868 +
45869 + return reqmode;
45870 +}
45871 +
45872 +__u32
45873 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
45874 + const int fmode)
45875 +{
45876 + __u32 mode, reqmode = GR_FIND;
45877 +
45878 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
45879 + reqmode |= GR_EXEC;
45880 + if (fmode & S_IWOTH)
45881 + reqmode |= GR_WRITE;
45882 + if (fmode & S_IROTH)
45883 + reqmode |= GR_READ;
45884 +
45885 + mode =
45886 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45887 + mnt);
45888 +
45889 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45890 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45891 + reqmode & GR_READ ? " reading" : "",
45892 + reqmode & GR_WRITE ? " writing" : "",
45893 + reqmode & GR_EXEC ? " executing" : "");
45894 + return reqmode;
45895 + } else
45896 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45897 + {
45898 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45899 + reqmode & GR_READ ? " reading" : "",
45900 + reqmode & GR_WRITE ? " writing" : "",
45901 + reqmode & GR_EXEC ? " executing" : "");
45902 + return 0;
45903 + } else if (unlikely((mode & reqmode) != reqmode))
45904 + return 0;
45905 +
45906 + return reqmode;
45907 +}
45908 +
45909 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
45910 +{
45911 + __u32 mode;
45912 +
45913 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
45914 +
45915 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
45916 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
45917 + return mode;
45918 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
45919 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
45920 + return 0;
45921 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
45922 + return 0;
45923 +
45924 + return (reqmode);
45925 +}
45926 +
45927 +__u32
45928 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
45929 +{
45930 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
45931 +}
45932 +
45933 +__u32
45934 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
45935 +{
45936 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
45937 +}
45938 +
45939 +__u32
45940 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
45941 +{
45942 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
45943 +}
45944 +
45945 +__u32
45946 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
45947 +{
45948 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
45949 +}
45950 +
45951 +__u32
45952 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
45953 + mode_t mode)
45954 +{
45955 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
45956 + return 1;
45957 +
45958 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45959 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45960 + GR_FCHMOD_ACL_MSG);
45961 + } else {
45962 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
45963 + }
45964 +}
45965 +
45966 +__u32
45967 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
45968 + mode_t mode)
45969 +{
45970 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45971 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45972 + GR_CHMOD_ACL_MSG);
45973 + } else {
45974 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
45975 + }
45976 +}
45977 +
45978 +__u32
45979 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
45980 +{
45981 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
45982 +}
45983 +
45984 +__u32
45985 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
45986 +{
45987 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
45988 +}
45989 +
45990 +__u32
45991 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
45992 +{
45993 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
45994 +}
45995 +
45996 +__u32
45997 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
45998 +{
45999 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
46000 + GR_UNIXCONNECT_ACL_MSG);
46001 +}
46002 +
46003 +/* hardlinks require at minimum create permission,
46004 + any additional privilege required is based on the
46005 + privilege of the file being linked to
46006 +*/
46007 +__u32
46008 +gr_acl_handle_link(const struct dentry * new_dentry,
46009 + const struct dentry * parent_dentry,
46010 + const struct vfsmount * parent_mnt,
46011 + const struct dentry * old_dentry,
46012 + const struct vfsmount * old_mnt, const char *to)
46013 +{
46014 + __u32 mode;
46015 + __u32 needmode = GR_CREATE | GR_LINK;
46016 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
46017 +
46018 + mode =
46019 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
46020 + old_mnt);
46021 +
46022 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
46023 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
46024 + return mode;
46025 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
46026 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
46027 + return 0;
46028 + } else if (unlikely((mode & needmode) != needmode))
46029 + return 0;
46030 +
46031 + return 1;
46032 +}
46033 +
46034 +__u32
46035 +gr_acl_handle_symlink(const struct dentry * new_dentry,
46036 + const struct dentry * parent_dentry,
46037 + const struct vfsmount * parent_mnt, const char *from)
46038 +{
46039 + __u32 needmode = GR_WRITE | GR_CREATE;
46040 + __u32 mode;
46041 +
46042 + mode =
46043 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
46044 + GR_CREATE | GR_AUDIT_CREATE |
46045 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
46046 +
46047 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
46048 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
46049 + return mode;
46050 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
46051 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
46052 + return 0;
46053 + } else if (unlikely((mode & needmode) != needmode))
46054 + return 0;
46055 +
46056 + return (GR_WRITE | GR_CREATE);
46057 +}
46058 +
46059 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
46060 +{
46061 + __u32 mode;
46062 +
46063 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
46064 +
46065 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
46066 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
46067 + return mode;
46068 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
46069 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
46070 + return 0;
46071 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
46072 + return 0;
46073 +
46074 + return (reqmode);
46075 +}
46076 +
46077 +__u32
46078 +gr_acl_handle_mknod(const struct dentry * new_dentry,
46079 + const struct dentry * parent_dentry,
46080 + const struct vfsmount * parent_mnt,
46081 + const int mode)
46082 +{
46083 + __u32 reqmode = GR_WRITE | GR_CREATE;
46084 + if (unlikely(mode & (S_ISUID | S_ISGID)))
46085 + reqmode |= GR_SETID;
46086 +
46087 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
46088 + reqmode, GR_MKNOD_ACL_MSG);
46089 +}
46090 +
46091 +__u32
46092 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
46093 + const struct dentry *parent_dentry,
46094 + const struct vfsmount *parent_mnt)
46095 +{
46096 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
46097 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
46098 +}
46099 +
46100 +#define RENAME_CHECK_SUCCESS(old, new) \
46101 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
46102 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
46103 +
46104 +int
46105 +gr_acl_handle_rename(struct dentry *new_dentry,
46106 + struct dentry *parent_dentry,
46107 + const struct vfsmount *parent_mnt,
46108 + struct dentry *old_dentry,
46109 + struct inode *old_parent_inode,
46110 + struct vfsmount *old_mnt, const char *newname)
46111 +{
46112 + __u32 comp1, comp2;
46113 + int error = 0;
46114 +
46115 + if (unlikely(!gr_acl_is_enabled()))
46116 + return 0;
46117 +
46118 + if (!new_dentry->d_inode) {
46119 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
46120 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
46121 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
46122 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
46123 + GR_DELETE | GR_AUDIT_DELETE |
46124 + GR_AUDIT_READ | GR_AUDIT_WRITE |
46125 + GR_SUPPRESS, old_mnt);
46126 + } else {
46127 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
46128 + GR_CREATE | GR_DELETE |
46129 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
46130 + GR_AUDIT_READ | GR_AUDIT_WRITE |
46131 + GR_SUPPRESS, parent_mnt);
46132 + comp2 =
46133 + gr_search_file(old_dentry,
46134 + GR_READ | GR_WRITE | GR_AUDIT_READ |
46135 + GR_DELETE | GR_AUDIT_DELETE |
46136 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
46137 + }
46138 +
46139 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
46140 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
46141 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
46142 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
46143 + && !(comp2 & GR_SUPPRESS)) {
46144 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
46145 + error = -EACCES;
46146 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
46147 + error = -EACCES;
46148 +
46149 + return error;
46150 +}
46151 +
46152 +void
46153 +gr_acl_handle_exit(void)
46154 +{
46155 + u16 id;
46156 + char *rolename;
46157 + struct file *exec_file;
46158 +
46159 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
46160 + !(current->role->roletype & GR_ROLE_PERSIST))) {
46161 + id = current->acl_role_id;
46162 + rolename = current->role->rolename;
46163 + gr_set_acls(1);
46164 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
46165 + }
46166 +
46167 + write_lock(&grsec_exec_file_lock);
46168 + exec_file = current->exec_file;
46169 + current->exec_file = NULL;
46170 + write_unlock(&grsec_exec_file_lock);
46171 +
46172 + if (exec_file)
46173 + fput(exec_file);
46174 +}
46175 +
46176 +int
46177 +gr_acl_handle_procpidmem(const struct task_struct *task)
46178 +{
46179 + if (unlikely(!gr_acl_is_enabled()))
46180 + return 0;
46181 +
46182 + if (task != current && task->acl->mode & GR_PROTPROCFD)
46183 + return -EACCES;
46184 +
46185 + return 0;
46186 +}
46187 diff -urNp linux-2.6.39.4/grsecurity/gracl_ip.c linux-2.6.39.4/grsecurity/gracl_ip.c
46188 --- linux-2.6.39.4/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
46189 +++ linux-2.6.39.4/grsecurity/gracl_ip.c 2011-08-05 19:44:37.000000000 -0400
46190 @@ -0,0 +1,381 @@
46191 +#include <linux/kernel.h>
46192 +#include <asm/uaccess.h>
46193 +#include <asm/errno.h>
46194 +#include <net/sock.h>
46195 +#include <linux/file.h>
46196 +#include <linux/fs.h>
46197 +#include <linux/net.h>
46198 +#include <linux/in.h>
46199 +#include <linux/skbuff.h>
46200 +#include <linux/ip.h>
46201 +#include <linux/udp.h>
46202 +#include <linux/types.h>
46203 +#include <linux/sched.h>
46204 +#include <linux/netdevice.h>
46205 +#include <linux/inetdevice.h>
46206 +#include <linux/gracl.h>
46207 +#include <linux/grsecurity.h>
46208 +#include <linux/grinternal.h>
46209 +
46210 +#define GR_BIND 0x01
46211 +#define GR_CONNECT 0x02
46212 +#define GR_INVERT 0x04
46213 +#define GR_BINDOVERRIDE 0x08
46214 +#define GR_CONNECTOVERRIDE 0x10
46215 +#define GR_SOCK_FAMILY 0x20
46216 +
46217 +static const char * gr_protocols[IPPROTO_MAX] = {
46218 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
46219 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
46220 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
46221 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
46222 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
46223 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
46224 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
46225 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
46226 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
46227 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
46228 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
46229 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
46230 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
46231 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
46232 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
46233 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
46234 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
46235 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
46236 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
46237 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
46238 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
46239 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
46240 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
46241 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
46242 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
46243 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
46244 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
46245 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
46246 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
46247 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
46248 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
46249 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
46250 + };
46251 +
46252 +static const char * gr_socktypes[SOCK_MAX] = {
46253 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
46254 + "unknown:7", "unknown:8", "unknown:9", "packet"
46255 + };
46256 +
46257 +static const char * gr_sockfamilies[AF_MAX+1] = {
46258 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
46259 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
46260 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
46261 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
46262 + };
46263 +
46264 +const char *
46265 +gr_proto_to_name(unsigned char proto)
46266 +{
46267 + return gr_protocols[proto];
46268 +}
46269 +
46270 +const char *
46271 +gr_socktype_to_name(unsigned char type)
46272 +{
46273 + return gr_socktypes[type];
46274 +}
46275 +
46276 +const char *
46277 +gr_sockfamily_to_name(unsigned char family)
46278 +{
46279 + return gr_sockfamilies[family];
46280 +}
46281 +
46282 +int
46283 +gr_search_socket(const int domain, const int type, const int protocol)
46284 +{
46285 + struct acl_subject_label *curr;
46286 + const struct cred *cred = current_cred();
46287 +
46288 + if (unlikely(!gr_acl_is_enabled()))
46289 + goto exit;
46290 +
46291 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
46292 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
46293 + goto exit; // let the kernel handle it
46294 +
46295 + curr = current->acl;
46296 +
46297 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
46298 + /* the family is allowed, if this is PF_INET allow it only if
46299 + the extra sock type/protocol checks pass */
46300 + if (domain == PF_INET)
46301 + goto inet_check;
46302 + goto exit;
46303 + } else {
46304 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46305 + __u32 fakeip = 0;
46306 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46307 + current->role->roletype, cred->uid,
46308 + cred->gid, current->exec_file ?
46309 + gr_to_filename(current->exec_file->f_path.dentry,
46310 + current->exec_file->f_path.mnt) :
46311 + curr->filename, curr->filename,
46312 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
46313 + &current->signal->saved_ip);
46314 + goto exit;
46315 + }
46316 + goto exit_fail;
46317 + }
46318 +
46319 +inet_check:
46320 + /* the rest of this checking is for IPv4 only */
46321 + if (!curr->ips)
46322 + goto exit;
46323 +
46324 + if ((curr->ip_type & (1 << type)) &&
46325 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
46326 + goto exit;
46327 +
46328 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46329 + /* we don't place acls on raw sockets , and sometimes
46330 + dgram/ip sockets are opened for ioctl and not
46331 + bind/connect, so we'll fake a bind learn log */
46332 + if (type == SOCK_RAW || type == SOCK_PACKET) {
46333 + __u32 fakeip = 0;
46334 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46335 + current->role->roletype, cred->uid,
46336 + cred->gid, current->exec_file ?
46337 + gr_to_filename(current->exec_file->f_path.dentry,
46338 + current->exec_file->f_path.mnt) :
46339 + curr->filename, curr->filename,
46340 + &fakeip, 0, type,
46341 + protocol, GR_CONNECT, &current->signal->saved_ip);
46342 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
46343 + __u32 fakeip = 0;
46344 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46345 + current->role->roletype, cred->uid,
46346 + cred->gid, current->exec_file ?
46347 + gr_to_filename(current->exec_file->f_path.dentry,
46348 + current->exec_file->f_path.mnt) :
46349 + curr->filename, curr->filename,
46350 + &fakeip, 0, type,
46351 + protocol, GR_BIND, &current->signal->saved_ip);
46352 + }
46353 + /* we'll log when they use connect or bind */
46354 + goto exit;
46355 + }
46356 +
46357 +exit_fail:
46358 + if (domain == PF_INET)
46359 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
46360 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
46361 + else
46362 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
46363 + gr_socktype_to_name(type), protocol);
46364 +
46365 + return 0;
46366 +exit:
46367 + return 1;
46368 +}
46369 +
46370 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
46371 +{
46372 + if ((ip->mode & mode) &&
46373 + (ip_port >= ip->low) &&
46374 + (ip_port <= ip->high) &&
46375 + ((ntohl(ip_addr) & our_netmask) ==
46376 + (ntohl(our_addr) & our_netmask))
46377 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
46378 + && (ip->type & (1 << type))) {
46379 + if (ip->mode & GR_INVERT)
46380 + return 2; // specifically denied
46381 + else
46382 + return 1; // allowed
46383 + }
46384 +
46385 + return 0; // not specifically allowed, may continue parsing
46386 +}
46387 +
46388 +static int
46389 +gr_search_connectbind(const int full_mode, struct sock *sk,
46390 + struct sockaddr_in *addr, const int type)
46391 +{
46392 + char iface[IFNAMSIZ] = {0};
46393 + struct acl_subject_label *curr;
46394 + struct acl_ip_label *ip;
46395 + struct inet_sock *isk;
46396 + struct net_device *dev;
46397 + struct in_device *idev;
46398 + unsigned long i;
46399 + int ret;
46400 + int mode = full_mode & (GR_BIND | GR_CONNECT);
46401 + __u32 ip_addr = 0;
46402 + __u32 our_addr;
46403 + __u32 our_netmask;
46404 + char *p;
46405 + __u16 ip_port = 0;
46406 + const struct cred *cred = current_cred();
46407 +
46408 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
46409 + return 0;
46410 +
46411 + curr = current->acl;
46412 + isk = inet_sk(sk);
46413 +
46414 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
46415 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
46416 + addr->sin_addr.s_addr = curr->inaddr_any_override;
46417 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
46418 + struct sockaddr_in saddr;
46419 + int err;
46420 +
46421 + saddr.sin_family = AF_INET;
46422 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
46423 + saddr.sin_port = isk->inet_sport;
46424 +
46425 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46426 + if (err)
46427 + return err;
46428 +
46429 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46430 + if (err)
46431 + return err;
46432 + }
46433 +
46434 + if (!curr->ips)
46435 + return 0;
46436 +
46437 + ip_addr = addr->sin_addr.s_addr;
46438 + ip_port = ntohs(addr->sin_port);
46439 +
46440 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46441 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46442 + current->role->roletype, cred->uid,
46443 + cred->gid, current->exec_file ?
46444 + gr_to_filename(current->exec_file->f_path.dentry,
46445 + current->exec_file->f_path.mnt) :
46446 + curr->filename, curr->filename,
46447 + &ip_addr, ip_port, type,
46448 + sk->sk_protocol, mode, &current->signal->saved_ip);
46449 + return 0;
46450 + }
46451 +
46452 + for (i = 0; i < curr->ip_num; i++) {
46453 + ip = *(curr->ips + i);
46454 + if (ip->iface != NULL) {
46455 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
46456 + p = strchr(iface, ':');
46457 + if (p != NULL)
46458 + *p = '\0';
46459 + dev = dev_get_by_name(sock_net(sk), iface);
46460 + if (dev == NULL)
46461 + continue;
46462 + idev = in_dev_get(dev);
46463 + if (idev == NULL) {
46464 + dev_put(dev);
46465 + continue;
46466 + }
46467 + rcu_read_lock();
46468 + for_ifa(idev) {
46469 + if (!strcmp(ip->iface, ifa->ifa_label)) {
46470 + our_addr = ifa->ifa_address;
46471 + our_netmask = 0xffffffff;
46472 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46473 + if (ret == 1) {
46474 + rcu_read_unlock();
46475 + in_dev_put(idev);
46476 + dev_put(dev);
46477 + return 0;
46478 + } else if (ret == 2) {
46479 + rcu_read_unlock();
46480 + in_dev_put(idev);
46481 + dev_put(dev);
46482 + goto denied;
46483 + }
46484 + }
46485 + } endfor_ifa(idev);
46486 + rcu_read_unlock();
46487 + in_dev_put(idev);
46488 + dev_put(dev);
46489 + } else {
46490 + our_addr = ip->addr;
46491 + our_netmask = ip->netmask;
46492 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46493 + if (ret == 1)
46494 + return 0;
46495 + else if (ret == 2)
46496 + goto denied;
46497 + }
46498 + }
46499 +
46500 +denied:
46501 + if (mode == GR_BIND)
46502 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46503 + else if (mode == GR_CONNECT)
46504 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46505 +
46506 + return -EACCES;
46507 +}
46508 +
46509 +int
46510 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
46511 +{
46512 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
46513 +}
46514 +
46515 +int
46516 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
46517 +{
46518 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
46519 +}
46520 +
46521 +int gr_search_listen(struct socket *sock)
46522 +{
46523 + struct sock *sk = sock->sk;
46524 + struct sockaddr_in addr;
46525 +
46526 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46527 + addr.sin_port = inet_sk(sk)->inet_sport;
46528 +
46529 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46530 +}
46531 +
46532 +int gr_search_accept(struct socket *sock)
46533 +{
46534 + struct sock *sk = sock->sk;
46535 + struct sockaddr_in addr;
46536 +
46537 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46538 + addr.sin_port = inet_sk(sk)->inet_sport;
46539 +
46540 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46541 +}
46542 +
46543 +int
46544 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
46545 +{
46546 + if (addr)
46547 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
46548 + else {
46549 + struct sockaddr_in sin;
46550 + const struct inet_sock *inet = inet_sk(sk);
46551 +
46552 + sin.sin_addr.s_addr = inet->inet_daddr;
46553 + sin.sin_port = inet->inet_dport;
46554 +
46555 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46556 + }
46557 +}
46558 +
46559 +int
46560 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
46561 +{
46562 + struct sockaddr_in sin;
46563 +
46564 + if (unlikely(skb->len < sizeof (struct udphdr)))
46565 + return 0; // skip this packet
46566 +
46567 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
46568 + sin.sin_port = udp_hdr(skb)->source;
46569 +
46570 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46571 +}
46572 diff -urNp linux-2.6.39.4/grsecurity/gracl_learn.c linux-2.6.39.4/grsecurity/gracl_learn.c
46573 --- linux-2.6.39.4/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
46574 +++ linux-2.6.39.4/grsecurity/gracl_learn.c 2011-08-05 19:44:37.000000000 -0400
46575 @@ -0,0 +1,207 @@
46576 +#include <linux/kernel.h>
46577 +#include <linux/mm.h>
46578 +#include <linux/sched.h>
46579 +#include <linux/poll.h>
46580 +#include <linux/string.h>
46581 +#include <linux/file.h>
46582 +#include <linux/types.h>
46583 +#include <linux/vmalloc.h>
46584 +#include <linux/grinternal.h>
46585 +
46586 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
46587 + size_t count, loff_t *ppos);
46588 +extern int gr_acl_is_enabled(void);
46589 +
46590 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
46591 +static int gr_learn_attached;
46592 +
46593 +/* use a 512k buffer */
46594 +#define LEARN_BUFFER_SIZE (512 * 1024)
46595 +
46596 +static DEFINE_SPINLOCK(gr_learn_lock);
46597 +static DEFINE_MUTEX(gr_learn_user_mutex);
46598 +
46599 +/* we need to maintain two buffers, so that the kernel context of grlearn
46600 + uses a semaphore around the userspace copying, and the other kernel contexts
46601 + use a spinlock when copying into the buffer, since they cannot sleep
46602 +*/
46603 +static char *learn_buffer;
46604 +static char *learn_buffer_user;
46605 +static int learn_buffer_len;
46606 +static int learn_buffer_user_len;
46607 +
46608 +static ssize_t
46609 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
46610 +{
46611 + DECLARE_WAITQUEUE(wait, current);
46612 + ssize_t retval = 0;
46613 +
46614 + add_wait_queue(&learn_wait, &wait);
46615 + set_current_state(TASK_INTERRUPTIBLE);
46616 + do {
46617 + mutex_lock(&gr_learn_user_mutex);
46618 + spin_lock(&gr_learn_lock);
46619 + if (learn_buffer_len)
46620 + break;
46621 + spin_unlock(&gr_learn_lock);
46622 + mutex_unlock(&gr_learn_user_mutex);
46623 + if (file->f_flags & O_NONBLOCK) {
46624 + retval = -EAGAIN;
46625 + goto out;
46626 + }
46627 + if (signal_pending(current)) {
46628 + retval = -ERESTARTSYS;
46629 + goto out;
46630 + }
46631 +
46632 + schedule();
46633 + } while (1);
46634 +
46635 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
46636 + learn_buffer_user_len = learn_buffer_len;
46637 + retval = learn_buffer_len;
46638 + learn_buffer_len = 0;
46639 +
46640 + spin_unlock(&gr_learn_lock);
46641 +
46642 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
46643 + retval = -EFAULT;
46644 +
46645 + mutex_unlock(&gr_learn_user_mutex);
46646 +out:
46647 + set_current_state(TASK_RUNNING);
46648 + remove_wait_queue(&learn_wait, &wait);
46649 + return retval;
46650 +}
46651 +
46652 +static unsigned int
46653 +poll_learn(struct file * file, poll_table * wait)
46654 +{
46655 + poll_wait(file, &learn_wait, wait);
46656 +
46657 + if (learn_buffer_len)
46658 + return (POLLIN | POLLRDNORM);
46659 +
46660 + return 0;
46661 +}
46662 +
46663 +void
46664 +gr_clear_learn_entries(void)
46665 +{
46666 + char *tmp;
46667 +
46668 + mutex_lock(&gr_learn_user_mutex);
46669 + spin_lock(&gr_learn_lock);
46670 + tmp = learn_buffer;
46671 + learn_buffer = NULL;
46672 + spin_unlock(&gr_learn_lock);
46673 + if (tmp)
46674 + vfree(tmp);
46675 + if (learn_buffer_user != NULL) {
46676 + vfree(learn_buffer_user);
46677 + learn_buffer_user = NULL;
46678 + }
46679 + learn_buffer_len = 0;
46680 + mutex_unlock(&gr_learn_user_mutex);
46681 +
46682 + return;
46683 +}
46684 +
46685 +void
46686 +gr_add_learn_entry(const char *fmt, ...)
46687 +{
46688 + va_list args;
46689 + unsigned int len;
46690 +
46691 + if (!gr_learn_attached)
46692 + return;
46693 +
46694 + spin_lock(&gr_learn_lock);
46695 +
46696 + /* leave a gap at the end so we know when it's "full" but don't have to
46697 + compute the exact length of the string we're trying to append
46698 + */
46699 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
46700 + spin_unlock(&gr_learn_lock);
46701 + wake_up_interruptible(&learn_wait);
46702 + return;
46703 + }
46704 + if (learn_buffer == NULL) {
46705 + spin_unlock(&gr_learn_lock);
46706 + return;
46707 + }
46708 +
46709 + va_start(args, fmt);
46710 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
46711 + va_end(args);
46712 +
46713 + learn_buffer_len += len + 1;
46714 +
46715 + spin_unlock(&gr_learn_lock);
46716 + wake_up_interruptible(&learn_wait);
46717 +
46718 + return;
46719 +}
46720 +
46721 +static int
46722 +open_learn(struct inode *inode, struct file *file)
46723 +{
46724 + if (file->f_mode & FMODE_READ && gr_learn_attached)
46725 + return -EBUSY;
46726 + if (file->f_mode & FMODE_READ) {
46727 + int retval = 0;
46728 + mutex_lock(&gr_learn_user_mutex);
46729 + if (learn_buffer == NULL)
46730 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
46731 + if (learn_buffer_user == NULL)
46732 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
46733 + if (learn_buffer == NULL) {
46734 + retval = -ENOMEM;
46735 + goto out_error;
46736 + }
46737 + if (learn_buffer_user == NULL) {
46738 + retval = -ENOMEM;
46739 + goto out_error;
46740 + }
46741 + learn_buffer_len = 0;
46742 + learn_buffer_user_len = 0;
46743 + gr_learn_attached = 1;
46744 +out_error:
46745 + mutex_unlock(&gr_learn_user_mutex);
46746 + return retval;
46747 + }
46748 + return 0;
46749 +}
46750 +
46751 +static int
46752 +close_learn(struct inode *inode, struct file *file)
46753 +{
46754 + if (file->f_mode & FMODE_READ) {
46755 + char *tmp = NULL;
46756 + mutex_lock(&gr_learn_user_mutex);
46757 + spin_lock(&gr_learn_lock);
46758 + tmp = learn_buffer;
46759 + learn_buffer = NULL;
46760 + spin_unlock(&gr_learn_lock);
46761 + if (tmp)
46762 + vfree(tmp);
46763 + if (learn_buffer_user != NULL) {
46764 + vfree(learn_buffer_user);
46765 + learn_buffer_user = NULL;
46766 + }
46767 + learn_buffer_len = 0;
46768 + learn_buffer_user_len = 0;
46769 + gr_learn_attached = 0;
46770 + mutex_unlock(&gr_learn_user_mutex);
46771 + }
46772 +
46773 + return 0;
46774 +}
46775 +
46776 +const struct file_operations grsec_fops = {
46777 + .read = read_learn,
46778 + .write = write_grsec_handler,
46779 + .open = open_learn,
46780 + .release = close_learn,
46781 + .poll = poll_learn,
46782 +};
46783 diff -urNp linux-2.6.39.4/grsecurity/gracl_res.c linux-2.6.39.4/grsecurity/gracl_res.c
46784 --- linux-2.6.39.4/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
46785 +++ linux-2.6.39.4/grsecurity/gracl_res.c 2011-08-05 19:44:37.000000000 -0400
46786 @@ -0,0 +1,68 @@
46787 +#include <linux/kernel.h>
46788 +#include <linux/sched.h>
46789 +#include <linux/gracl.h>
46790 +#include <linux/grinternal.h>
46791 +
46792 +static const char *restab_log[] = {
46793 + [RLIMIT_CPU] = "RLIMIT_CPU",
46794 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
46795 + [RLIMIT_DATA] = "RLIMIT_DATA",
46796 + [RLIMIT_STACK] = "RLIMIT_STACK",
46797 + [RLIMIT_CORE] = "RLIMIT_CORE",
46798 + [RLIMIT_RSS] = "RLIMIT_RSS",
46799 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
46800 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
46801 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
46802 + [RLIMIT_AS] = "RLIMIT_AS",
46803 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
46804 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
46805 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
46806 + [RLIMIT_NICE] = "RLIMIT_NICE",
46807 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
46808 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
46809 + [GR_CRASH_RES] = "RLIMIT_CRASH"
46810 +};
46811 +
46812 +void
46813 +gr_log_resource(const struct task_struct *task,
46814 + const int res, const unsigned long wanted, const int gt)
46815 +{
46816 + const struct cred *cred;
46817 + unsigned long rlim;
46818 +
46819 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
46820 + return;
46821 +
46822 + // not yet supported resource
46823 + if (unlikely(!restab_log[res]))
46824 + return;
46825 +
46826 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
46827 + rlim = task_rlimit_max(task, res);
46828 + else
46829 + rlim = task_rlimit(task, res);
46830 +
46831 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
46832 + return;
46833 +
46834 + rcu_read_lock();
46835 + cred = __task_cred(task);
46836 +
46837 + if (res == RLIMIT_NPROC &&
46838 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
46839 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
46840 + goto out_rcu_unlock;
46841 + else if (res == RLIMIT_MEMLOCK &&
46842 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
46843 + goto out_rcu_unlock;
46844 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
46845 + goto out_rcu_unlock;
46846 + rcu_read_unlock();
46847 +
46848 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
46849 +
46850 + return;
46851 +out_rcu_unlock:
46852 + rcu_read_unlock();
46853 + return;
46854 +}
46855 diff -urNp linux-2.6.39.4/grsecurity/gracl_segv.c linux-2.6.39.4/grsecurity/gracl_segv.c
46856 --- linux-2.6.39.4/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
46857 +++ linux-2.6.39.4/grsecurity/gracl_segv.c 2011-08-05 19:44:37.000000000 -0400
46858 @@ -0,0 +1,299 @@
46859 +#include <linux/kernel.h>
46860 +#include <linux/mm.h>
46861 +#include <asm/uaccess.h>
46862 +#include <asm/errno.h>
46863 +#include <asm/mman.h>
46864 +#include <net/sock.h>
46865 +#include <linux/file.h>
46866 +#include <linux/fs.h>
46867 +#include <linux/net.h>
46868 +#include <linux/in.h>
46869 +#include <linux/slab.h>
46870 +#include <linux/types.h>
46871 +#include <linux/sched.h>
46872 +#include <linux/timer.h>
46873 +#include <linux/gracl.h>
46874 +#include <linux/grsecurity.h>
46875 +#include <linux/grinternal.h>
46876 +
46877 +static struct crash_uid *uid_set;
46878 +static unsigned short uid_used;
46879 +static DEFINE_SPINLOCK(gr_uid_lock);
46880 +extern rwlock_t gr_inode_lock;
46881 +extern struct acl_subject_label *
46882 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
46883 + struct acl_role_label *role);
46884 +
46885 +#ifdef CONFIG_BTRFS_FS
46886 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
46887 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
46888 +#endif
46889 +
46890 +static inline dev_t __get_dev(const struct dentry *dentry)
46891 +{
46892 +#ifdef CONFIG_BTRFS_FS
46893 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
46894 + return get_btrfs_dev_from_inode(dentry->d_inode);
46895 + else
46896 +#endif
46897 + return dentry->d_inode->i_sb->s_dev;
46898 +}
46899 +
46900 +int
46901 +gr_init_uidset(void)
46902 +{
46903 + uid_set =
46904 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
46905 + uid_used = 0;
46906 +
46907 + return uid_set ? 1 : 0;
46908 +}
46909 +
46910 +void
46911 +gr_free_uidset(void)
46912 +{
46913 + if (uid_set)
46914 + kfree(uid_set);
46915 +
46916 + return;
46917 +}
46918 +
46919 +int
46920 +gr_find_uid(const uid_t uid)
46921 +{
46922 + struct crash_uid *tmp = uid_set;
46923 + uid_t buid;
46924 + int low = 0, high = uid_used - 1, mid;
46925 +
46926 + while (high >= low) {
46927 + mid = (low + high) >> 1;
46928 + buid = tmp[mid].uid;
46929 + if (buid == uid)
46930 + return mid;
46931 + if (buid > uid)
46932 + high = mid - 1;
46933 + if (buid < uid)
46934 + low = mid + 1;
46935 + }
46936 +
46937 + return -1;
46938 +}
46939 +
46940 +static __inline__ void
46941 +gr_insertsort(void)
46942 +{
46943 + unsigned short i, j;
46944 + struct crash_uid index;
46945 +
46946 + for (i = 1; i < uid_used; i++) {
46947 + index = uid_set[i];
46948 + j = i;
46949 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
46950 + uid_set[j] = uid_set[j - 1];
46951 + j--;
46952 + }
46953 + uid_set[j] = index;
46954 + }
46955 +
46956 + return;
46957 +}
46958 +
46959 +static __inline__ void
46960 +gr_insert_uid(const uid_t uid, const unsigned long expires)
46961 +{
46962 + int loc;
46963 +
46964 + if (uid_used == GR_UIDTABLE_MAX)
46965 + return;
46966 +
46967 + loc = gr_find_uid(uid);
46968 +
46969 + if (loc >= 0) {
46970 + uid_set[loc].expires = expires;
46971 + return;
46972 + }
46973 +
46974 + uid_set[uid_used].uid = uid;
46975 + uid_set[uid_used].expires = expires;
46976 + uid_used++;
46977 +
46978 + gr_insertsort();
46979 +
46980 + return;
46981 +}
46982 +
46983 +void
46984 +gr_remove_uid(const unsigned short loc)
46985 +{
46986 + unsigned short i;
46987 +
46988 + for (i = loc + 1; i < uid_used; i++)
46989 + uid_set[i - 1] = uid_set[i];
46990 +
46991 + uid_used--;
46992 +
46993 + return;
46994 +}
46995 +
46996 +int
46997 +gr_check_crash_uid(const uid_t uid)
46998 +{
46999 + int loc;
47000 + int ret = 0;
47001 +
47002 + if (unlikely(!gr_acl_is_enabled()))
47003 + return 0;
47004 +
47005 + spin_lock(&gr_uid_lock);
47006 + loc = gr_find_uid(uid);
47007 +
47008 + if (loc < 0)
47009 + goto out_unlock;
47010 +
47011 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
47012 + gr_remove_uid(loc);
47013 + else
47014 + ret = 1;
47015 +
47016 +out_unlock:
47017 + spin_unlock(&gr_uid_lock);
47018 + return ret;
47019 +}
47020 +
47021 +static __inline__ int
47022 +proc_is_setxid(const struct cred *cred)
47023 +{
47024 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
47025 + cred->uid != cred->fsuid)
47026 + return 1;
47027 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
47028 + cred->gid != cred->fsgid)
47029 + return 1;
47030 +
47031 + return 0;
47032 +}
47033 +
47034 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
47035 +
47036 +void
47037 +gr_handle_crash(struct task_struct *task, const int sig)
47038 +{
47039 + struct acl_subject_label *curr;
47040 + struct acl_subject_label *curr2;
47041 + struct task_struct *tsk, *tsk2;
47042 + const struct cred *cred;
47043 + const struct cred *cred2;
47044 +
47045 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
47046 + return;
47047 +
47048 + if (unlikely(!gr_acl_is_enabled()))
47049 + return;
47050 +
47051 + curr = task->acl;
47052 +
47053 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
47054 + return;
47055 +
47056 + if (time_before_eq(curr->expires, get_seconds())) {
47057 + curr->expires = 0;
47058 + curr->crashes = 0;
47059 + }
47060 +
47061 + curr->crashes++;
47062 +
47063 + if (!curr->expires)
47064 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
47065 +
47066 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
47067 + time_after(curr->expires, get_seconds())) {
47068 + rcu_read_lock();
47069 + cred = __task_cred(task);
47070 + if (cred->uid && proc_is_setxid(cred)) {
47071 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
47072 + spin_lock(&gr_uid_lock);
47073 + gr_insert_uid(cred->uid, curr->expires);
47074 + spin_unlock(&gr_uid_lock);
47075 + curr->expires = 0;
47076 + curr->crashes = 0;
47077 + read_lock(&tasklist_lock);
47078 + do_each_thread(tsk2, tsk) {
47079 + cred2 = __task_cred(tsk);
47080 + if (tsk != task && cred2->uid == cred->uid)
47081 + gr_fake_force_sig(SIGKILL, tsk);
47082 + } while_each_thread(tsk2, tsk);
47083 + read_unlock(&tasklist_lock);
47084 + } else {
47085 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
47086 + read_lock(&tasklist_lock);
47087 + do_each_thread(tsk2, tsk) {
47088 + if (likely(tsk != task)) {
47089 + curr2 = tsk->acl;
47090 +
47091 + if (curr2->device == curr->device &&
47092 + curr2->inode == curr->inode)
47093 + gr_fake_force_sig(SIGKILL, tsk);
47094 + }
47095 + } while_each_thread(tsk2, tsk);
47096 + read_unlock(&tasklist_lock);
47097 + }
47098 + rcu_read_unlock();
47099 + }
47100 +
47101 + return;
47102 +}
47103 +
47104 +int
47105 +gr_check_crash_exec(const struct file *filp)
47106 +{
47107 + struct acl_subject_label *curr;
47108 +
47109 + if (unlikely(!gr_acl_is_enabled()))
47110 + return 0;
47111 +
47112 + read_lock(&gr_inode_lock);
47113 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
47114 + __get_dev(filp->f_path.dentry),
47115 + current->role);
47116 + read_unlock(&gr_inode_lock);
47117 +
47118 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
47119 + (!curr->crashes && !curr->expires))
47120 + return 0;
47121 +
47122 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
47123 + time_after(curr->expires, get_seconds()))
47124 + return 1;
47125 + else if (time_before_eq(curr->expires, get_seconds())) {
47126 + curr->crashes = 0;
47127 + curr->expires = 0;
47128 + }
47129 +
47130 + return 0;
47131 +}
47132 +
47133 +void
47134 +gr_handle_alertkill(struct task_struct *task)
47135 +{
47136 + struct acl_subject_label *curracl;
47137 + __u32 curr_ip;
47138 + struct task_struct *p, *p2;
47139 +
47140 + if (unlikely(!gr_acl_is_enabled()))
47141 + return;
47142 +
47143 + curracl = task->acl;
47144 + curr_ip = task->signal->curr_ip;
47145 +
47146 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
47147 + read_lock(&tasklist_lock);
47148 + do_each_thread(p2, p) {
47149 + if (p->signal->curr_ip == curr_ip)
47150 + gr_fake_force_sig(SIGKILL, p);
47151 + } while_each_thread(p2, p);
47152 + read_unlock(&tasklist_lock);
47153 + } else if (curracl->mode & GR_KILLPROC)
47154 + gr_fake_force_sig(SIGKILL, task);
47155 +
47156 + return;
47157 +}
47158 diff -urNp linux-2.6.39.4/grsecurity/gracl_shm.c linux-2.6.39.4/grsecurity/gracl_shm.c
47159 --- linux-2.6.39.4/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
47160 +++ linux-2.6.39.4/grsecurity/gracl_shm.c 2011-08-05 19:44:37.000000000 -0400
47161 @@ -0,0 +1,40 @@
47162 +#include <linux/kernel.h>
47163 +#include <linux/mm.h>
47164 +#include <linux/sched.h>
47165 +#include <linux/file.h>
47166 +#include <linux/ipc.h>
47167 +#include <linux/gracl.h>
47168 +#include <linux/grsecurity.h>
47169 +#include <linux/grinternal.h>
47170 +
47171 +int
47172 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47173 + const time_t shm_createtime, const uid_t cuid, const int shmid)
47174 +{
47175 + struct task_struct *task;
47176 +
47177 + if (!gr_acl_is_enabled())
47178 + return 1;
47179 +
47180 + rcu_read_lock();
47181 + read_lock(&tasklist_lock);
47182 +
47183 + task = find_task_by_vpid(shm_cprid);
47184 +
47185 + if (unlikely(!task))
47186 + task = find_task_by_vpid(shm_lapid);
47187 +
47188 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
47189 + (task->pid == shm_lapid)) &&
47190 + (task->acl->mode & GR_PROTSHM) &&
47191 + (task->acl != current->acl))) {
47192 + read_unlock(&tasklist_lock);
47193 + rcu_read_unlock();
47194 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
47195 + return 0;
47196 + }
47197 + read_unlock(&tasklist_lock);
47198 + rcu_read_unlock();
47199 +
47200 + return 1;
47201 +}
47202 diff -urNp linux-2.6.39.4/grsecurity/grsec_chdir.c linux-2.6.39.4/grsecurity/grsec_chdir.c
47203 --- linux-2.6.39.4/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
47204 +++ linux-2.6.39.4/grsecurity/grsec_chdir.c 2011-08-05 19:44:37.000000000 -0400
47205 @@ -0,0 +1,19 @@
47206 +#include <linux/kernel.h>
47207 +#include <linux/sched.h>
47208 +#include <linux/fs.h>
47209 +#include <linux/file.h>
47210 +#include <linux/grsecurity.h>
47211 +#include <linux/grinternal.h>
47212 +
47213 +void
47214 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
47215 +{
47216 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
47217 + if ((grsec_enable_chdir && grsec_enable_group &&
47218 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
47219 + !grsec_enable_group)) {
47220 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
47221 + }
47222 +#endif
47223 + return;
47224 +}
47225 diff -urNp linux-2.6.39.4/grsecurity/grsec_chroot.c linux-2.6.39.4/grsecurity/grsec_chroot.c
47226 --- linux-2.6.39.4/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
47227 +++ linux-2.6.39.4/grsecurity/grsec_chroot.c 2011-08-05 19:44:37.000000000 -0400
47228 @@ -0,0 +1,349 @@
47229 +#include <linux/kernel.h>
47230 +#include <linux/module.h>
47231 +#include <linux/sched.h>
47232 +#include <linux/file.h>
47233 +#include <linux/fs.h>
47234 +#include <linux/mount.h>
47235 +#include <linux/types.h>
47236 +#include <linux/pid_namespace.h>
47237 +#include <linux/grsecurity.h>
47238 +#include <linux/grinternal.h>
47239 +
47240 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
47241 +{
47242 +#ifdef CONFIG_GRKERNSEC
47243 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
47244 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
47245 + task->gr_is_chrooted = 1;
47246 + else
47247 + task->gr_is_chrooted = 0;
47248 +
47249 + task->gr_chroot_dentry = path->dentry;
47250 +#endif
47251 + return;
47252 +}
47253 +
47254 +void gr_clear_chroot_entries(struct task_struct *task)
47255 +{
47256 +#ifdef CONFIG_GRKERNSEC
47257 + task->gr_is_chrooted = 0;
47258 + task->gr_chroot_dentry = NULL;
47259 +#endif
47260 + return;
47261 +}
47262 +
47263 +int
47264 +gr_handle_chroot_unix(const pid_t pid)
47265 +{
47266 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
47267 + struct task_struct *p;
47268 +
47269 + if (unlikely(!grsec_enable_chroot_unix))
47270 + return 1;
47271 +
47272 + if (likely(!proc_is_chrooted(current)))
47273 + return 1;
47274 +
47275 + rcu_read_lock();
47276 + read_lock(&tasklist_lock);
47277 + p = find_task_by_vpid_unrestricted(pid);
47278 + if (unlikely(p && !have_same_root(current, p))) {
47279 + read_unlock(&tasklist_lock);
47280 + rcu_read_unlock();
47281 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
47282 + return 0;
47283 + }
47284 + read_unlock(&tasklist_lock);
47285 + rcu_read_unlock();
47286 +#endif
47287 + return 1;
47288 +}
47289 +
47290 +int
47291 +gr_handle_chroot_nice(void)
47292 +{
47293 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47294 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
47295 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
47296 + return -EPERM;
47297 + }
47298 +#endif
47299 + return 0;
47300 +}
47301 +
47302 +int
47303 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
47304 +{
47305 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47306 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
47307 + && proc_is_chrooted(current)) {
47308 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
47309 + return -EACCES;
47310 + }
47311 +#endif
47312 + return 0;
47313 +}
47314 +
47315 +int
47316 +gr_handle_chroot_rawio(const struct inode *inode)
47317 +{
47318 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47319 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47320 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
47321 + return 1;
47322 +#endif
47323 + return 0;
47324 +}
47325 +
47326 +int
47327 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
47328 +{
47329 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47330 + struct task_struct *p;
47331 + int ret = 0;
47332 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
47333 + return ret;
47334 +
47335 + read_lock(&tasklist_lock);
47336 + do_each_pid_task(pid, type, p) {
47337 + if (!have_same_root(current, p)) {
47338 + ret = 1;
47339 + goto out;
47340 + }
47341 + } while_each_pid_task(pid, type, p);
47342 +out:
47343 + read_unlock(&tasklist_lock);
47344 + return ret;
47345 +#endif
47346 + return 0;
47347 +}
47348 +
47349 +int
47350 +gr_pid_is_chrooted(struct task_struct *p)
47351 +{
47352 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47353 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
47354 + return 0;
47355 +
47356 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
47357 + !have_same_root(current, p)) {
47358 + return 1;
47359 + }
47360 +#endif
47361 + return 0;
47362 +}
47363 +
47364 +EXPORT_SYMBOL(gr_pid_is_chrooted);
47365 +
47366 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
47367 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
47368 +{
47369 + struct path path, currentroot;
47370 + int ret = 0;
47371 +
47372 + path.dentry = (struct dentry *)u_dentry;
47373 + path.mnt = (struct vfsmount *)u_mnt;
47374 + get_fs_root(current->fs, &currentroot);
47375 + if (path_is_under(&path, &currentroot))
47376 + ret = 1;
47377 + path_put(&currentroot);
47378 +
47379 + return ret;
47380 +}
47381 +#endif
47382 +
47383 +int
47384 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
47385 +{
47386 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
47387 + if (!grsec_enable_chroot_fchdir)
47388 + return 1;
47389 +
47390 + if (!proc_is_chrooted(current))
47391 + return 1;
47392 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
47393 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
47394 + return 0;
47395 + }
47396 +#endif
47397 + return 1;
47398 +}
47399 +
47400 +int
47401 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47402 + const time_t shm_createtime)
47403 +{
47404 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
47405 + struct task_struct *p;
47406 + time_t starttime;
47407 +
47408 + if (unlikely(!grsec_enable_chroot_shmat))
47409 + return 1;
47410 +
47411 + if (likely(!proc_is_chrooted(current)))
47412 + return 1;
47413 +
47414 + rcu_read_lock();
47415 + read_lock(&tasklist_lock);
47416 +
47417 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
47418 + starttime = p->start_time.tv_sec;
47419 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
47420 + if (have_same_root(current, p)) {
47421 + goto allow;
47422 + } else {
47423 + read_unlock(&tasklist_lock);
47424 + rcu_read_unlock();
47425 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47426 + return 0;
47427 + }
47428 + }
47429 + /* creator exited, pid reuse, fall through to next check */
47430 + }
47431 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
47432 + if (unlikely(!have_same_root(current, p))) {
47433 + read_unlock(&tasklist_lock);
47434 + rcu_read_unlock();
47435 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47436 + return 0;
47437 + }
47438 + }
47439 +
47440 +allow:
47441 + read_unlock(&tasklist_lock);
47442 + rcu_read_unlock();
47443 +#endif
47444 + return 1;
47445 +}
47446 +
47447 +void
47448 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
47449 +{
47450 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
47451 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
47452 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
47453 +#endif
47454 + return;
47455 +}
47456 +
47457 +int
47458 +gr_handle_chroot_mknod(const struct dentry *dentry,
47459 + const struct vfsmount *mnt, const int mode)
47460 +{
47461 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
47462 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
47463 + proc_is_chrooted(current)) {
47464 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
47465 + return -EPERM;
47466 + }
47467 +#endif
47468 + return 0;
47469 +}
47470 +
47471 +int
47472 +gr_handle_chroot_mount(const struct dentry *dentry,
47473 + const struct vfsmount *mnt, const char *dev_name)
47474 +{
47475 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
47476 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
47477 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
47478 + return -EPERM;
47479 + }
47480 +#endif
47481 + return 0;
47482 +}
47483 +
47484 +int
47485 +gr_handle_chroot_pivot(void)
47486 +{
47487 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
47488 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
47489 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
47490 + return -EPERM;
47491 + }
47492 +#endif
47493 + return 0;
47494 +}
47495 +
47496 +int
47497 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
47498 +{
47499 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
47500 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
47501 + !gr_is_outside_chroot(dentry, mnt)) {
47502 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
47503 + return -EPERM;
47504 + }
47505 +#endif
47506 + return 0;
47507 +}
47508 +
47509 +int
47510 +gr_handle_chroot_caps(struct path *path)
47511 +{
47512 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47513 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
47514 + (init_task.fs->root.dentry != path->dentry) &&
47515 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
47516 +
47517 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
47518 + const struct cred *old = current_cred();
47519 + struct cred *new = prepare_creds();
47520 + if (new == NULL)
47521 + return 1;
47522 +
47523 + new->cap_permitted = cap_drop(old->cap_permitted,
47524 + chroot_caps);
47525 + new->cap_inheritable = cap_drop(old->cap_inheritable,
47526 + chroot_caps);
47527 + new->cap_effective = cap_drop(old->cap_effective,
47528 + chroot_caps);
47529 +
47530 + commit_creds(new);
47531 +
47532 + return 0;
47533 + }
47534 +#endif
47535 + return 0;
47536 +}
47537 +
47538 +int
47539 +gr_handle_chroot_sysctl(const int op)
47540 +{
47541 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
47542 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
47543 + proc_is_chrooted(current))
47544 + return -EACCES;
47545 +#endif
47546 + return 0;
47547 +}
47548 +
47549 +void
47550 +gr_handle_chroot_chdir(struct path *path)
47551 +{
47552 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
47553 + if (grsec_enable_chroot_chdir)
47554 + set_fs_pwd(current->fs, path);
47555 +#endif
47556 + return;
47557 +}
47558 +
47559 +int
47560 +gr_handle_chroot_chmod(const struct dentry *dentry,
47561 + const struct vfsmount *mnt, const int mode)
47562 +{
47563 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
47564 + /* allow chmod +s on directories, but not files */
47565 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
47566 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
47567 + proc_is_chrooted(current)) {
47568 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
47569 + return -EPERM;
47570 + }
47571 +#endif
47572 + return 0;
47573 +}
47574 +
47575 +#ifdef CONFIG_SECURITY
47576 +EXPORT_SYMBOL(gr_handle_chroot_caps);
47577 +#endif
47578 diff -urNp linux-2.6.39.4/grsecurity/grsec_disabled.c linux-2.6.39.4/grsecurity/grsec_disabled.c
47579 --- linux-2.6.39.4/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
47580 +++ linux-2.6.39.4/grsecurity/grsec_disabled.c 2011-08-05 19:44:37.000000000 -0400
47581 @@ -0,0 +1,447 @@
47582 +#include <linux/kernel.h>
47583 +#include <linux/module.h>
47584 +#include <linux/sched.h>
47585 +#include <linux/file.h>
47586 +#include <linux/fs.h>
47587 +#include <linux/kdev_t.h>
47588 +#include <linux/net.h>
47589 +#include <linux/in.h>
47590 +#include <linux/ip.h>
47591 +#include <linux/skbuff.h>
47592 +#include <linux/sysctl.h>
47593 +
47594 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47595 +void
47596 +pax_set_initial_flags(struct linux_binprm *bprm)
47597 +{
47598 + return;
47599 +}
47600 +#endif
47601 +
47602 +#ifdef CONFIG_SYSCTL
47603 +__u32
47604 +gr_handle_sysctl(const struct ctl_table * table, const int op)
47605 +{
47606 + return 0;
47607 +}
47608 +#endif
47609 +
47610 +#ifdef CONFIG_TASKSTATS
47611 +int gr_is_taskstats_denied(int pid)
47612 +{
47613 + return 0;
47614 +}
47615 +#endif
47616 +
47617 +int
47618 +gr_acl_is_enabled(void)
47619 +{
47620 + return 0;
47621 +}
47622 +
47623 +int
47624 +gr_handle_rawio(const struct inode *inode)
47625 +{
47626 + return 0;
47627 +}
47628 +
47629 +void
47630 +gr_acl_handle_psacct(struct task_struct *task, const long code)
47631 +{
47632 + return;
47633 +}
47634 +
47635 +int
47636 +gr_handle_ptrace(struct task_struct *task, const long request)
47637 +{
47638 + return 0;
47639 +}
47640 +
47641 +int
47642 +gr_handle_proc_ptrace(struct task_struct *task)
47643 +{
47644 + return 0;
47645 +}
47646 +
47647 +void
47648 +gr_learn_resource(const struct task_struct *task,
47649 + const int res, const unsigned long wanted, const int gt)
47650 +{
47651 + return;
47652 +}
47653 +
47654 +int
47655 +gr_set_acls(const int type)
47656 +{
47657 + return 0;
47658 +}
47659 +
47660 +int
47661 +gr_check_hidden_task(const struct task_struct *tsk)
47662 +{
47663 + return 0;
47664 +}
47665 +
47666 +int
47667 +gr_check_protected_task(const struct task_struct *task)
47668 +{
47669 + return 0;
47670 +}
47671 +
47672 +int
47673 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
47674 +{
47675 + return 0;
47676 +}
47677 +
47678 +void
47679 +gr_copy_label(struct task_struct *tsk)
47680 +{
47681 + return;
47682 +}
47683 +
47684 +void
47685 +gr_set_pax_flags(struct task_struct *task)
47686 +{
47687 + return;
47688 +}
47689 +
47690 +int
47691 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
47692 + const int unsafe_share)
47693 +{
47694 + return 0;
47695 +}
47696 +
47697 +void
47698 +gr_handle_delete(const ino_t ino, const dev_t dev)
47699 +{
47700 + return;
47701 +}
47702 +
47703 +void
47704 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
47705 +{
47706 + return;
47707 +}
47708 +
47709 +void
47710 +gr_handle_crash(struct task_struct *task, const int sig)
47711 +{
47712 + return;
47713 +}
47714 +
47715 +int
47716 +gr_check_crash_exec(const struct file *filp)
47717 +{
47718 + return 0;
47719 +}
47720 +
47721 +int
47722 +gr_check_crash_uid(const uid_t uid)
47723 +{
47724 + return 0;
47725 +}
47726 +
47727 +void
47728 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
47729 + struct dentry *old_dentry,
47730 + struct dentry *new_dentry,
47731 + struct vfsmount *mnt, const __u8 replace)
47732 +{
47733 + return;
47734 +}
47735 +
47736 +int
47737 +gr_search_socket(const int family, const int type, const int protocol)
47738 +{
47739 + return 1;
47740 +}
47741 +
47742 +int
47743 +gr_search_connectbind(const int mode, const struct socket *sock,
47744 + const struct sockaddr_in *addr)
47745 +{
47746 + return 0;
47747 +}
47748 +
47749 +int
47750 +gr_is_capable(const int cap)
47751 +{
47752 + return 1;
47753 +}
47754 +
47755 +int
47756 +gr_is_capable_nolog(const int cap)
47757 +{
47758 + return 1;
47759 +}
47760 +
47761 +void
47762 +gr_handle_alertkill(struct task_struct *task)
47763 +{
47764 + return;
47765 +}
47766 +
47767 +__u32
47768 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
47769 +{
47770 + return 1;
47771 +}
47772 +
47773 +__u32
47774 +gr_acl_handle_hidden_file(const struct dentry * dentry,
47775 + const struct vfsmount * mnt)
47776 +{
47777 + return 1;
47778 +}
47779 +
47780 +__u32
47781 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
47782 + const int fmode)
47783 +{
47784 + return 1;
47785 +}
47786 +
47787 +__u32
47788 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
47789 +{
47790 + return 1;
47791 +}
47792 +
47793 +__u32
47794 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
47795 +{
47796 + return 1;
47797 +}
47798 +
47799 +int
47800 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
47801 + unsigned int *vm_flags)
47802 +{
47803 + return 1;
47804 +}
47805 +
47806 +__u32
47807 +gr_acl_handle_truncate(const struct dentry * dentry,
47808 + const struct vfsmount * mnt)
47809 +{
47810 + return 1;
47811 +}
47812 +
47813 +__u32
47814 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
47815 +{
47816 + return 1;
47817 +}
47818 +
47819 +__u32
47820 +gr_acl_handle_access(const struct dentry * dentry,
47821 + const struct vfsmount * mnt, const int fmode)
47822 +{
47823 + return 1;
47824 +}
47825 +
47826 +__u32
47827 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
47828 + mode_t mode)
47829 +{
47830 + return 1;
47831 +}
47832 +
47833 +__u32
47834 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
47835 + mode_t mode)
47836 +{
47837 + return 1;
47838 +}
47839 +
47840 +__u32
47841 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
47842 +{
47843 + return 1;
47844 +}
47845 +
47846 +__u32
47847 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
47848 +{
47849 + return 1;
47850 +}
47851 +
47852 +void
47853 +grsecurity_init(void)
47854 +{
47855 + return;
47856 +}
47857 +
47858 +__u32
47859 +gr_acl_handle_mknod(const struct dentry * new_dentry,
47860 + const struct dentry * parent_dentry,
47861 + const struct vfsmount * parent_mnt,
47862 + const int mode)
47863 +{
47864 + return 1;
47865 +}
47866 +
47867 +__u32
47868 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
47869 + const struct dentry * parent_dentry,
47870 + const struct vfsmount * parent_mnt)
47871 +{
47872 + return 1;
47873 +}
47874 +
47875 +__u32
47876 +gr_acl_handle_symlink(const struct dentry * new_dentry,
47877 + const struct dentry * parent_dentry,
47878 + const struct vfsmount * parent_mnt, const char *from)
47879 +{
47880 + return 1;
47881 +}
47882 +
47883 +__u32
47884 +gr_acl_handle_link(const struct dentry * new_dentry,
47885 + const struct dentry * parent_dentry,
47886 + const struct vfsmount * parent_mnt,
47887 + const struct dentry * old_dentry,
47888 + const struct vfsmount * old_mnt, const char *to)
47889 +{
47890 + return 1;
47891 +}
47892 +
47893 +int
47894 +gr_acl_handle_rename(const struct dentry *new_dentry,
47895 + const struct dentry *parent_dentry,
47896 + const struct vfsmount *parent_mnt,
47897 + const struct dentry *old_dentry,
47898 + const struct inode *old_parent_inode,
47899 + const struct vfsmount *old_mnt, const char *newname)
47900 +{
47901 + return 0;
47902 +}
47903 +
47904 +int
47905 +gr_acl_handle_filldir(const struct file *file, const char *name,
47906 + const int namelen, const ino_t ino)
47907 +{
47908 + return 1;
47909 +}
47910 +
47911 +int
47912 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47913 + const time_t shm_createtime, const uid_t cuid, const int shmid)
47914 +{
47915 + return 1;
47916 +}
47917 +
47918 +int
47919 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
47920 +{
47921 + return 0;
47922 +}
47923 +
47924 +int
47925 +gr_search_accept(const struct socket *sock)
47926 +{
47927 + return 0;
47928 +}
47929 +
47930 +int
47931 +gr_search_listen(const struct socket *sock)
47932 +{
47933 + return 0;
47934 +}
47935 +
47936 +int
47937 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
47938 +{
47939 + return 0;
47940 +}
47941 +
47942 +__u32
47943 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
47944 +{
47945 + return 1;
47946 +}
47947 +
47948 +__u32
47949 +gr_acl_handle_creat(const struct dentry * dentry,
47950 + const struct dentry * p_dentry,
47951 + const struct vfsmount * p_mnt, const int fmode,
47952 + const int imode)
47953 +{
47954 + return 1;
47955 +}
47956 +
47957 +void
47958 +gr_acl_handle_exit(void)
47959 +{
47960 + return;
47961 +}
47962 +
47963 +int
47964 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
47965 +{
47966 + return 1;
47967 +}
47968 +
47969 +void
47970 +gr_set_role_label(const uid_t uid, const gid_t gid)
47971 +{
47972 + return;
47973 +}
47974 +
47975 +int
47976 +gr_acl_handle_procpidmem(const struct task_struct *task)
47977 +{
47978 + return 0;
47979 +}
47980 +
47981 +int
47982 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
47983 +{
47984 + return 0;
47985 +}
47986 +
47987 +int
47988 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
47989 +{
47990 + return 0;
47991 +}
47992 +
47993 +void
47994 +gr_set_kernel_label(struct task_struct *task)
47995 +{
47996 + return;
47997 +}
47998 +
47999 +int
48000 +gr_check_user_change(int real, int effective, int fs)
48001 +{
48002 + return 0;
48003 +}
48004 +
48005 +int
48006 +gr_check_group_change(int real, int effective, int fs)
48007 +{
48008 + return 0;
48009 +}
48010 +
48011 +int gr_acl_enable_at_secure(void)
48012 +{
48013 + return 0;
48014 +}
48015 +
48016 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
48017 +{
48018 + return dentry->d_inode->i_sb->s_dev;
48019 +}
48020 +
48021 +EXPORT_SYMBOL(gr_is_capable);
48022 +EXPORT_SYMBOL(gr_is_capable_nolog);
48023 +EXPORT_SYMBOL(gr_learn_resource);
48024 +EXPORT_SYMBOL(gr_set_kernel_label);
48025 +#ifdef CONFIG_SECURITY
48026 +EXPORT_SYMBOL(gr_check_user_change);
48027 +EXPORT_SYMBOL(gr_check_group_change);
48028 +#endif
48029 diff -urNp linux-2.6.39.4/grsecurity/grsec_exec.c linux-2.6.39.4/grsecurity/grsec_exec.c
48030 --- linux-2.6.39.4/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
48031 +++ linux-2.6.39.4/grsecurity/grsec_exec.c 2011-08-05 19:44:37.000000000 -0400
48032 @@ -0,0 +1,146 @@
48033 +#include <linux/kernel.h>
48034 +#include <linux/sched.h>
48035 +#include <linux/file.h>
48036 +#include <linux/binfmts.h>
48037 +#include <linux/fs.h>
48038 +#include <linux/types.h>
48039 +#include <linux/grdefs.h>
48040 +#include <linux/grinternal.h>
48041 +#include <linux/capability.h>
48042 +#include <linux/compat.h>
48043 +
48044 +#include <asm/uaccess.h>
48045 +
48046 +#ifdef CONFIG_GRKERNSEC_EXECLOG
48047 +static char gr_exec_arg_buf[132];
48048 +static DEFINE_MUTEX(gr_exec_arg_mutex);
48049 +#endif
48050 +
48051 +int
48052 +gr_handle_nproc(void)
48053 +{
48054 +#ifdef CONFIG_GRKERNSEC_EXECVE
48055 + const struct cred *cred = current_cred();
48056 + if (grsec_enable_execve && cred->user &&
48057 + (atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) &&
48058 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
48059 + gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
48060 + return -EAGAIN;
48061 + }
48062 +#endif
48063 + return 0;
48064 +}
48065 +
48066 +void
48067 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
48068 +{
48069 +#ifdef CONFIG_GRKERNSEC_EXECLOG
48070 + char *grarg = gr_exec_arg_buf;
48071 + unsigned int i, x, execlen = 0;
48072 + char c;
48073 +
48074 + if (!((grsec_enable_execlog && grsec_enable_group &&
48075 + in_group_p(grsec_audit_gid))
48076 + || (grsec_enable_execlog && !grsec_enable_group)))
48077 + return;
48078 +
48079 + mutex_lock(&gr_exec_arg_mutex);
48080 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
48081 +
48082 + if (unlikely(argv == NULL))
48083 + goto log;
48084 +
48085 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
48086 + const char __user *p;
48087 + unsigned int len;
48088 +
48089 + if (copy_from_user(&p, argv + i, sizeof(p)))
48090 + goto log;
48091 + if (!p)
48092 + goto log;
48093 + len = strnlen_user(p, 128 - execlen);
48094 + if (len > 128 - execlen)
48095 + len = 128 - execlen;
48096 + else if (len > 0)
48097 + len--;
48098 + if (copy_from_user(grarg + execlen, p, len))
48099 + goto log;
48100 +
48101 + /* rewrite unprintable characters */
48102 + for (x = 0; x < len; x++) {
48103 + c = *(grarg + execlen + x);
48104 + if (c < 32 || c > 126)
48105 + *(grarg + execlen + x) = ' ';
48106 + }
48107 +
48108 + execlen += len;
48109 + *(grarg + execlen) = ' ';
48110 + *(grarg + execlen + 1) = '\0';
48111 + execlen++;
48112 + }
48113 +
48114 + log:
48115 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
48116 + bprm->file->f_path.mnt, grarg);
48117 + mutex_unlock(&gr_exec_arg_mutex);
48118 +#endif
48119 + return;
48120 +}
48121 +
48122 +#ifdef CONFIG_COMPAT
48123 +void
48124 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
48125 +{
48126 +#ifdef CONFIG_GRKERNSEC_EXECLOG
48127 + char *grarg = gr_exec_arg_buf;
48128 + unsigned int i, x, execlen = 0;
48129 + char c;
48130 +
48131 + if (!((grsec_enable_execlog && grsec_enable_group &&
48132 + in_group_p(grsec_audit_gid))
48133 + || (grsec_enable_execlog && !grsec_enable_group)))
48134 + return;
48135 +
48136 + mutex_lock(&gr_exec_arg_mutex);
48137 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
48138 +
48139 + if (unlikely(argv == NULL))
48140 + goto log;
48141 +
48142 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
48143 + compat_uptr_t p;
48144 + unsigned int len;
48145 +
48146 + if (get_user(p, argv + i))
48147 + goto log;
48148 + len = strnlen_user(compat_ptr(p), 128 - execlen);
48149 + if (len > 128 - execlen)
48150 + len = 128 - execlen;
48151 + else if (len > 0)
48152 + len--;
48153 + else
48154 + goto log;
48155 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
48156 + goto log;
48157 +
48158 + /* rewrite unprintable characters */
48159 + for (x = 0; x < len; x++) {
48160 + c = *(grarg + execlen + x);
48161 + if (c < 32 || c > 126)
48162 + *(grarg + execlen + x) = ' ';
48163 + }
48164 +
48165 + execlen += len;
48166 + *(grarg + execlen) = ' ';
48167 + *(grarg + execlen + 1) = '\0';
48168 + execlen++;
48169 + }
48170 +
48171 + log:
48172 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
48173 + bprm->file->f_path.mnt, grarg);
48174 + mutex_unlock(&gr_exec_arg_mutex);
48175 +#endif
48176 + return;
48177 +}
48178 +#endif
48179 diff -urNp linux-2.6.39.4/grsecurity/grsec_fifo.c linux-2.6.39.4/grsecurity/grsec_fifo.c
48180 --- linux-2.6.39.4/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
48181 +++ linux-2.6.39.4/grsecurity/grsec_fifo.c 2011-08-05 19:44:37.000000000 -0400
48182 @@ -0,0 +1,24 @@
48183 +#include <linux/kernel.h>
48184 +#include <linux/sched.h>
48185 +#include <linux/fs.h>
48186 +#include <linux/file.h>
48187 +#include <linux/grinternal.h>
48188 +
48189 +int
48190 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
48191 + const struct dentry *dir, const int flag, const int acc_mode)
48192 +{
48193 +#ifdef CONFIG_GRKERNSEC_FIFO
48194 + const struct cred *cred = current_cred();
48195 +
48196 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
48197 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
48198 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
48199 + (cred->fsuid != dentry->d_inode->i_uid)) {
48200 + if (!inode_permission(dentry->d_inode, acc_mode))
48201 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
48202 + return -EACCES;
48203 + }
48204 +#endif
48205 + return 0;
48206 +}
48207 diff -urNp linux-2.6.39.4/grsecurity/grsec_fork.c linux-2.6.39.4/grsecurity/grsec_fork.c
48208 --- linux-2.6.39.4/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
48209 +++ linux-2.6.39.4/grsecurity/grsec_fork.c 2011-08-05 19:44:37.000000000 -0400
48210 @@ -0,0 +1,23 @@
48211 +#include <linux/kernel.h>
48212 +#include <linux/sched.h>
48213 +#include <linux/grsecurity.h>
48214 +#include <linux/grinternal.h>
48215 +#include <linux/errno.h>
48216 +
48217 +void
48218 +gr_log_forkfail(const int retval)
48219 +{
48220 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
48221 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
48222 + switch (retval) {
48223 + case -EAGAIN:
48224 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
48225 + break;
48226 + case -ENOMEM:
48227 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
48228 + break;
48229 + }
48230 + }
48231 +#endif
48232 + return;
48233 +}
48234 diff -urNp linux-2.6.39.4/grsecurity/grsec_init.c linux-2.6.39.4/grsecurity/grsec_init.c
48235 --- linux-2.6.39.4/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
48236 +++ linux-2.6.39.4/grsecurity/grsec_init.c 2011-08-05 19:44:37.000000000 -0400
48237 @@ -0,0 +1,273 @@
48238 +#include <linux/kernel.h>
48239 +#include <linux/sched.h>
48240 +#include <linux/mm.h>
48241 +#include <linux/gracl.h>
48242 +#include <linux/slab.h>
48243 +#include <linux/vmalloc.h>
48244 +#include <linux/percpu.h>
48245 +#include <linux/module.h>
48246 +
48247 +int grsec_enable_brute;
48248 +int grsec_enable_link;
48249 +int grsec_enable_dmesg;
48250 +int grsec_enable_harden_ptrace;
48251 +int grsec_enable_fifo;
48252 +int grsec_enable_execve;
48253 +int grsec_enable_execlog;
48254 +int grsec_enable_signal;
48255 +int grsec_enable_forkfail;
48256 +int grsec_enable_audit_ptrace;
48257 +int grsec_enable_time;
48258 +int grsec_enable_audit_textrel;
48259 +int grsec_enable_group;
48260 +int grsec_audit_gid;
48261 +int grsec_enable_chdir;
48262 +int grsec_enable_mount;
48263 +int grsec_enable_rofs;
48264 +int grsec_enable_chroot_findtask;
48265 +int grsec_enable_chroot_mount;
48266 +int grsec_enable_chroot_shmat;
48267 +int grsec_enable_chroot_fchdir;
48268 +int grsec_enable_chroot_double;
48269 +int grsec_enable_chroot_pivot;
48270 +int grsec_enable_chroot_chdir;
48271 +int grsec_enable_chroot_chmod;
48272 +int grsec_enable_chroot_mknod;
48273 +int grsec_enable_chroot_nice;
48274 +int grsec_enable_chroot_execlog;
48275 +int grsec_enable_chroot_caps;
48276 +int grsec_enable_chroot_sysctl;
48277 +int grsec_enable_chroot_unix;
48278 +int grsec_enable_tpe;
48279 +int grsec_tpe_gid;
48280 +int grsec_enable_blackhole;
48281 +#ifdef CONFIG_IPV6_MODULE
48282 +EXPORT_SYMBOL(grsec_enable_blackhole);
48283 +#endif
48284 +int grsec_lastack_retries;
48285 +int grsec_enable_tpe_all;
48286 +int grsec_enable_tpe_invert;
48287 +int grsec_enable_socket_all;
48288 +int grsec_socket_all_gid;
48289 +int grsec_enable_socket_client;
48290 +int grsec_socket_client_gid;
48291 +int grsec_enable_socket_server;
48292 +int grsec_socket_server_gid;
48293 +int grsec_resource_logging;
48294 +int grsec_disable_privio;
48295 +int grsec_enable_log_rwxmaps;
48296 +int grsec_lock;
48297 +
48298 +DEFINE_SPINLOCK(grsec_alert_lock);
48299 +unsigned long grsec_alert_wtime = 0;
48300 +unsigned long grsec_alert_fyet = 0;
48301 +
48302 +DEFINE_SPINLOCK(grsec_audit_lock);
48303 +
48304 +DEFINE_RWLOCK(grsec_exec_file_lock);
48305 +
48306 +char *gr_shared_page[4];
48307 +
48308 +char *gr_alert_log_fmt;
48309 +char *gr_audit_log_fmt;
48310 +char *gr_alert_log_buf;
48311 +char *gr_audit_log_buf;
48312 +
48313 +extern struct gr_arg *gr_usermode;
48314 +extern unsigned char *gr_system_salt;
48315 +extern unsigned char *gr_system_sum;
48316 +
48317 +void __init
48318 +grsecurity_init(void)
48319 +{
48320 + int j;
48321 + /* create the per-cpu shared pages */
48322 +
48323 +#ifdef CONFIG_X86
48324 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
48325 +#endif
48326 +
48327 + for (j = 0; j < 4; j++) {
48328 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
48329 + if (gr_shared_page[j] == NULL) {
48330 + panic("Unable to allocate grsecurity shared page");
48331 + return;
48332 + }
48333 + }
48334 +
48335 + /* allocate log buffers */
48336 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
48337 + if (!gr_alert_log_fmt) {
48338 + panic("Unable to allocate grsecurity alert log format buffer");
48339 + return;
48340 + }
48341 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
48342 + if (!gr_audit_log_fmt) {
48343 + panic("Unable to allocate grsecurity audit log format buffer");
48344 + return;
48345 + }
48346 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
48347 + if (!gr_alert_log_buf) {
48348 + panic("Unable to allocate grsecurity alert log buffer");
48349 + return;
48350 + }
48351 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
48352 + if (!gr_audit_log_buf) {
48353 + panic("Unable to allocate grsecurity audit log buffer");
48354 + return;
48355 + }
48356 +
48357 + /* allocate memory for authentication structure */
48358 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
48359 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
48360 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
48361 +
48362 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
48363 + panic("Unable to allocate grsecurity authentication structure");
48364 + return;
48365 + }
48366 +
48367 +
48368 +#ifdef CONFIG_GRKERNSEC_IO
48369 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
48370 + grsec_disable_privio = 1;
48371 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
48372 + grsec_disable_privio = 1;
48373 +#else
48374 + grsec_disable_privio = 0;
48375 +#endif
48376 +#endif
48377 +
48378 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
48379 + /* for backward compatibility, tpe_invert always defaults to on if
48380 + enabled in the kernel
48381 + */
48382 + grsec_enable_tpe_invert = 1;
48383 +#endif
48384 +
48385 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
48386 +#ifndef CONFIG_GRKERNSEC_SYSCTL
48387 + grsec_lock = 1;
48388 +#endif
48389 +
48390 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48391 + grsec_enable_audit_textrel = 1;
48392 +#endif
48393 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48394 + grsec_enable_log_rwxmaps = 1;
48395 +#endif
48396 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
48397 + grsec_enable_group = 1;
48398 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
48399 +#endif
48400 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
48401 + grsec_enable_chdir = 1;
48402 +#endif
48403 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
48404 + grsec_enable_harden_ptrace = 1;
48405 +#endif
48406 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48407 + grsec_enable_mount = 1;
48408 +#endif
48409 +#ifdef CONFIG_GRKERNSEC_LINK
48410 + grsec_enable_link = 1;
48411 +#endif
48412 +#ifdef CONFIG_GRKERNSEC_BRUTE
48413 + grsec_enable_brute = 1;
48414 +#endif
48415 +#ifdef CONFIG_GRKERNSEC_DMESG
48416 + grsec_enable_dmesg = 1;
48417 +#endif
48418 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
48419 + grsec_enable_blackhole = 1;
48420 + grsec_lastack_retries = 4;
48421 +#endif
48422 +#ifdef CONFIG_GRKERNSEC_FIFO
48423 + grsec_enable_fifo = 1;
48424 +#endif
48425 +#ifdef CONFIG_GRKERNSEC_EXECVE
48426 + grsec_enable_execve = 1;
48427 +#endif
48428 +#ifdef CONFIG_GRKERNSEC_EXECLOG
48429 + grsec_enable_execlog = 1;
48430 +#endif
48431 +#ifdef CONFIG_GRKERNSEC_SIGNAL
48432 + grsec_enable_signal = 1;
48433 +#endif
48434 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
48435 + grsec_enable_forkfail = 1;
48436 +#endif
48437 +#ifdef CONFIG_GRKERNSEC_TIME
48438 + grsec_enable_time = 1;
48439 +#endif
48440 +#ifdef CONFIG_GRKERNSEC_RESLOG
48441 + grsec_resource_logging = 1;
48442 +#endif
48443 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
48444 + grsec_enable_chroot_findtask = 1;
48445 +#endif
48446 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
48447 + grsec_enable_chroot_unix = 1;
48448 +#endif
48449 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
48450 + grsec_enable_chroot_mount = 1;
48451 +#endif
48452 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
48453 + grsec_enable_chroot_fchdir = 1;
48454 +#endif
48455 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
48456 + grsec_enable_chroot_shmat = 1;
48457 +#endif
48458 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
48459 + grsec_enable_audit_ptrace = 1;
48460 +#endif
48461 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
48462 + grsec_enable_chroot_double = 1;
48463 +#endif
48464 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
48465 + grsec_enable_chroot_pivot = 1;
48466 +#endif
48467 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
48468 + grsec_enable_chroot_chdir = 1;
48469 +#endif
48470 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
48471 + grsec_enable_chroot_chmod = 1;
48472 +#endif
48473 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
48474 + grsec_enable_chroot_mknod = 1;
48475 +#endif
48476 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
48477 + grsec_enable_chroot_nice = 1;
48478 +#endif
48479 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
48480 + grsec_enable_chroot_execlog = 1;
48481 +#endif
48482 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48483 + grsec_enable_chroot_caps = 1;
48484 +#endif
48485 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
48486 + grsec_enable_chroot_sysctl = 1;
48487 +#endif
48488 +#ifdef CONFIG_GRKERNSEC_TPE
48489 + grsec_enable_tpe = 1;
48490 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
48491 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
48492 + grsec_enable_tpe_all = 1;
48493 +#endif
48494 +#endif
48495 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
48496 + grsec_enable_socket_all = 1;
48497 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
48498 +#endif
48499 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
48500 + grsec_enable_socket_client = 1;
48501 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
48502 +#endif
48503 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
48504 + grsec_enable_socket_server = 1;
48505 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
48506 +#endif
48507 +#endif
48508 +
48509 + return;
48510 +}
48511 diff -urNp linux-2.6.39.4/grsecurity/grsec_link.c linux-2.6.39.4/grsecurity/grsec_link.c
48512 --- linux-2.6.39.4/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
48513 +++ linux-2.6.39.4/grsecurity/grsec_link.c 2011-08-05 19:44:37.000000000 -0400
48514 @@ -0,0 +1,43 @@
48515 +#include <linux/kernel.h>
48516 +#include <linux/sched.h>
48517 +#include <linux/fs.h>
48518 +#include <linux/file.h>
48519 +#include <linux/grinternal.h>
48520 +
48521 +int
48522 +gr_handle_follow_link(const struct inode *parent,
48523 + const struct inode *inode,
48524 + const struct dentry *dentry, const struct vfsmount *mnt)
48525 +{
48526 +#ifdef CONFIG_GRKERNSEC_LINK
48527 + const struct cred *cred = current_cred();
48528 +
48529 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
48530 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
48531 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
48532 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
48533 + return -EACCES;
48534 + }
48535 +#endif
48536 + return 0;
48537 +}
48538 +
48539 +int
48540 +gr_handle_hardlink(const struct dentry *dentry,
48541 + const struct vfsmount *mnt,
48542 + struct inode *inode, const int mode, const char *to)
48543 +{
48544 +#ifdef CONFIG_GRKERNSEC_LINK
48545 + const struct cred *cred = current_cred();
48546 +
48547 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
48548 + (!S_ISREG(mode) || (mode & S_ISUID) ||
48549 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
48550 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
48551 + !capable(CAP_FOWNER) && cred->uid) {
48552 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
48553 + return -EPERM;
48554 + }
48555 +#endif
48556 + return 0;
48557 +}
48558 diff -urNp linux-2.6.39.4/grsecurity/grsec_log.c linux-2.6.39.4/grsecurity/grsec_log.c
48559 --- linux-2.6.39.4/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
48560 +++ linux-2.6.39.4/grsecurity/grsec_log.c 2011-08-05 19:44:37.000000000 -0400
48561 @@ -0,0 +1,310 @@
48562 +#include <linux/kernel.h>
48563 +#include <linux/sched.h>
48564 +#include <linux/file.h>
48565 +#include <linux/tty.h>
48566 +#include <linux/fs.h>
48567 +#include <linux/grinternal.h>
48568 +
48569 +#ifdef CONFIG_TREE_PREEMPT_RCU
48570 +#define DISABLE_PREEMPT() preempt_disable()
48571 +#define ENABLE_PREEMPT() preempt_enable()
48572 +#else
48573 +#define DISABLE_PREEMPT()
48574 +#define ENABLE_PREEMPT()
48575 +#endif
48576 +
48577 +#define BEGIN_LOCKS(x) \
48578 + DISABLE_PREEMPT(); \
48579 + rcu_read_lock(); \
48580 + read_lock(&tasklist_lock); \
48581 + read_lock(&grsec_exec_file_lock); \
48582 + if (x != GR_DO_AUDIT) \
48583 + spin_lock(&grsec_alert_lock); \
48584 + else \
48585 + spin_lock(&grsec_audit_lock)
48586 +
48587 +#define END_LOCKS(x) \
48588 + if (x != GR_DO_AUDIT) \
48589 + spin_unlock(&grsec_alert_lock); \
48590 + else \
48591 + spin_unlock(&grsec_audit_lock); \
48592 + read_unlock(&grsec_exec_file_lock); \
48593 + read_unlock(&tasklist_lock); \
48594 + rcu_read_unlock(); \
48595 + ENABLE_PREEMPT(); \
48596 + if (x == GR_DONT_AUDIT) \
48597 + gr_handle_alertkill(current)
48598 +
48599 +enum {
48600 + FLOODING,
48601 + NO_FLOODING
48602 +};
48603 +
48604 +extern char *gr_alert_log_fmt;
48605 +extern char *gr_audit_log_fmt;
48606 +extern char *gr_alert_log_buf;
48607 +extern char *gr_audit_log_buf;
48608 +
48609 +static int gr_log_start(int audit)
48610 +{
48611 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
48612 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
48613 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48614 +
48615 + if (audit == GR_DO_AUDIT)
48616 + goto set_fmt;
48617 +
48618 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
48619 + grsec_alert_wtime = jiffies;
48620 + grsec_alert_fyet = 0;
48621 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
48622 + grsec_alert_fyet++;
48623 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
48624 + grsec_alert_wtime = jiffies;
48625 + grsec_alert_fyet++;
48626 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
48627 + return FLOODING;
48628 + } else return FLOODING;
48629 +
48630 +set_fmt:
48631 + memset(buf, 0, PAGE_SIZE);
48632 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
48633 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
48634 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48635 + } else if (current->signal->curr_ip) {
48636 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
48637 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
48638 + } else if (gr_acl_is_enabled()) {
48639 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
48640 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48641 + } else {
48642 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
48643 + strcpy(buf, fmt);
48644 + }
48645 +
48646 + return NO_FLOODING;
48647 +}
48648 +
48649 +static void gr_log_middle(int audit, const char *msg, va_list ap)
48650 + __attribute__ ((format (printf, 2, 0)));
48651 +
48652 +static void gr_log_middle(int audit, const char *msg, va_list ap)
48653 +{
48654 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48655 + unsigned int len = strlen(buf);
48656 +
48657 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48658 +
48659 + return;
48660 +}
48661 +
48662 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
48663 + __attribute__ ((format (printf, 2, 3)));
48664 +
48665 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
48666 +{
48667 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48668 + unsigned int len = strlen(buf);
48669 + va_list ap;
48670 +
48671 + va_start(ap, msg);
48672 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48673 + va_end(ap);
48674 +
48675 + return;
48676 +}
48677 +
48678 +static void gr_log_end(int audit)
48679 +{
48680 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48681 + unsigned int len = strlen(buf);
48682 +
48683 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
48684 + printk("%s\n", buf);
48685 +
48686 + return;
48687 +}
48688 +
48689 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
48690 +{
48691 + int logtype;
48692 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
48693 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
48694 + void *voidptr = NULL;
48695 + int num1 = 0, num2 = 0;
48696 + unsigned long ulong1 = 0, ulong2 = 0;
48697 + struct dentry *dentry = NULL;
48698 + struct vfsmount *mnt = NULL;
48699 + struct file *file = NULL;
48700 + struct task_struct *task = NULL;
48701 + const struct cred *cred, *pcred;
48702 + va_list ap;
48703 +
48704 + BEGIN_LOCKS(audit);
48705 + logtype = gr_log_start(audit);
48706 + if (logtype == FLOODING) {
48707 + END_LOCKS(audit);
48708 + return;
48709 + }
48710 + va_start(ap, argtypes);
48711 + switch (argtypes) {
48712 + case GR_TTYSNIFF:
48713 + task = va_arg(ap, struct task_struct *);
48714 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
48715 + break;
48716 + case GR_SYSCTL_HIDDEN:
48717 + str1 = va_arg(ap, char *);
48718 + gr_log_middle_varargs(audit, msg, result, str1);
48719 + break;
48720 + case GR_RBAC:
48721 + dentry = va_arg(ap, struct dentry *);
48722 + mnt = va_arg(ap, struct vfsmount *);
48723 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
48724 + break;
48725 + case GR_RBAC_STR:
48726 + dentry = va_arg(ap, struct dentry *);
48727 + mnt = va_arg(ap, struct vfsmount *);
48728 + str1 = va_arg(ap, char *);
48729 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
48730 + break;
48731 + case GR_STR_RBAC:
48732 + str1 = va_arg(ap, char *);
48733 + dentry = va_arg(ap, struct dentry *);
48734 + mnt = va_arg(ap, struct vfsmount *);
48735 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
48736 + break;
48737 + case GR_RBAC_MODE2:
48738 + dentry = va_arg(ap, struct dentry *);
48739 + mnt = va_arg(ap, struct vfsmount *);
48740 + str1 = va_arg(ap, char *);
48741 + str2 = va_arg(ap, char *);
48742 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
48743 + break;
48744 + case GR_RBAC_MODE3:
48745 + dentry = va_arg(ap, struct dentry *);
48746 + mnt = va_arg(ap, struct vfsmount *);
48747 + str1 = va_arg(ap, char *);
48748 + str2 = va_arg(ap, char *);
48749 + str3 = va_arg(ap, char *);
48750 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
48751 + break;
48752 + case GR_FILENAME:
48753 + dentry = va_arg(ap, struct dentry *);
48754 + mnt = va_arg(ap, struct vfsmount *);
48755 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
48756 + break;
48757 + case GR_STR_FILENAME:
48758 + str1 = va_arg(ap, char *);
48759 + dentry = va_arg(ap, struct dentry *);
48760 + mnt = va_arg(ap, struct vfsmount *);
48761 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
48762 + break;
48763 + case GR_FILENAME_STR:
48764 + dentry = va_arg(ap, struct dentry *);
48765 + mnt = va_arg(ap, struct vfsmount *);
48766 + str1 = va_arg(ap, char *);
48767 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
48768 + break;
48769 + case GR_FILENAME_TWO_INT:
48770 + dentry = va_arg(ap, struct dentry *);
48771 + mnt = va_arg(ap, struct vfsmount *);
48772 + num1 = va_arg(ap, int);
48773 + num2 = va_arg(ap, int);
48774 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
48775 + break;
48776 + case GR_FILENAME_TWO_INT_STR:
48777 + dentry = va_arg(ap, struct dentry *);
48778 + mnt = va_arg(ap, struct vfsmount *);
48779 + num1 = va_arg(ap, int);
48780 + num2 = va_arg(ap, int);
48781 + str1 = va_arg(ap, char *);
48782 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
48783 + break;
48784 + case GR_TEXTREL:
48785 + file = va_arg(ap, struct file *);
48786 + ulong1 = va_arg(ap, unsigned long);
48787 + ulong2 = va_arg(ap, unsigned long);
48788 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
48789 + break;
48790 + case GR_PTRACE:
48791 + task = va_arg(ap, struct task_struct *);
48792 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
48793 + break;
48794 + case GR_RESOURCE:
48795 + task = va_arg(ap, struct task_struct *);
48796 + cred = __task_cred(task);
48797 + pcred = __task_cred(task->real_parent);
48798 + ulong1 = va_arg(ap, unsigned long);
48799 + str1 = va_arg(ap, char *);
48800 + ulong2 = va_arg(ap, unsigned long);
48801 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48802 + break;
48803 + case GR_CAP:
48804 + task = va_arg(ap, struct task_struct *);
48805 + cred = __task_cred(task);
48806 + pcred = __task_cred(task->real_parent);
48807 + str1 = va_arg(ap, char *);
48808 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48809 + break;
48810 + case GR_SIG:
48811 + str1 = va_arg(ap, char *);
48812 + voidptr = va_arg(ap, void *);
48813 + gr_log_middle_varargs(audit, msg, str1, voidptr);
48814 + break;
48815 + case GR_SIG2:
48816 + task = va_arg(ap, struct task_struct *);
48817 + cred = __task_cred(task);
48818 + pcred = __task_cred(task->real_parent);
48819 + num1 = va_arg(ap, int);
48820 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48821 + break;
48822 + case GR_CRASH1:
48823 + task = va_arg(ap, struct task_struct *);
48824 + cred = __task_cred(task);
48825 + pcred = __task_cred(task->real_parent);
48826 + ulong1 = va_arg(ap, unsigned long);
48827 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
48828 + break;
48829 + case GR_CRASH2:
48830 + task = va_arg(ap, struct task_struct *);
48831 + cred = __task_cred(task);
48832 + pcred = __task_cred(task->real_parent);
48833 + ulong1 = va_arg(ap, unsigned long);
48834 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
48835 + break;
48836 + case GR_RWXMAP:
48837 + file = va_arg(ap, struct file *);
48838 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
48839 + break;
48840 + case GR_PSACCT:
48841 + {
48842 + unsigned int wday, cday;
48843 + __u8 whr, chr;
48844 + __u8 wmin, cmin;
48845 + __u8 wsec, csec;
48846 + char cur_tty[64] = { 0 };
48847 + char parent_tty[64] = { 0 };
48848 +
48849 + task = va_arg(ap, struct task_struct *);
48850 + wday = va_arg(ap, unsigned int);
48851 + cday = va_arg(ap, unsigned int);
48852 + whr = va_arg(ap, int);
48853 + chr = va_arg(ap, int);
48854 + wmin = va_arg(ap, int);
48855 + cmin = va_arg(ap, int);
48856 + wsec = va_arg(ap, int);
48857 + csec = va_arg(ap, int);
48858 + ulong1 = va_arg(ap, unsigned long);
48859 + cred = __task_cred(task);
48860 + pcred = __task_cred(task->real_parent);
48861 +
48862 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48863 + }
48864 + break;
48865 + default:
48866 + gr_log_middle(audit, msg, ap);
48867 + }
48868 + va_end(ap);
48869 + gr_log_end(audit);
48870 + END_LOCKS(audit);
48871 +}
48872 diff -urNp linux-2.6.39.4/grsecurity/grsec_mem.c linux-2.6.39.4/grsecurity/grsec_mem.c
48873 --- linux-2.6.39.4/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
48874 +++ linux-2.6.39.4/grsecurity/grsec_mem.c 2011-08-05 19:44:37.000000000 -0400
48875 @@ -0,0 +1,33 @@
48876 +#include <linux/kernel.h>
48877 +#include <linux/sched.h>
48878 +#include <linux/mm.h>
48879 +#include <linux/mman.h>
48880 +#include <linux/grinternal.h>
48881 +
48882 +void
48883 +gr_handle_ioperm(void)
48884 +{
48885 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
48886 + return;
48887 +}
48888 +
48889 +void
48890 +gr_handle_iopl(void)
48891 +{
48892 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
48893 + return;
48894 +}
48895 +
48896 +void
48897 +gr_handle_mem_readwrite(u64 from, u64 to)
48898 +{
48899 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
48900 + return;
48901 +}
48902 +
48903 +void
48904 +gr_handle_vm86(void)
48905 +{
48906 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
48907 + return;
48908 +}
48909 diff -urNp linux-2.6.39.4/grsecurity/grsec_mount.c linux-2.6.39.4/grsecurity/grsec_mount.c
48910 --- linux-2.6.39.4/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
48911 +++ linux-2.6.39.4/grsecurity/grsec_mount.c 2011-08-05 19:44:37.000000000 -0400
48912 @@ -0,0 +1,62 @@
48913 +#include <linux/kernel.h>
48914 +#include <linux/sched.h>
48915 +#include <linux/mount.h>
48916 +#include <linux/grsecurity.h>
48917 +#include <linux/grinternal.h>
48918 +
48919 +void
48920 +gr_log_remount(const char *devname, const int retval)
48921 +{
48922 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48923 + if (grsec_enable_mount && (retval >= 0))
48924 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
48925 +#endif
48926 + return;
48927 +}
48928 +
48929 +void
48930 +gr_log_unmount(const char *devname, const int retval)
48931 +{
48932 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48933 + if (grsec_enable_mount && (retval >= 0))
48934 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
48935 +#endif
48936 + return;
48937 +}
48938 +
48939 +void
48940 +gr_log_mount(const char *from, const char *to, const int retval)
48941 +{
48942 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48943 + if (grsec_enable_mount && (retval >= 0))
48944 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
48945 +#endif
48946 + return;
48947 +}
48948 +
48949 +int
48950 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
48951 +{
48952 +#ifdef CONFIG_GRKERNSEC_ROFS
48953 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
48954 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
48955 + return -EPERM;
48956 + } else
48957 + return 0;
48958 +#endif
48959 + return 0;
48960 +}
48961 +
48962 +int
48963 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
48964 +{
48965 +#ifdef CONFIG_GRKERNSEC_ROFS
48966 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
48967 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
48968 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
48969 + return -EPERM;
48970 + } else
48971 + return 0;
48972 +#endif
48973 + return 0;
48974 +}
48975 diff -urNp linux-2.6.39.4/grsecurity/grsec_pax.c linux-2.6.39.4/grsecurity/grsec_pax.c
48976 --- linux-2.6.39.4/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
48977 +++ linux-2.6.39.4/grsecurity/grsec_pax.c 2011-08-05 19:44:37.000000000 -0400
48978 @@ -0,0 +1,36 @@
48979 +#include <linux/kernel.h>
48980 +#include <linux/sched.h>
48981 +#include <linux/mm.h>
48982 +#include <linux/file.h>
48983 +#include <linux/grinternal.h>
48984 +#include <linux/grsecurity.h>
48985 +
48986 +void
48987 +gr_log_textrel(struct vm_area_struct * vma)
48988 +{
48989 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48990 + if (grsec_enable_audit_textrel)
48991 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
48992 +#endif
48993 + return;
48994 +}
48995 +
48996 +void
48997 +gr_log_rwxmmap(struct file *file)
48998 +{
48999 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
49000 + if (grsec_enable_log_rwxmaps)
49001 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
49002 +#endif
49003 + return;
49004 +}
49005 +
49006 +void
49007 +gr_log_rwxmprotect(struct file *file)
49008 +{
49009 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
49010 + if (grsec_enable_log_rwxmaps)
49011 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
49012 +#endif
49013 + return;
49014 +}
49015 diff -urNp linux-2.6.39.4/grsecurity/grsec_ptrace.c linux-2.6.39.4/grsecurity/grsec_ptrace.c
49016 --- linux-2.6.39.4/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
49017 +++ linux-2.6.39.4/grsecurity/grsec_ptrace.c 2011-08-05 19:44:37.000000000 -0400
49018 @@ -0,0 +1,14 @@
49019 +#include <linux/kernel.h>
49020 +#include <linux/sched.h>
49021 +#include <linux/grinternal.h>
49022 +#include <linux/grsecurity.h>
49023 +
49024 +void
49025 +gr_audit_ptrace(struct task_struct *task)
49026 +{
49027 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
49028 + if (grsec_enable_audit_ptrace)
49029 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
49030 +#endif
49031 + return;
49032 +}
49033 diff -urNp linux-2.6.39.4/grsecurity/grsec_sig.c linux-2.6.39.4/grsecurity/grsec_sig.c
49034 --- linux-2.6.39.4/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
49035 +++ linux-2.6.39.4/grsecurity/grsec_sig.c 2011-08-05 19:44:37.000000000 -0400
49036 @@ -0,0 +1,206 @@
49037 +#include <linux/kernel.h>
49038 +#include <linux/sched.h>
49039 +#include <linux/delay.h>
49040 +#include <linux/grsecurity.h>
49041 +#include <linux/grinternal.h>
49042 +#include <linux/hardirq.h>
49043 +
49044 +char *signames[] = {
49045 + [SIGSEGV] = "Segmentation fault",
49046 + [SIGILL] = "Illegal instruction",
49047 + [SIGABRT] = "Abort",
49048 + [SIGBUS] = "Invalid alignment/Bus error"
49049 +};
49050 +
49051 +void
49052 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
49053 +{
49054 +#ifdef CONFIG_GRKERNSEC_SIGNAL
49055 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
49056 + (sig == SIGABRT) || (sig == SIGBUS))) {
49057 + if (t->pid == current->pid) {
49058 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
49059 + } else {
49060 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
49061 + }
49062 + }
49063 +#endif
49064 + return;
49065 +}
49066 +
49067 +int
49068 +gr_handle_signal(const struct task_struct *p, const int sig)
49069 +{
49070 +#ifdef CONFIG_GRKERNSEC
49071 + if (current->pid > 1 && gr_check_protected_task(p)) {
49072 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
49073 + return -EPERM;
49074 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
49075 + return -EPERM;
49076 + }
49077 +#endif
49078 + return 0;
49079 +}
49080 +
49081 +#ifdef CONFIG_GRKERNSEC
49082 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
49083 +
49084 +int gr_fake_force_sig(int sig, struct task_struct *t)
49085 +{
49086 + unsigned long int flags;
49087 + int ret, blocked, ignored;
49088 + struct k_sigaction *action;
49089 +
49090 + spin_lock_irqsave(&t->sighand->siglock, flags);
49091 + action = &t->sighand->action[sig-1];
49092 + ignored = action->sa.sa_handler == SIG_IGN;
49093 + blocked = sigismember(&t->blocked, sig);
49094 + if (blocked || ignored) {
49095 + action->sa.sa_handler = SIG_DFL;
49096 + if (blocked) {
49097 + sigdelset(&t->blocked, sig);
49098 + recalc_sigpending_and_wake(t);
49099 + }
49100 + }
49101 + if (action->sa.sa_handler == SIG_DFL)
49102 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
49103 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
49104 +
49105 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
49106 +
49107 + return ret;
49108 +}
49109 +#endif
49110 +
49111 +#ifdef CONFIG_GRKERNSEC_BRUTE
49112 +#define GR_USER_BAN_TIME (15 * 60)
49113 +
49114 +static int __get_dumpable(unsigned long mm_flags)
49115 +{
49116 + int ret;
49117 +
49118 + ret = mm_flags & MMF_DUMPABLE_MASK;
49119 + return (ret >= 2) ? 2 : ret;
49120 +}
49121 +#endif
49122 +
49123 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
49124 +{
49125 +#ifdef CONFIG_GRKERNSEC_BRUTE
49126 + uid_t uid = 0;
49127 +
49128 + if (!grsec_enable_brute)
49129 + return;
49130 +
49131 + rcu_read_lock();
49132 + read_lock(&tasklist_lock);
49133 + read_lock(&grsec_exec_file_lock);
49134 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
49135 + p->real_parent->brute = 1;
49136 + else {
49137 + const struct cred *cred = __task_cred(p), *cred2;
49138 + struct task_struct *tsk, *tsk2;
49139 +
49140 + if (!__get_dumpable(mm_flags) && cred->uid) {
49141 + struct user_struct *user;
49142 +
49143 + uid = cred->uid;
49144 +
49145 + /* this is put upon execution past expiration */
49146 + user = find_user(uid);
49147 + if (user == NULL)
49148 + goto unlock;
49149 + user->banned = 1;
49150 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
49151 + if (user->ban_expires == ~0UL)
49152 + user->ban_expires--;
49153 +
49154 + do_each_thread(tsk2, tsk) {
49155 + cred2 = __task_cred(tsk);
49156 + if (tsk != p && cred2->uid == uid)
49157 + gr_fake_force_sig(SIGKILL, tsk);
49158 + } while_each_thread(tsk2, tsk);
49159 + }
49160 + }
49161 +unlock:
49162 + read_unlock(&grsec_exec_file_lock);
49163 + read_unlock(&tasklist_lock);
49164 + rcu_read_unlock();
49165 +
49166 + if (uid)
49167 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
49168 +
49169 +#endif
49170 + return;
49171 +}
49172 +
49173 +void gr_handle_brute_check(void)
49174 +{
49175 +#ifdef CONFIG_GRKERNSEC_BRUTE
49176 + if (current->brute)
49177 + msleep(30 * 1000);
49178 +#endif
49179 + return;
49180 +}
49181 +
49182 +void gr_handle_kernel_exploit(void)
49183 +{
49184 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
49185 + const struct cred *cred;
49186 + struct task_struct *tsk, *tsk2;
49187 + struct user_struct *user;
49188 + uid_t uid;
49189 +
49190 + if (in_irq() || in_serving_softirq() || in_nmi())
49191 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
49192 +
49193 + uid = current_uid();
49194 +
49195 + if (uid == 0)
49196 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
49197 + else {
49198 + /* kill all the processes of this user, hold a reference
49199 + to their creds struct, and prevent them from creating
49200 + another process until system reset
49201 + */
49202 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
49203 + /* we intentionally leak this ref */
49204 + user = get_uid(current->cred->user);
49205 + if (user) {
49206 + user->banned = 1;
49207 + user->ban_expires = ~0UL;
49208 + }
49209 +
49210 + read_lock(&tasklist_lock);
49211 + do_each_thread(tsk2, tsk) {
49212 + cred = __task_cred(tsk);
49213 + if (cred->uid == uid)
49214 + gr_fake_force_sig(SIGKILL, tsk);
49215 + } while_each_thread(tsk2, tsk);
49216 + read_unlock(&tasklist_lock);
49217 + }
49218 +#endif
49219 +}
49220 +
49221 +int __gr_process_user_ban(struct user_struct *user)
49222 +{
49223 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49224 + if (unlikely(user->banned)) {
49225 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
49226 + user->banned = 0;
49227 + user->ban_expires = 0;
49228 + free_uid(user);
49229 + } else
49230 + return -EPERM;
49231 + }
49232 +#endif
49233 + return 0;
49234 +}
49235 +
49236 +int gr_process_user_ban(void)
49237 +{
49238 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49239 + return __gr_process_user_ban(current->cred->user);
49240 +#endif
49241 + return 0;
49242 +}
49243 diff -urNp linux-2.6.39.4/grsecurity/grsec_sock.c linux-2.6.39.4/grsecurity/grsec_sock.c
49244 --- linux-2.6.39.4/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
49245 +++ linux-2.6.39.4/grsecurity/grsec_sock.c 2011-08-05 19:44:37.000000000 -0400
49246 @@ -0,0 +1,244 @@
49247 +#include <linux/kernel.h>
49248 +#include <linux/module.h>
49249 +#include <linux/sched.h>
49250 +#include <linux/file.h>
49251 +#include <linux/net.h>
49252 +#include <linux/in.h>
49253 +#include <linux/ip.h>
49254 +#include <net/sock.h>
49255 +#include <net/inet_sock.h>
49256 +#include <linux/grsecurity.h>
49257 +#include <linux/grinternal.h>
49258 +#include <linux/gracl.h>
49259 +
49260 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
49261 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
49262 +
49263 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
49264 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
49265 +
49266 +#ifdef CONFIG_UNIX_MODULE
49267 +EXPORT_SYMBOL(gr_acl_handle_unix);
49268 +EXPORT_SYMBOL(gr_acl_handle_mknod);
49269 +EXPORT_SYMBOL(gr_handle_chroot_unix);
49270 +EXPORT_SYMBOL(gr_handle_create);
49271 +#endif
49272 +
49273 +#ifdef CONFIG_GRKERNSEC
49274 +#define gr_conn_table_size 32749
49275 +struct conn_table_entry {
49276 + struct conn_table_entry *next;
49277 + struct signal_struct *sig;
49278 +};
49279 +
49280 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
49281 +DEFINE_SPINLOCK(gr_conn_table_lock);
49282 +
49283 +extern const char * gr_socktype_to_name(unsigned char type);
49284 +extern const char * gr_proto_to_name(unsigned char proto);
49285 +extern const char * gr_sockfamily_to_name(unsigned char family);
49286 +
49287 +static __inline__ int
49288 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
49289 +{
49290 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
49291 +}
49292 +
49293 +static __inline__ int
49294 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
49295 + __u16 sport, __u16 dport)
49296 +{
49297 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
49298 + sig->gr_sport == sport && sig->gr_dport == dport))
49299 + return 1;
49300 + else
49301 + return 0;
49302 +}
49303 +
49304 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
49305 +{
49306 + struct conn_table_entry **match;
49307 + unsigned int index;
49308 +
49309 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
49310 + sig->gr_sport, sig->gr_dport,
49311 + gr_conn_table_size);
49312 +
49313 + newent->sig = sig;
49314 +
49315 + match = &gr_conn_table[index];
49316 + newent->next = *match;
49317 + *match = newent;
49318 +
49319 + return;
49320 +}
49321 +
49322 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
49323 +{
49324 + struct conn_table_entry *match, *last = NULL;
49325 + unsigned int index;
49326 +
49327 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
49328 + sig->gr_sport, sig->gr_dport,
49329 + gr_conn_table_size);
49330 +
49331 + match = gr_conn_table[index];
49332 + while (match && !conn_match(match->sig,
49333 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
49334 + sig->gr_dport)) {
49335 + last = match;
49336 + match = match->next;
49337 + }
49338 +
49339 + if (match) {
49340 + if (last)
49341 + last->next = match->next;
49342 + else
49343 + gr_conn_table[index] = NULL;
49344 + kfree(match);
49345 + }
49346 +
49347 + return;
49348 +}
49349 +
49350 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
49351 + __u16 sport, __u16 dport)
49352 +{
49353 + struct conn_table_entry *match;
49354 + unsigned int index;
49355 +
49356 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
49357 +
49358 + match = gr_conn_table[index];
49359 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
49360 + match = match->next;
49361 +
49362 + if (match)
49363 + return match->sig;
49364 + else
49365 + return NULL;
49366 +}
49367 +
49368 +#endif
49369 +
49370 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
49371 +{
49372 +#ifdef CONFIG_GRKERNSEC
49373 + struct signal_struct *sig = task->signal;
49374 + struct conn_table_entry *newent;
49375 +
49376 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
49377 + if (newent == NULL)
49378 + return;
49379 + /* no bh lock needed since we are called with bh disabled */
49380 + spin_lock(&gr_conn_table_lock);
49381 + gr_del_task_from_ip_table_nolock(sig);
49382 + sig->gr_saddr = inet->inet_rcv_saddr;
49383 + sig->gr_daddr = inet->inet_daddr;
49384 + sig->gr_sport = inet->inet_sport;
49385 + sig->gr_dport = inet->inet_dport;
49386 + gr_add_to_task_ip_table_nolock(sig, newent);
49387 + spin_unlock(&gr_conn_table_lock);
49388 +#endif
49389 + return;
49390 +}
49391 +
49392 +void gr_del_task_from_ip_table(struct task_struct *task)
49393 +{
49394 +#ifdef CONFIG_GRKERNSEC
49395 + spin_lock_bh(&gr_conn_table_lock);
49396 + gr_del_task_from_ip_table_nolock(task->signal);
49397 + spin_unlock_bh(&gr_conn_table_lock);
49398 +#endif
49399 + return;
49400 +}
49401 +
49402 +void
49403 +gr_attach_curr_ip(const struct sock *sk)
49404 +{
49405 +#ifdef CONFIG_GRKERNSEC
49406 + struct signal_struct *p, *set;
49407 + const struct inet_sock *inet = inet_sk(sk);
49408 +
49409 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
49410 + return;
49411 +
49412 + set = current->signal;
49413 +
49414 + spin_lock_bh(&gr_conn_table_lock);
49415 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
49416 + inet->inet_dport, inet->inet_sport);
49417 + if (unlikely(p != NULL)) {
49418 + set->curr_ip = p->curr_ip;
49419 + set->used_accept = 1;
49420 + gr_del_task_from_ip_table_nolock(p);
49421 + spin_unlock_bh(&gr_conn_table_lock);
49422 + return;
49423 + }
49424 + spin_unlock_bh(&gr_conn_table_lock);
49425 +
49426 + set->curr_ip = inet->inet_daddr;
49427 + set->used_accept = 1;
49428 +#endif
49429 + return;
49430 +}
49431 +
49432 +int
49433 +gr_handle_sock_all(const int family, const int type, const int protocol)
49434 +{
49435 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49436 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
49437 + (family != AF_UNIX)) {
49438 + if (family == AF_INET)
49439 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
49440 + else
49441 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
49442 + return -EACCES;
49443 + }
49444 +#endif
49445 + return 0;
49446 +}
49447 +
49448 +int
49449 +gr_handle_sock_server(const struct sockaddr *sck)
49450 +{
49451 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49452 + if (grsec_enable_socket_server &&
49453 + in_group_p(grsec_socket_server_gid) &&
49454 + sck && (sck->sa_family != AF_UNIX) &&
49455 + (sck->sa_family != AF_LOCAL)) {
49456 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
49457 + return -EACCES;
49458 + }
49459 +#endif
49460 + return 0;
49461 +}
49462 +
49463 +int
49464 +gr_handle_sock_server_other(const struct sock *sck)
49465 +{
49466 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49467 + if (grsec_enable_socket_server &&
49468 + in_group_p(grsec_socket_server_gid) &&
49469 + sck && (sck->sk_family != AF_UNIX) &&
49470 + (sck->sk_family != AF_LOCAL)) {
49471 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
49472 + return -EACCES;
49473 + }
49474 +#endif
49475 + return 0;
49476 +}
49477 +
49478 +int
49479 +gr_handle_sock_client(const struct sockaddr *sck)
49480 +{
49481 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49482 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
49483 + sck && (sck->sa_family != AF_UNIX) &&
49484 + (sck->sa_family != AF_LOCAL)) {
49485 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
49486 + return -EACCES;
49487 + }
49488 +#endif
49489 + return 0;
49490 +}
49491 diff -urNp linux-2.6.39.4/grsecurity/grsec_sysctl.c linux-2.6.39.4/grsecurity/grsec_sysctl.c
49492 --- linux-2.6.39.4/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
49493 +++ linux-2.6.39.4/grsecurity/grsec_sysctl.c 2011-08-05 19:44:37.000000000 -0400
49494 @@ -0,0 +1,442 @@
49495 +#include <linux/kernel.h>
49496 +#include <linux/sched.h>
49497 +#include <linux/sysctl.h>
49498 +#include <linux/grsecurity.h>
49499 +#include <linux/grinternal.h>
49500 +
49501 +int
49502 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
49503 +{
49504 +#ifdef CONFIG_GRKERNSEC_SYSCTL
49505 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
49506 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
49507 + return -EACCES;
49508 + }
49509 +#endif
49510 + return 0;
49511 +}
49512 +
49513 +#ifdef CONFIG_GRKERNSEC_ROFS
49514 +static int __maybe_unused one = 1;
49515 +#endif
49516 +
49517 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
49518 +struct ctl_table grsecurity_table[] = {
49519 +#ifdef CONFIG_GRKERNSEC_SYSCTL
49520 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
49521 +#ifdef CONFIG_GRKERNSEC_IO
49522 + {
49523 + .procname = "disable_priv_io",
49524 + .data = &grsec_disable_privio,
49525 + .maxlen = sizeof(int),
49526 + .mode = 0600,
49527 + .proc_handler = &proc_dointvec,
49528 + },
49529 +#endif
49530 +#endif
49531 +#ifdef CONFIG_GRKERNSEC_LINK
49532 + {
49533 + .procname = "linking_restrictions",
49534 + .data = &grsec_enable_link,
49535 + .maxlen = sizeof(int),
49536 + .mode = 0600,
49537 + .proc_handler = &proc_dointvec,
49538 + },
49539 +#endif
49540 +#ifdef CONFIG_GRKERNSEC_BRUTE
49541 + {
49542 + .procname = "deter_bruteforce",
49543 + .data = &grsec_enable_brute,
49544 + .maxlen = sizeof(int),
49545 + .mode = 0600,
49546 + .proc_handler = &proc_dointvec,
49547 + },
49548 +#endif
49549 +#ifdef CONFIG_GRKERNSEC_FIFO
49550 + {
49551 + .procname = "fifo_restrictions",
49552 + .data = &grsec_enable_fifo,
49553 + .maxlen = sizeof(int),
49554 + .mode = 0600,
49555 + .proc_handler = &proc_dointvec,
49556 + },
49557 +#endif
49558 +#ifdef CONFIG_GRKERNSEC_EXECVE
49559 + {
49560 + .procname = "execve_limiting",
49561 + .data = &grsec_enable_execve,
49562 + .maxlen = sizeof(int),
49563 + .mode = 0600,
49564 + .proc_handler = &proc_dointvec,
49565 + },
49566 +#endif
49567 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
49568 + {
49569 + .procname = "ip_blackhole",
49570 + .data = &grsec_enable_blackhole,
49571 + .maxlen = sizeof(int),
49572 + .mode = 0600,
49573 + .proc_handler = &proc_dointvec,
49574 + },
49575 + {
49576 + .procname = "lastack_retries",
49577 + .data = &grsec_lastack_retries,
49578 + .maxlen = sizeof(int),
49579 + .mode = 0600,
49580 + .proc_handler = &proc_dointvec,
49581 + },
49582 +#endif
49583 +#ifdef CONFIG_GRKERNSEC_EXECLOG
49584 + {
49585 + .procname = "exec_logging",
49586 + .data = &grsec_enable_execlog,
49587 + .maxlen = sizeof(int),
49588 + .mode = 0600,
49589 + .proc_handler = &proc_dointvec,
49590 + },
49591 +#endif
49592 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
49593 + {
49594 + .procname = "rwxmap_logging",
49595 + .data = &grsec_enable_log_rwxmaps,
49596 + .maxlen = sizeof(int),
49597 + .mode = 0600,
49598 + .proc_handler = &proc_dointvec,
49599 + },
49600 +#endif
49601 +#ifdef CONFIG_GRKERNSEC_SIGNAL
49602 + {
49603 + .procname = "signal_logging",
49604 + .data = &grsec_enable_signal,
49605 + .maxlen = sizeof(int),
49606 + .mode = 0600,
49607 + .proc_handler = &proc_dointvec,
49608 + },
49609 +#endif
49610 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
49611 + {
49612 + .procname = "forkfail_logging",
49613 + .data = &grsec_enable_forkfail,
49614 + .maxlen = sizeof(int),
49615 + .mode = 0600,
49616 + .proc_handler = &proc_dointvec,
49617 + },
49618 +#endif
49619 +#ifdef CONFIG_GRKERNSEC_TIME
49620 + {
49621 + .procname = "timechange_logging",
49622 + .data = &grsec_enable_time,
49623 + .maxlen = sizeof(int),
49624 + .mode = 0600,
49625 + .proc_handler = &proc_dointvec,
49626 + },
49627 +#endif
49628 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
49629 + {
49630 + .procname = "chroot_deny_shmat",
49631 + .data = &grsec_enable_chroot_shmat,
49632 + .maxlen = sizeof(int),
49633 + .mode = 0600,
49634 + .proc_handler = &proc_dointvec,
49635 + },
49636 +#endif
49637 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
49638 + {
49639 + .procname = "chroot_deny_unix",
49640 + .data = &grsec_enable_chroot_unix,
49641 + .maxlen = sizeof(int),
49642 + .mode = 0600,
49643 + .proc_handler = &proc_dointvec,
49644 + },
49645 +#endif
49646 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
49647 + {
49648 + .procname = "chroot_deny_mount",
49649 + .data = &grsec_enable_chroot_mount,
49650 + .maxlen = sizeof(int),
49651 + .mode = 0600,
49652 + .proc_handler = &proc_dointvec,
49653 + },
49654 +#endif
49655 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
49656 + {
49657 + .procname = "chroot_deny_fchdir",
49658 + .data = &grsec_enable_chroot_fchdir,
49659 + .maxlen = sizeof(int),
49660 + .mode = 0600,
49661 + .proc_handler = &proc_dointvec,
49662 + },
49663 +#endif
49664 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
49665 + {
49666 + .procname = "chroot_deny_chroot",
49667 + .data = &grsec_enable_chroot_double,
49668 + .maxlen = sizeof(int),
49669 + .mode = 0600,
49670 + .proc_handler = &proc_dointvec,
49671 + },
49672 +#endif
49673 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
49674 + {
49675 + .procname = "chroot_deny_pivot",
49676 + .data = &grsec_enable_chroot_pivot,
49677 + .maxlen = sizeof(int),
49678 + .mode = 0600,
49679 + .proc_handler = &proc_dointvec,
49680 + },
49681 +#endif
49682 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
49683 + {
49684 + .procname = "chroot_enforce_chdir",
49685 + .data = &grsec_enable_chroot_chdir,
49686 + .maxlen = sizeof(int),
49687 + .mode = 0600,
49688 + .proc_handler = &proc_dointvec,
49689 + },
49690 +#endif
49691 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
49692 + {
49693 + .procname = "chroot_deny_chmod",
49694 + .data = &grsec_enable_chroot_chmod,
49695 + .maxlen = sizeof(int),
49696 + .mode = 0600,
49697 + .proc_handler = &proc_dointvec,
49698 + },
49699 +#endif
49700 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
49701 + {
49702 + .procname = "chroot_deny_mknod",
49703 + .data = &grsec_enable_chroot_mknod,
49704 + .maxlen = sizeof(int),
49705 + .mode = 0600,
49706 + .proc_handler = &proc_dointvec,
49707 + },
49708 +#endif
49709 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49710 + {
49711 + .procname = "chroot_restrict_nice",
49712 + .data = &grsec_enable_chroot_nice,
49713 + .maxlen = sizeof(int),
49714 + .mode = 0600,
49715 + .proc_handler = &proc_dointvec,
49716 + },
49717 +#endif
49718 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
49719 + {
49720 + .procname = "chroot_execlog",
49721 + .data = &grsec_enable_chroot_execlog,
49722 + .maxlen = sizeof(int),
49723 + .mode = 0600,
49724 + .proc_handler = &proc_dointvec,
49725 + },
49726 +#endif
49727 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49728 + {
49729 + .procname = "chroot_caps",
49730 + .data = &grsec_enable_chroot_caps,
49731 + .maxlen = sizeof(int),
49732 + .mode = 0600,
49733 + .proc_handler = &proc_dointvec,
49734 + },
49735 +#endif
49736 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
49737 + {
49738 + .procname = "chroot_deny_sysctl",
49739 + .data = &grsec_enable_chroot_sysctl,
49740 + .maxlen = sizeof(int),
49741 + .mode = 0600,
49742 + .proc_handler = &proc_dointvec,
49743 + },
49744 +#endif
49745 +#ifdef CONFIG_GRKERNSEC_TPE
49746 + {
49747 + .procname = "tpe",
49748 + .data = &grsec_enable_tpe,
49749 + .maxlen = sizeof(int),
49750 + .mode = 0600,
49751 + .proc_handler = &proc_dointvec,
49752 + },
49753 + {
49754 + .procname = "tpe_gid",
49755 + .data = &grsec_tpe_gid,
49756 + .maxlen = sizeof(int),
49757 + .mode = 0600,
49758 + .proc_handler = &proc_dointvec,
49759 + },
49760 +#endif
49761 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49762 + {
49763 + .procname = "tpe_invert",
49764 + .data = &grsec_enable_tpe_invert,
49765 + .maxlen = sizeof(int),
49766 + .mode = 0600,
49767 + .proc_handler = &proc_dointvec,
49768 + },
49769 +#endif
49770 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
49771 + {
49772 + .procname = "tpe_restrict_all",
49773 + .data = &grsec_enable_tpe_all,
49774 + .maxlen = sizeof(int),
49775 + .mode = 0600,
49776 + .proc_handler = &proc_dointvec,
49777 + },
49778 +#endif
49779 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49780 + {
49781 + .procname = "socket_all",
49782 + .data = &grsec_enable_socket_all,
49783 + .maxlen = sizeof(int),
49784 + .mode = 0600,
49785 + .proc_handler = &proc_dointvec,
49786 + },
49787 + {
49788 + .procname = "socket_all_gid",
49789 + .data = &grsec_socket_all_gid,
49790 + .maxlen = sizeof(int),
49791 + .mode = 0600,
49792 + .proc_handler = &proc_dointvec,
49793 + },
49794 +#endif
49795 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49796 + {
49797 + .procname = "socket_client",
49798 + .data = &grsec_enable_socket_client,
49799 + .maxlen = sizeof(int),
49800 + .mode = 0600,
49801 + .proc_handler = &proc_dointvec,
49802 + },
49803 + {
49804 + .procname = "socket_client_gid",
49805 + .data = &grsec_socket_client_gid,
49806 + .maxlen = sizeof(int),
49807 + .mode = 0600,
49808 + .proc_handler = &proc_dointvec,
49809 + },
49810 +#endif
49811 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49812 + {
49813 + .procname = "socket_server",
49814 + .data = &grsec_enable_socket_server,
49815 + .maxlen = sizeof(int),
49816 + .mode = 0600,
49817 + .proc_handler = &proc_dointvec,
49818 + },
49819 + {
49820 + .procname = "socket_server_gid",
49821 + .data = &grsec_socket_server_gid,
49822 + .maxlen = sizeof(int),
49823 + .mode = 0600,
49824 + .proc_handler = &proc_dointvec,
49825 + },
49826 +#endif
49827 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
49828 + {
49829 + .procname = "audit_group",
49830 + .data = &grsec_enable_group,
49831 + .maxlen = sizeof(int),
49832 + .mode = 0600,
49833 + .proc_handler = &proc_dointvec,
49834 + },
49835 + {
49836 + .procname = "audit_gid",
49837 + .data = &grsec_audit_gid,
49838 + .maxlen = sizeof(int),
49839 + .mode = 0600,
49840 + .proc_handler = &proc_dointvec,
49841 + },
49842 +#endif
49843 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
49844 + {
49845 + .procname = "audit_chdir",
49846 + .data = &grsec_enable_chdir,
49847 + .maxlen = sizeof(int),
49848 + .mode = 0600,
49849 + .proc_handler = &proc_dointvec,
49850 + },
49851 +#endif
49852 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
49853 + {
49854 + .procname = "audit_mount",
49855 + .data = &grsec_enable_mount,
49856 + .maxlen = sizeof(int),
49857 + .mode = 0600,
49858 + .proc_handler = &proc_dointvec,
49859 + },
49860 +#endif
49861 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
49862 + {
49863 + .procname = "audit_textrel",
49864 + .data = &grsec_enable_audit_textrel,
49865 + .maxlen = sizeof(int),
49866 + .mode = 0600,
49867 + .proc_handler = &proc_dointvec,
49868 + },
49869 +#endif
49870 +#ifdef CONFIG_GRKERNSEC_DMESG
49871 + {
49872 + .procname = "dmesg",
49873 + .data = &grsec_enable_dmesg,
49874 + .maxlen = sizeof(int),
49875 + .mode = 0600,
49876 + .proc_handler = &proc_dointvec,
49877 + },
49878 +#endif
49879 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49880 + {
49881 + .procname = "chroot_findtask",
49882 + .data = &grsec_enable_chroot_findtask,
49883 + .maxlen = sizeof(int),
49884 + .mode = 0600,
49885 + .proc_handler = &proc_dointvec,
49886 + },
49887 +#endif
49888 +#ifdef CONFIG_GRKERNSEC_RESLOG
49889 + {
49890 + .procname = "resource_logging",
49891 + .data = &grsec_resource_logging,
49892 + .maxlen = sizeof(int),
49893 + .mode = 0600,
49894 + .proc_handler = &proc_dointvec,
49895 + },
49896 +#endif
49897 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
49898 + {
49899 + .procname = "audit_ptrace",
49900 + .data = &grsec_enable_audit_ptrace,
49901 + .maxlen = sizeof(int),
49902 + .mode = 0600,
49903 + .proc_handler = &proc_dointvec,
49904 + },
49905 +#endif
49906 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
49907 + {
49908 + .procname = "harden_ptrace",
49909 + .data = &grsec_enable_harden_ptrace,
49910 + .maxlen = sizeof(int),
49911 + .mode = 0600,
49912 + .proc_handler = &proc_dointvec,
49913 + },
49914 +#endif
49915 + {
49916 + .procname = "grsec_lock",
49917 + .data = &grsec_lock,
49918 + .maxlen = sizeof(int),
49919 + .mode = 0600,
49920 + .proc_handler = &proc_dointvec,
49921 + },
49922 +#endif
49923 +#ifdef CONFIG_GRKERNSEC_ROFS
49924 + {
49925 + .procname = "romount_protect",
49926 + .data = &grsec_enable_rofs,
49927 + .maxlen = sizeof(int),
49928 + .mode = 0600,
49929 + .proc_handler = &proc_dointvec_minmax,
49930 + .extra1 = &one,
49931 + .extra2 = &one,
49932 + },
49933 +#endif
49934 + { }
49935 +};
49936 +#endif
49937 diff -urNp linux-2.6.39.4/grsecurity/grsec_time.c linux-2.6.39.4/grsecurity/grsec_time.c
49938 --- linux-2.6.39.4/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
49939 +++ linux-2.6.39.4/grsecurity/grsec_time.c 2011-08-05 19:44:37.000000000 -0400
49940 @@ -0,0 +1,16 @@
49941 +#include <linux/kernel.h>
49942 +#include <linux/sched.h>
49943 +#include <linux/grinternal.h>
49944 +#include <linux/module.h>
49945 +
49946 +void
49947 +gr_log_timechange(void)
49948 +{
49949 +#ifdef CONFIG_GRKERNSEC_TIME
49950 + if (grsec_enable_time)
49951 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
49952 +#endif
49953 + return;
49954 +}
49955 +
49956 +EXPORT_SYMBOL(gr_log_timechange);
49957 diff -urNp linux-2.6.39.4/grsecurity/grsec_tpe.c linux-2.6.39.4/grsecurity/grsec_tpe.c
49958 --- linux-2.6.39.4/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
49959 +++ linux-2.6.39.4/grsecurity/grsec_tpe.c 2011-08-05 19:44:37.000000000 -0400
49960 @@ -0,0 +1,39 @@
49961 +#include <linux/kernel.h>
49962 +#include <linux/sched.h>
49963 +#include <linux/file.h>
49964 +#include <linux/fs.h>
49965 +#include <linux/grinternal.h>
49966 +
49967 +extern int gr_acl_tpe_check(void);
49968 +
49969 +int
49970 +gr_tpe_allow(const struct file *file)
49971 +{
49972 +#ifdef CONFIG_GRKERNSEC
49973 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
49974 + const struct cred *cred = current_cred();
49975 +
49976 + if (cred->uid && ((grsec_enable_tpe &&
49977 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49978 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
49979 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
49980 +#else
49981 + in_group_p(grsec_tpe_gid)
49982 +#endif
49983 + ) || gr_acl_tpe_check()) &&
49984 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
49985 + (inode->i_mode & S_IWOTH))))) {
49986 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49987 + return 0;
49988 + }
49989 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
49990 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
49991 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
49992 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
49993 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49994 + return 0;
49995 + }
49996 +#endif
49997 +#endif
49998 + return 1;
49999 +}
50000 diff -urNp linux-2.6.39.4/grsecurity/grsum.c linux-2.6.39.4/grsecurity/grsum.c
50001 --- linux-2.6.39.4/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
50002 +++ linux-2.6.39.4/grsecurity/grsum.c 2011-08-05 19:44:37.000000000 -0400
50003 @@ -0,0 +1,61 @@
50004 +#include <linux/err.h>
50005 +#include <linux/kernel.h>
50006 +#include <linux/sched.h>
50007 +#include <linux/mm.h>
50008 +#include <linux/scatterlist.h>
50009 +#include <linux/crypto.h>
50010 +#include <linux/gracl.h>
50011 +
50012 +
50013 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
50014 +#error "crypto and sha256 must be built into the kernel"
50015 +#endif
50016 +
50017 +int
50018 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
50019 +{
50020 + char *p;
50021 + struct crypto_hash *tfm;
50022 + struct hash_desc desc;
50023 + struct scatterlist sg;
50024 + unsigned char temp_sum[GR_SHA_LEN];
50025 + volatile int retval = 0;
50026 + volatile int dummy = 0;
50027 + unsigned int i;
50028 +
50029 + sg_init_table(&sg, 1);
50030 +
50031 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
50032 + if (IS_ERR(tfm)) {
50033 + /* should never happen, since sha256 should be built in */
50034 + return 1;
50035 + }
50036 +
50037 + desc.tfm = tfm;
50038 + desc.flags = 0;
50039 +
50040 + crypto_hash_init(&desc);
50041 +
50042 + p = salt;
50043 + sg_set_buf(&sg, p, GR_SALT_LEN);
50044 + crypto_hash_update(&desc, &sg, sg.length);
50045 +
50046 + p = entry->pw;
50047 + sg_set_buf(&sg, p, strlen(p));
50048 +
50049 + crypto_hash_update(&desc, &sg, sg.length);
50050 +
50051 + crypto_hash_final(&desc, temp_sum);
50052 +
50053 + memset(entry->pw, 0, GR_PW_LEN);
50054 +
50055 + for (i = 0; i < GR_SHA_LEN; i++)
50056 + if (sum[i] != temp_sum[i])
50057 + retval = 1;
50058 + else
50059 + dummy = 1; // waste a cycle
50060 +
50061 + crypto_free_hash(tfm);
50062 +
50063 + return retval;
50064 +}
50065 diff -urNp linux-2.6.39.4/grsecurity/Kconfig linux-2.6.39.4/grsecurity/Kconfig
50066 --- linux-2.6.39.4/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
50067 +++ linux-2.6.39.4/grsecurity/Kconfig 2011-08-17 19:04:52.000000000 -0400
50068 @@ -0,0 +1,1050 @@
50069 +#
50070 +# grecurity configuration
50071 +#
50072 +
50073 +menu "Grsecurity"
50074 +
50075 +config GRKERNSEC
50076 + bool "Grsecurity"
50077 + select CRYPTO
50078 + select CRYPTO_SHA256
50079 + help
50080 + If you say Y here, you will be able to configure many features
50081 + that will enhance the security of your system. It is highly
50082 + recommended that you say Y here and read through the help
50083 + for each option so that you fully understand the features and
50084 + can evaluate their usefulness for your machine.
50085 +
50086 +choice
50087 + prompt "Security Level"
50088 + depends on GRKERNSEC
50089 + default GRKERNSEC_CUSTOM
50090 +
50091 +config GRKERNSEC_LOW
50092 + bool "Low"
50093 + select GRKERNSEC_LINK
50094 + select GRKERNSEC_FIFO
50095 + select GRKERNSEC_EXECVE
50096 + select GRKERNSEC_RANDNET
50097 + select GRKERNSEC_DMESG
50098 + select GRKERNSEC_CHROOT
50099 + select GRKERNSEC_CHROOT_CHDIR
50100 +
50101 + help
50102 + If you choose this option, several of the grsecurity options will
50103 + be enabled that will give you greater protection against a number
50104 + of attacks, while assuring that none of your software will have any
50105 + conflicts with the additional security measures. If you run a lot
50106 + of unusual software, or you are having problems with the higher
50107 + security levels, you should say Y here. With this option, the
50108 + following features are enabled:
50109 +
50110 + - Linking restrictions
50111 + - FIFO restrictions
50112 + - Enforcing RLIMIT_NPROC on execve
50113 + - Restricted dmesg
50114 + - Enforced chdir("/") on chroot
50115 + - Runtime module disabling
50116 +
50117 +config GRKERNSEC_MEDIUM
50118 + bool "Medium"
50119 + select PAX
50120 + select PAX_EI_PAX
50121 + select PAX_PT_PAX_FLAGS
50122 + select PAX_HAVE_ACL_FLAGS
50123 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
50124 + select GRKERNSEC_CHROOT
50125 + select GRKERNSEC_CHROOT_SYSCTL
50126 + select GRKERNSEC_LINK
50127 + select GRKERNSEC_FIFO
50128 + select GRKERNSEC_EXECVE
50129 + select GRKERNSEC_DMESG
50130 + select GRKERNSEC_RANDNET
50131 + select GRKERNSEC_FORKFAIL
50132 + select GRKERNSEC_TIME
50133 + select GRKERNSEC_SIGNAL
50134 + select GRKERNSEC_CHROOT
50135 + select GRKERNSEC_CHROOT_UNIX
50136 + select GRKERNSEC_CHROOT_MOUNT
50137 + select GRKERNSEC_CHROOT_PIVOT
50138 + select GRKERNSEC_CHROOT_DOUBLE
50139 + select GRKERNSEC_CHROOT_CHDIR
50140 + select GRKERNSEC_CHROOT_MKNOD
50141 + select GRKERNSEC_PROC
50142 + select GRKERNSEC_PROC_USERGROUP
50143 + select PAX_RANDUSTACK
50144 + select PAX_ASLR
50145 + select PAX_RANDMMAP
50146 + select PAX_REFCOUNT if (X86 || SPARC64)
50147 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
50148 +
50149 + help
50150 + If you say Y here, several features in addition to those included
50151 + in the low additional security level will be enabled. These
50152 + features provide even more security to your system, though in rare
50153 + cases they may be incompatible with very old or poorly written
50154 + software. If you enable this option, make sure that your auth
50155 + service (identd) is running as gid 1001. With this option,
50156 + the following features (in addition to those provided in the
50157 + low additional security level) will be enabled:
50158 +
50159 + - Failed fork logging
50160 + - Time change logging
50161 + - Signal logging
50162 + - Deny mounts in chroot
50163 + - Deny double chrooting
50164 + - Deny sysctl writes in chroot
50165 + - Deny mknod in chroot
50166 + - Deny access to abstract AF_UNIX sockets out of chroot
50167 + - Deny pivot_root in chroot
50168 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
50169 + - /proc restrictions with special GID set to 10 (usually wheel)
50170 + - Address Space Layout Randomization (ASLR)
50171 + - Prevent exploitation of most refcount overflows
50172 + - Bounds checking of copying between the kernel and userland
50173 +
50174 +config GRKERNSEC_HIGH
50175 + bool "High"
50176 + select GRKERNSEC_LINK
50177 + select GRKERNSEC_FIFO
50178 + select GRKERNSEC_EXECVE
50179 + select GRKERNSEC_DMESG
50180 + select GRKERNSEC_FORKFAIL
50181 + select GRKERNSEC_TIME
50182 + select GRKERNSEC_SIGNAL
50183 + select GRKERNSEC_CHROOT
50184 + select GRKERNSEC_CHROOT_SHMAT
50185 + select GRKERNSEC_CHROOT_UNIX
50186 + select GRKERNSEC_CHROOT_MOUNT
50187 + select GRKERNSEC_CHROOT_FCHDIR
50188 + select GRKERNSEC_CHROOT_PIVOT
50189 + select GRKERNSEC_CHROOT_DOUBLE
50190 + select GRKERNSEC_CHROOT_CHDIR
50191 + select GRKERNSEC_CHROOT_MKNOD
50192 + select GRKERNSEC_CHROOT_CAPS
50193 + select GRKERNSEC_CHROOT_SYSCTL
50194 + select GRKERNSEC_CHROOT_FINDTASK
50195 + select GRKERNSEC_SYSFS_RESTRICT
50196 + select GRKERNSEC_PROC
50197 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
50198 + select GRKERNSEC_HIDESYM
50199 + select GRKERNSEC_BRUTE
50200 + select GRKERNSEC_PROC_USERGROUP
50201 + select GRKERNSEC_KMEM
50202 + select GRKERNSEC_RESLOG
50203 + select GRKERNSEC_RANDNET
50204 + select GRKERNSEC_PROC_ADD
50205 + select GRKERNSEC_CHROOT_CHMOD
50206 + select GRKERNSEC_CHROOT_NICE
50207 + select GRKERNSEC_AUDIT_MOUNT
50208 + select GRKERNSEC_MODHARDEN if (MODULES)
50209 + select GRKERNSEC_HARDEN_PTRACE
50210 + select GRKERNSEC_VM86 if (X86_32)
50211 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
50212 + select PAX
50213 + select PAX_RANDUSTACK
50214 + select PAX_ASLR
50215 + select PAX_RANDMMAP
50216 + select PAX_NOEXEC
50217 + select PAX_MPROTECT
50218 + select PAX_EI_PAX
50219 + select PAX_PT_PAX_FLAGS
50220 + select PAX_HAVE_ACL_FLAGS
50221 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
50222 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
50223 + select PAX_RANDKSTACK if (X86_TSC && X86)
50224 + select PAX_SEGMEXEC if (X86_32)
50225 + select PAX_PAGEEXEC
50226 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
50227 + select PAX_EMUTRAMP if (PARISC)
50228 + select PAX_EMUSIGRT if (PARISC)
50229 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
50230 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
50231 + select PAX_REFCOUNT if (X86 || SPARC64)
50232 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
50233 + help
50234 + If you say Y here, many of the features of grsecurity will be
50235 + enabled, which will protect you against many kinds of attacks
50236 + against your system. The heightened security comes at a cost
50237 + of an increased chance of incompatibilities with rare software
50238 + on your machine. Since this security level enables PaX, you should
50239 + view <http://pax.grsecurity.net> and read about the PaX
50240 + project. While you are there, download chpax and run it on
50241 + binaries that cause problems with PaX. Also remember that
50242 + since the /proc restrictions are enabled, you must run your
50243 + identd as gid 1001. This security level enables the following
50244 + features in addition to those listed in the low and medium
50245 + security levels:
50246 +
50247 + - Additional /proc restrictions
50248 + - Chmod restrictions in chroot
50249 + - No signals, ptrace, or viewing of processes outside of chroot
50250 + - Capability restrictions in chroot
50251 + - Deny fchdir out of chroot
50252 + - Priority restrictions in chroot
50253 + - Segmentation-based implementation of PaX
50254 + - Mprotect restrictions
50255 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
50256 + - Kernel stack randomization
50257 + - Mount/unmount/remount logging
50258 + - Kernel symbol hiding
50259 + - Prevention of memory exhaustion-based exploits
50260 + - Hardening of module auto-loading
50261 + - Ptrace restrictions
50262 + - Restricted vm86 mode
50263 + - Restricted sysfs/debugfs
50264 + - Active kernel exploit response
50265 +
50266 +config GRKERNSEC_CUSTOM
50267 + bool "Custom"
50268 + help
50269 + If you say Y here, you will be able to configure every grsecurity
50270 + option, which allows you to enable many more features that aren't
50271 + covered in the basic security levels. These additional features
50272 + include TPE, socket restrictions, and the sysctl system for
50273 + grsecurity. It is advised that you read through the help for
50274 + each option to determine its usefulness in your situation.
50275 +
50276 +endchoice
50277 +
50278 +menu "Address Space Protection"
50279 +depends on GRKERNSEC
50280 +
50281 +config GRKERNSEC_KMEM
50282 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
50283 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
50284 + help
50285 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
50286 + be written to via mmap or otherwise to modify the running kernel.
50287 + /dev/port will also not be allowed to be opened. If you have module
50288 + support disabled, enabling this will close up four ways that are
50289 + currently used to insert malicious code into the running kernel.
50290 + Even with all these features enabled, we still highly recommend that
50291 + you use the RBAC system, as it is still possible for an attacker to
50292 + modify the running kernel through privileged I/O granted by ioperm/iopl.
50293 + If you are not using XFree86, you may be able to stop this additional
50294 + case by enabling the 'Disable privileged I/O' option. Though nothing
50295 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
50296 + but only to video memory, which is the only writing we allow in this
50297 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
50298 + not be allowed to mprotect it with PROT_WRITE later.
50299 + It is highly recommended that you say Y here if you meet all the
50300 + conditions above.
50301 +
50302 +config GRKERNSEC_VM86
50303 + bool "Restrict VM86 mode"
50304 + depends on X86_32
50305 +
50306 + help
50307 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
50308 + make use of a special execution mode on 32bit x86 processors called
50309 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
50310 + video cards and will still work with this option enabled. The purpose
50311 + of the option is to prevent exploitation of emulation errors in
50312 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
50313 + Nearly all users should be able to enable this option.
50314 +
50315 +config GRKERNSEC_IO
50316 + bool "Disable privileged I/O"
50317 + depends on X86
50318 + select RTC_CLASS
50319 + select RTC_INTF_DEV
50320 + select RTC_DRV_CMOS
50321 +
50322 + help
50323 + If you say Y here, all ioperm and iopl calls will return an error.
50324 + Ioperm and iopl can be used to modify the running kernel.
50325 + Unfortunately, some programs need this access to operate properly,
50326 + the most notable of which are XFree86 and hwclock. hwclock can be
50327 + remedied by having RTC support in the kernel, so real-time
50328 + clock support is enabled if this option is enabled, to ensure
50329 + that hwclock operates correctly. XFree86 still will not
50330 + operate correctly with this option enabled, so DO NOT CHOOSE Y
50331 + IF YOU USE XFree86. If you use XFree86 and you still want to
50332 + protect your kernel against modification, use the RBAC system.
50333 +
50334 +config GRKERNSEC_PROC_MEMMAP
50335 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
50336 + default y if (PAX_NOEXEC || PAX_ASLR)
50337 + depends on PAX_NOEXEC || PAX_ASLR
50338 + help
50339 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
50340 + give no information about the addresses of its mappings if
50341 + PaX features that rely on random addresses are enabled on the task.
50342 + If you use PaX it is greatly recommended that you say Y here as it
50343 + closes up a hole that makes the full ASLR useless for suid
50344 + binaries.
50345 +
50346 +config GRKERNSEC_BRUTE
50347 + bool "Deter exploit bruteforcing"
50348 + help
50349 + If you say Y here, attempts to bruteforce exploits against forking
50350 + daemons such as apache or sshd, as well as against suid/sgid binaries
50351 + will be deterred. When a child of a forking daemon is killed by PaX
50352 + or crashes due to an illegal instruction or other suspicious signal,
50353 + the parent process will be delayed 30 seconds upon every subsequent
50354 + fork until the administrator is able to assess the situation and
50355 + restart the daemon.
50356 + In the suid/sgid case, the attempt is logged, the user has all their
50357 + processes terminated, and they are prevented from executing any further
50358 + processes for 15 minutes.
50359 + It is recommended that you also enable signal logging in the auditing
50360 + section so that logs are generated when a process triggers a suspicious
50361 + signal.
50362 + If the sysctl option is enabled, a sysctl option with name
50363 + "deter_bruteforce" is created.
50364 +
50365 +
50366 +config GRKERNSEC_MODHARDEN
50367 + bool "Harden module auto-loading"
50368 + depends on MODULES
50369 + help
50370 + If you say Y here, module auto-loading in response to use of some
50371 + feature implemented by an unloaded module will be restricted to
50372 + root users. Enabling this option helps defend against attacks
50373 + by unprivileged users who abuse the auto-loading behavior to
50374 + cause a vulnerable module to load that is then exploited.
50375 +
50376 + If this option prevents a legitimate use of auto-loading for a
50377 + non-root user, the administrator can execute modprobe manually
50378 + with the exact name of the module mentioned in the alert log.
50379 + Alternatively, the administrator can add the module to the list
50380 + of modules loaded at boot by modifying init scripts.
50381 +
50382 + Modification of init scripts will most likely be needed on
50383 + Ubuntu servers with encrypted home directory support enabled,
50384 + as the first non-root user logging in will cause the ecb(aes),
50385 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
50386 +
50387 +config GRKERNSEC_HIDESYM
50388 + bool "Hide kernel symbols"
50389 + help
50390 + If you say Y here, getting information on loaded modules, and
50391 + displaying all kernel symbols through a syscall will be restricted
50392 + to users with CAP_SYS_MODULE. For software compatibility reasons,
50393 + /proc/kallsyms will be restricted to the root user. The RBAC
50394 + system can hide that entry even from root.
50395 +
50396 + This option also prevents leaking of kernel addresses through
50397 + several /proc entries.
50398 +
50399 + Note that this option is only effective provided the following
50400 + conditions are met:
50401 + 1) The kernel using grsecurity is not precompiled by some distribution
50402 + 2) You have also enabled GRKERNSEC_DMESG
50403 + 3) You are using the RBAC system and hiding other files such as your
50404 + kernel image and System.map. Alternatively, enabling this option
50405 + causes the permissions on /boot, /lib/modules, and the kernel
50406 + source directory to change at compile time to prevent
50407 + reading by non-root users.
50408 + If the above conditions are met, this option will aid in providing a
50409 + useful protection against local kernel exploitation of overflows
50410 + and arbitrary read/write vulnerabilities.
50411 +
50412 +config GRKERNSEC_KERN_LOCKOUT
50413 + bool "Active kernel exploit response"
50414 + depends on X86 || ARM || PPC || SPARC
50415 + help
50416 + If you say Y here, when a PaX alert is triggered due to suspicious
50417 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
50418 + or an OOPs occurs due to bad memory accesses, instead of just
50419 + terminating the offending process (and potentially allowing
50420 + a subsequent exploit from the same user), we will take one of two
50421 + actions:
50422 + If the user was root, we will panic the system
50423 + If the user was non-root, we will log the attempt, terminate
50424 + all processes owned by the user, then prevent them from creating
50425 + any new processes until the system is restarted
50426 + This deters repeated kernel exploitation/bruteforcing attempts
50427 + and is useful for later forensics.
50428 +
50429 +endmenu
50430 +menu "Role Based Access Control Options"
50431 +depends on GRKERNSEC
50432 +
50433 +config GRKERNSEC_RBAC_DEBUG
50434 + bool
50435 +
50436 +config GRKERNSEC_NO_RBAC
50437 + bool "Disable RBAC system"
50438 + help
50439 + If you say Y here, the /dev/grsec device will be removed from the kernel,
50440 + preventing the RBAC system from being enabled. You should only say Y
50441 + here if you have no intention of using the RBAC system, so as to prevent
50442 + an attacker with root access from misusing the RBAC system to hide files
50443 + and processes when loadable module support and /dev/[k]mem have been
50444 + locked down.
50445 +
50446 +config GRKERNSEC_ACL_HIDEKERN
50447 + bool "Hide kernel processes"
50448 + help
50449 + If you say Y here, all kernel threads will be hidden to all
50450 + processes but those whose subject has the "view hidden processes"
50451 + flag.
50452 +
50453 +config GRKERNSEC_ACL_MAXTRIES
50454 + int "Maximum tries before password lockout"
50455 + default 3
50456 + help
50457 + This option enforces the maximum number of times a user can attempt
50458 + to authorize themselves with the grsecurity RBAC system before being
50459 + denied the ability to attempt authorization again for a specified time.
50460 + The lower the number, the harder it will be to brute-force a password.
50461 +
50462 +config GRKERNSEC_ACL_TIMEOUT
50463 + int "Time to wait after max password tries, in seconds"
50464 + default 30
50465 + help
50466 + This option specifies the time the user must wait after attempting to
50467 + authorize to the RBAC system with the maximum number of invalid
50468 + passwords. The higher the number, the harder it will be to brute-force
50469 + a password.
50470 +
50471 +endmenu
50472 +menu "Filesystem Protections"
50473 +depends on GRKERNSEC
50474 +
50475 +config GRKERNSEC_PROC
50476 + bool "Proc restrictions"
50477 + help
50478 + If you say Y here, the permissions of the /proc filesystem
50479 + will be altered to enhance system security and privacy. You MUST
50480 + choose either a user only restriction or a user and group restriction.
50481 + Depending upon the option you choose, you can either restrict users to
50482 + see only the processes they themselves run, or choose a group that can
50483 + view all processes and files normally restricted to root if you choose
50484 + the "restrict to user only" option. NOTE: If you're running identd as
50485 + a non-root user, you will have to run it as the group you specify here.
50486 +
50487 +config GRKERNSEC_PROC_USER
50488 + bool "Restrict /proc to user only"
50489 + depends on GRKERNSEC_PROC
50490 + help
50491 + If you say Y here, non-root users will only be able to view their own
50492 + processes, and restricts them from viewing network-related information,
50493 + and viewing kernel symbol and module information.
50494 +
50495 +config GRKERNSEC_PROC_USERGROUP
50496 + bool "Allow special group"
50497 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
50498 + help
50499 + If you say Y here, you will be able to select a group that will be
50500 + able to view all processes and network-related information. If you've
50501 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
50502 + remain hidden. This option is useful if you want to run identd as
50503 + a non-root user.
50504 +
50505 +config GRKERNSEC_PROC_GID
50506 + int "GID for special group"
50507 + depends on GRKERNSEC_PROC_USERGROUP
50508 + default 1001
50509 +
50510 +config GRKERNSEC_PROC_ADD
50511 + bool "Additional restrictions"
50512 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
50513 + help
50514 + If you say Y here, additional restrictions will be placed on
50515 + /proc that keep normal users from viewing device information and
50516 + slabinfo information that could be useful for exploits.
50517 +
50518 +config GRKERNSEC_LINK
50519 + bool "Linking restrictions"
50520 + help
50521 + If you say Y here, /tmp race exploits will be prevented, since users
50522 + will no longer be able to follow symlinks owned by other users in
50523 + world-writable +t directories (e.g. /tmp), unless the owner of the
50524 + symlink is the owner of the directory. users will also not be
50525 + able to hardlink to files they do not own. If the sysctl option is
50526 + enabled, a sysctl option with name "linking_restrictions" is created.
50527 +
50528 +config GRKERNSEC_FIFO
50529 + bool "FIFO restrictions"
50530 + help
50531 + If you say Y here, users will not be able to write to FIFOs they don't
50532 + own in world-writable +t directories (e.g. /tmp), unless the owner of
50533 + the FIFO is the same owner of the directory it's held in. If the sysctl
50534 + option is enabled, a sysctl option with name "fifo_restrictions" is
50535 + created.
50536 +
50537 +config GRKERNSEC_SYSFS_RESTRICT
50538 + bool "Sysfs/debugfs restriction"
50539 + depends on SYSFS
50540 + help
50541 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
50542 + any filesystem normally mounted under it (e.g. debugfs) will only
50543 + be accessible by root. These filesystems generally provide access
50544 + to hardware and debug information that isn't appropriate for unprivileged
50545 + users of the system. Sysfs and debugfs have also become a large source
50546 + of new vulnerabilities, ranging from infoleaks to local compromise.
50547 + There has been very little oversight with an eye toward security involved
50548 + in adding new exporters of information to these filesystems, so their
50549 + use is discouraged.
50550 + This option is equivalent to a chmod 0700 of the mount paths.
50551 +
50552 +config GRKERNSEC_ROFS
50553 + bool "Runtime read-only mount protection"
50554 + help
50555 + If you say Y here, a sysctl option with name "romount_protect" will
50556 + be created. By setting this option to 1 at runtime, filesystems
50557 + will be protected in the following ways:
50558 + * No new writable mounts will be allowed
50559 + * Existing read-only mounts won't be able to be remounted read/write
50560 + * Write operations will be denied on all block devices
50561 + This option acts independently of grsec_lock: once it is set to 1,
50562 + it cannot be turned off. Therefore, please be mindful of the resulting
50563 + behavior if this option is enabled in an init script on a read-only
50564 + filesystem. This feature is mainly intended for secure embedded systems.
50565 +
50566 +config GRKERNSEC_CHROOT
50567 + bool "Chroot jail restrictions"
50568 + help
50569 + If you say Y here, you will be able to choose several options that will
50570 + make breaking out of a chrooted jail much more difficult. If you
50571 + encounter no software incompatibilities with the following options, it
50572 + is recommended that you enable each one.
50573 +
50574 +config GRKERNSEC_CHROOT_MOUNT
50575 + bool "Deny mounts"
50576 + depends on GRKERNSEC_CHROOT
50577 + help
50578 + If you say Y here, processes inside a chroot will not be able to
50579 + mount or remount filesystems. If the sysctl option is enabled, a
50580 + sysctl option with name "chroot_deny_mount" is created.
50581 +
50582 +config GRKERNSEC_CHROOT_DOUBLE
50583 + bool "Deny double-chroots"
50584 + depends on GRKERNSEC_CHROOT
50585 + help
50586 + If you say Y here, processes inside a chroot will not be able to chroot
50587 + again outside the chroot. This is a widely used method of breaking
50588 + out of a chroot jail and should not be allowed. If the sysctl
50589 + option is enabled, a sysctl option with name
50590 + "chroot_deny_chroot" is created.
50591 +
50592 +config GRKERNSEC_CHROOT_PIVOT
50593 + bool "Deny pivot_root in chroot"
50594 + depends on GRKERNSEC_CHROOT
50595 + help
50596 + If you say Y here, processes inside a chroot will not be able to use
50597 + a function called pivot_root() that was introduced in Linux 2.3.41. It
50598 + works similar to chroot in that it changes the root filesystem. This
50599 + function could be misused in a chrooted process to attempt to break out
50600 + of the chroot, and therefore should not be allowed. If the sysctl
50601 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
50602 + created.
50603 +
50604 +config GRKERNSEC_CHROOT_CHDIR
50605 + bool "Enforce chdir(\"/\") on all chroots"
50606 + depends on GRKERNSEC_CHROOT
50607 + help
50608 + If you say Y here, the current working directory of all newly-chrooted
50609 + applications will be set to the the root directory of the chroot.
50610 + The man page on chroot(2) states:
50611 + Note that this call does not change the current working
50612 + directory, so that `.' can be outside the tree rooted at
50613 + `/'. In particular, the super-user can escape from a
50614 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
50615 +
50616 + It is recommended that you say Y here, since it's not known to break
50617 + any software. If the sysctl option is enabled, a sysctl option with
50618 + name "chroot_enforce_chdir" is created.
50619 +
50620 +config GRKERNSEC_CHROOT_CHMOD
50621 + bool "Deny (f)chmod +s"
50622 + depends on GRKERNSEC_CHROOT
50623 + help
50624 + If you say Y here, processes inside a chroot will not be able to chmod
50625 + or fchmod files to make them have suid or sgid bits. This protects
50626 + against another published method of breaking a chroot. If the sysctl
50627 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
50628 + created.
50629 +
50630 +config GRKERNSEC_CHROOT_FCHDIR
50631 + bool "Deny fchdir out of chroot"
50632 + depends on GRKERNSEC_CHROOT
50633 + help
50634 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
50635 + to a file descriptor of the chrooting process that points to a directory
50636 + outside the filesystem will be stopped. If the sysctl option
50637 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
50638 +
50639 +config GRKERNSEC_CHROOT_MKNOD
50640 + bool "Deny mknod"
50641 + depends on GRKERNSEC_CHROOT
50642 + help
50643 + If you say Y here, processes inside a chroot will not be allowed to
50644 + mknod. The problem with using mknod inside a chroot is that it
50645 + would allow an attacker to create a device entry that is the same
50646 + as one on the physical root of your system, which could range from
50647 + anything from the console device to a device for your harddrive (which
50648 + they could then use to wipe the drive or steal data). It is recommended
50649 + that you say Y here, unless you run into software incompatibilities.
50650 + If the sysctl option is enabled, a sysctl option with name
50651 + "chroot_deny_mknod" is created.
50652 +
50653 +config GRKERNSEC_CHROOT_SHMAT
50654 + bool "Deny shmat() out of chroot"
50655 + depends on GRKERNSEC_CHROOT
50656 + help
50657 + If you say Y here, processes inside a chroot will not be able to attach
50658 + to shared memory segments that were created outside of the chroot jail.
50659 + It is recommended that you say Y here. If the sysctl option is enabled,
50660 + a sysctl option with name "chroot_deny_shmat" is created.
50661 +
50662 +config GRKERNSEC_CHROOT_UNIX
50663 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
50664 + depends on GRKERNSEC_CHROOT
50665 + help
50666 + If you say Y here, processes inside a chroot will not be able to
50667 + connect to abstract (meaning not belonging to a filesystem) Unix
50668 + domain sockets that were bound outside of a chroot. It is recommended
50669 + that you say Y here. If the sysctl option is enabled, a sysctl option
50670 + with name "chroot_deny_unix" is created.
50671 +
50672 +config GRKERNSEC_CHROOT_FINDTASK
50673 + bool "Protect outside processes"
50674 + depends on GRKERNSEC_CHROOT
50675 + help
50676 + If you say Y here, processes inside a chroot will not be able to
50677 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50678 + getsid, or view any process outside of the chroot. If the sysctl
50679 + option is enabled, a sysctl option with name "chroot_findtask" is
50680 + created.
50681 +
50682 +config GRKERNSEC_CHROOT_NICE
50683 + bool "Restrict priority changes"
50684 + depends on GRKERNSEC_CHROOT
50685 + help
50686 + If you say Y here, processes inside a chroot will not be able to raise
50687 + the priority of processes in the chroot, or alter the priority of
50688 + processes outside the chroot. This provides more security than simply
50689 + removing CAP_SYS_NICE from the process' capability set. If the
50690 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50691 + is created.
50692 +
50693 +config GRKERNSEC_CHROOT_SYSCTL
50694 + bool "Deny sysctl writes"
50695 + depends on GRKERNSEC_CHROOT
50696 + help
50697 + If you say Y here, an attacker in a chroot will not be able to
50698 + write to sysctl entries, either by sysctl(2) or through a /proc
50699 + interface. It is strongly recommended that you say Y here. If the
50700 + sysctl option is enabled, a sysctl option with name
50701 + "chroot_deny_sysctl" is created.
50702 +
50703 +config GRKERNSEC_CHROOT_CAPS
50704 + bool "Capability restrictions"
50705 + depends on GRKERNSEC_CHROOT
50706 + help
50707 + If you say Y here, the capabilities on all root processes within a
50708 + chroot jail will be lowered to stop module insertion, raw i/o,
50709 + system and net admin tasks, rebooting the system, modifying immutable
50710 + files, modifying IPC owned by another, and changing the system time.
50711 + This is left an option because it can break some apps. Disable this
50712 + if your chrooted apps are having problems performing those kinds of
50713 + tasks. If the sysctl option is enabled, a sysctl option with
50714 + name "chroot_caps" is created.
50715 +
50716 +endmenu
50717 +menu "Kernel Auditing"
50718 +depends on GRKERNSEC
50719 +
50720 +config GRKERNSEC_AUDIT_GROUP
50721 + bool "Single group for auditing"
50722 + help
50723 + If you say Y here, the exec, chdir, and (un)mount logging features
50724 + will only operate on a group you specify. This option is recommended
50725 + if you only want to watch certain users instead of having a large
50726 + amount of logs from the entire system. If the sysctl option is enabled,
50727 + a sysctl option with name "audit_group" is created.
50728 +
50729 +config GRKERNSEC_AUDIT_GID
50730 + int "GID for auditing"
50731 + depends on GRKERNSEC_AUDIT_GROUP
50732 + default 1007
50733 +
50734 +config GRKERNSEC_EXECLOG
50735 + bool "Exec logging"
50736 + help
50737 + If you say Y here, all execve() calls will be logged (since the
50738 + other exec*() calls are frontends to execve(), all execution
50739 + will be logged). Useful for shell-servers that like to keep track
50740 + of their users. If the sysctl option is enabled, a sysctl option with
50741 + name "exec_logging" is created.
50742 + WARNING: This option when enabled will produce a LOT of logs, especially
50743 + on an active system.
50744 +
50745 +config GRKERNSEC_RESLOG
50746 + bool "Resource logging"
50747 + help
50748 + If you say Y here, all attempts to overstep resource limits will
50749 + be logged with the resource name, the requested size, and the current
50750 + limit. It is highly recommended that you say Y here. If the sysctl
50751 + option is enabled, a sysctl option with name "resource_logging" is
50752 + created. If the RBAC system is enabled, the sysctl value is ignored.
50753 +
50754 +config GRKERNSEC_CHROOT_EXECLOG
50755 + bool "Log execs within chroot"
50756 + help
50757 + If you say Y here, all executions inside a chroot jail will be logged
50758 + to syslog. This can cause a large amount of logs if certain
50759 + applications (eg. djb's daemontools) are installed on the system, and
50760 + is therefore left as an option. If the sysctl option is enabled, a
50761 + sysctl option with name "chroot_execlog" is created.
50762 +
50763 +config GRKERNSEC_AUDIT_PTRACE
50764 + bool "Ptrace logging"
50765 + help
50766 + If you say Y here, all attempts to attach to a process via ptrace
50767 + will be logged. If the sysctl option is enabled, a sysctl option
50768 + with name "audit_ptrace" is created.
50769 +
50770 +config GRKERNSEC_AUDIT_CHDIR
50771 + bool "Chdir logging"
50772 + help
50773 + If you say Y here, all chdir() calls will be logged. If the sysctl
50774 + option is enabled, a sysctl option with name "audit_chdir" is created.
50775 +
50776 +config GRKERNSEC_AUDIT_MOUNT
50777 + bool "(Un)Mount logging"
50778 + help
50779 + If you say Y here, all mounts and unmounts will be logged. If the
50780 + sysctl option is enabled, a sysctl option with name "audit_mount" is
50781 + created.
50782 +
50783 +config GRKERNSEC_SIGNAL
50784 + bool "Signal logging"
50785 + help
50786 + If you say Y here, certain important signals will be logged, such as
50787 + SIGSEGV, which will as a result inform you of when a error in a program
50788 + occurred, which in some cases could mean a possible exploit attempt.
50789 + If the sysctl option is enabled, a sysctl option with name
50790 + "signal_logging" is created.
50791 +
50792 +config GRKERNSEC_FORKFAIL
50793 + bool "Fork failure logging"
50794 + help
50795 + If you say Y here, all failed fork() attempts will be logged.
50796 + This could suggest a fork bomb, or someone attempting to overstep
50797 + their process limit. If the sysctl option is enabled, a sysctl option
50798 + with name "forkfail_logging" is created.
50799 +
50800 +config GRKERNSEC_TIME
50801 + bool "Time change logging"
50802 + help
50803 + If you say Y here, any changes of the system clock will be logged.
50804 + If the sysctl option is enabled, a sysctl option with name
50805 + "timechange_logging" is created.
50806 +
50807 +config GRKERNSEC_PROC_IPADDR
50808 + bool "/proc/<pid>/ipaddr support"
50809 + help
50810 + If you say Y here, a new entry will be added to each /proc/<pid>
50811 + directory that contains the IP address of the person using the task.
50812 + The IP is carried across local TCP and AF_UNIX stream sockets.
50813 + This information can be useful for IDS/IPSes to perform remote response
50814 + to a local attack. The entry is readable by only the owner of the
50815 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50816 + the RBAC system), and thus does not create privacy concerns.
50817 +
50818 +config GRKERNSEC_RWXMAP_LOG
50819 + bool 'Denied RWX mmap/mprotect logging'
50820 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50821 + help
50822 + If you say Y here, calls to mmap() and mprotect() with explicit
50823 + usage of PROT_WRITE and PROT_EXEC together will be logged when
50824 + denied by the PAX_MPROTECT feature. If the sysctl option is
50825 + enabled, a sysctl option with name "rwxmap_logging" is created.
50826 +
50827 +config GRKERNSEC_AUDIT_TEXTREL
50828 + bool 'ELF text relocations logging (READ HELP)'
50829 + depends on PAX_MPROTECT
50830 + help
50831 + If you say Y here, text relocations will be logged with the filename
50832 + of the offending library or binary. The purpose of the feature is
50833 + to help Linux distribution developers get rid of libraries and
50834 + binaries that need text relocations which hinder the future progress
50835 + of PaX. Only Linux distribution developers should say Y here, and
50836 + never on a production machine, as this option creates an information
50837 + leak that could aid an attacker in defeating the randomization of
50838 + a single memory region. If the sysctl option is enabled, a sysctl
50839 + option with name "audit_textrel" is created.
50840 +
50841 +endmenu
50842 +
50843 +menu "Executable Protections"
50844 +depends on GRKERNSEC
50845 +
50846 +config GRKERNSEC_EXECVE
50847 + bool "Enforce RLIMIT_NPROC on execs"
50848 + help
50849 + If you say Y here, users with a resource limit on processes will
50850 + have the value checked during execve() calls. The current system
50851 + only checks the system limit during fork() calls. If the sysctl option
50852 + is enabled, a sysctl option with name "execve_limiting" is created.
50853 +
50854 +config GRKERNSEC_DMESG
50855 + bool "Dmesg(8) restriction"
50856 + help
50857 + If you say Y here, non-root users will not be able to use dmesg(8)
50858 + to view up to the last 4kb of messages in the kernel's log buffer.
50859 + The kernel's log buffer often contains kernel addresses and other
50860 + identifying information useful to an attacker in fingerprinting a
50861 + system for a targeted exploit.
50862 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
50863 + created.
50864 +
50865 +config GRKERNSEC_HARDEN_PTRACE
50866 + bool "Deter ptrace-based process snooping"
50867 + help
50868 + If you say Y here, TTY sniffers and other malicious monitoring
50869 + programs implemented through ptrace will be defeated. If you
50870 + have been using the RBAC system, this option has already been
50871 + enabled for several years for all users, with the ability to make
50872 + fine-grained exceptions.
50873 +
50874 + This option only affects the ability of non-root users to ptrace
50875 + processes that are not a descendent of the ptracing process.
50876 + This means that strace ./binary and gdb ./binary will still work,
50877 + but attaching to arbitrary processes will not. If the sysctl
50878 + option is enabled, a sysctl option with name "harden_ptrace" is
50879 + created.
50880 +
50881 +config GRKERNSEC_TPE
50882 + bool "Trusted Path Execution (TPE)"
50883 + help
50884 + If you say Y here, you will be able to choose a gid to add to the
50885 + supplementary groups of users you want to mark as "untrusted."
50886 + These users will not be able to execute any files that are not in
50887 + root-owned directories writable only by root. If the sysctl option
50888 + is enabled, a sysctl option with name "tpe" is created.
50889 +
50890 +config GRKERNSEC_TPE_ALL
50891 + bool "Partially restrict all non-root users"
50892 + depends on GRKERNSEC_TPE
50893 + help
50894 + If you say Y here, all non-root users will be covered under
50895 + a weaker TPE restriction. This is separate from, and in addition to,
50896 + the main TPE options that you have selected elsewhere. Thus, if a
50897 + "trusted" GID is chosen, this restriction applies to even that GID.
50898 + Under this restriction, all non-root users will only be allowed to
50899 + execute files in directories they own that are not group or
50900 + world-writable, or in directories owned by root and writable only by
50901 + root. If the sysctl option is enabled, a sysctl option with name
50902 + "tpe_restrict_all" is created.
50903 +
50904 +config GRKERNSEC_TPE_INVERT
50905 + bool "Invert GID option"
50906 + depends on GRKERNSEC_TPE
50907 + help
50908 + If you say Y here, the group you specify in the TPE configuration will
50909 + decide what group TPE restrictions will be *disabled* for. This
50910 + option is useful if you want TPE restrictions to be applied to most
50911 + users on the system. If the sysctl option is enabled, a sysctl option
50912 + with name "tpe_invert" is created. Unlike other sysctl options, this
50913 + entry will default to on for backward-compatibility.
50914 +
50915 +config GRKERNSEC_TPE_GID
50916 + int "GID for untrusted users"
50917 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50918 + default 1005
50919 + help
50920 + Setting this GID determines what group TPE restrictions will be
50921 + *enabled* for. If the sysctl option is enabled, a sysctl option
50922 + with name "tpe_gid" is created.
50923 +
50924 +config GRKERNSEC_TPE_GID
50925 + int "GID for trusted users"
50926 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50927 + default 1005
50928 + help
50929 + Setting this GID determines what group TPE restrictions will be
50930 + *disabled* for. If the sysctl option is enabled, a sysctl option
50931 + with name "tpe_gid" is created.
50932 +
50933 +endmenu
50934 +menu "Network Protections"
50935 +depends on GRKERNSEC
50936 +
50937 +config GRKERNSEC_RANDNET
50938 + bool "Larger entropy pools"
50939 + help
50940 + If you say Y here, the entropy pools used for many features of Linux
50941 + and grsecurity will be doubled in size. Since several grsecurity
50942 + features use additional randomness, it is recommended that you say Y
50943 + here. Saying Y here has a similar effect as modifying
50944 + /proc/sys/kernel/random/poolsize.
50945 +
50946 +config GRKERNSEC_BLACKHOLE
50947 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
50948 + depends on NET
50949 + help
50950 + If you say Y here, neither TCP resets nor ICMP
50951 + destination-unreachable packets will be sent in response to packets
50952 + sent to ports for which no associated listening process exists.
50953 + This feature supports both IPV4 and IPV6 and exempts the
50954 + loopback interface from blackholing. Enabling this feature
50955 + makes a host more resilient to DoS attacks and reduces network
50956 + visibility against scanners.
50957 +
50958 + The blackhole feature as-implemented is equivalent to the FreeBSD
50959 + blackhole feature, as it prevents RST responses to all packets, not
50960 + just SYNs. Under most application behavior this causes no
50961 + problems, but applications (like haproxy) may not close certain
50962 + connections in a way that cleanly terminates them on the remote
50963 + end, leaving the remote host in LAST_ACK state. Because of this
50964 + side-effect and to prevent intentional LAST_ACK DoSes, this
50965 + feature also adds automatic mitigation against such attacks.
50966 + The mitigation drastically reduces the amount of time a socket
50967 + can spend in LAST_ACK state. If you're using haproxy and not
50968 + all servers it connects to have this option enabled, consider
50969 + disabling this feature on the haproxy host.
50970 +
50971 + If the sysctl option is enabled, two sysctl options with names
50972 + "ip_blackhole" and "lastack_retries" will be created.
50973 + While "ip_blackhole" takes the standard zero/non-zero on/off
50974 + toggle, "lastack_retries" uses the same kinds of values as
50975 + "tcp_retries1" and "tcp_retries2". The default value of 4
50976 + prevents a socket from lasting more than 45 seconds in LAST_ACK
50977 + state.
50978 +
50979 +config GRKERNSEC_SOCKET
50980 + bool "Socket restrictions"
50981 + depends on NET
50982 + help
50983 + If you say Y here, you will be able to choose from several options.
50984 + If you assign a GID on your system and add it to the supplementary
50985 + groups of users you want to restrict socket access to, this patch
50986 + will perform up to three things, based on the option(s) you choose.
50987 +
50988 +config GRKERNSEC_SOCKET_ALL
50989 + bool "Deny any sockets to group"
50990 + depends on GRKERNSEC_SOCKET
50991 + help
50992 + If you say Y here, you will be able to choose a GID of whose users will
50993 + be unable to connect to other hosts from your machine or run server
50994 + applications from your machine. If the sysctl option is enabled, a
50995 + sysctl option with name "socket_all" is created.
50996 +
50997 +config GRKERNSEC_SOCKET_ALL_GID
50998 + int "GID to deny all sockets for"
50999 + depends on GRKERNSEC_SOCKET_ALL
51000 + default 1004
51001 + help
51002 + Here you can choose the GID to disable socket access for. Remember to
51003 + add the users you want socket access disabled for to the GID
51004 + specified here. If the sysctl option is enabled, a sysctl option
51005 + with name "socket_all_gid" is created.
51006 +
51007 +config GRKERNSEC_SOCKET_CLIENT
51008 + bool "Deny client sockets to group"
51009 + depends on GRKERNSEC_SOCKET
51010 + help
51011 + If you say Y here, you will be able to choose a GID of whose users will
51012 + be unable to connect to other hosts from your machine, but will be
51013 + able to run servers. If this option is enabled, all users in the group
51014 + you specify will have to use passive mode when initiating ftp transfers
51015 + from the shell on your machine. If the sysctl option is enabled, a
51016 + sysctl option with name "socket_client" is created.
51017 +
51018 +config GRKERNSEC_SOCKET_CLIENT_GID
51019 + int "GID to deny client sockets for"
51020 + depends on GRKERNSEC_SOCKET_CLIENT
51021 + default 1003
51022 + help
51023 + Here you can choose the GID to disable client socket access for.
51024 + Remember to add the users you want client socket access disabled for to
51025 + the GID specified here. If the sysctl option is enabled, a sysctl
51026 + option with name "socket_client_gid" is created.
51027 +
51028 +config GRKERNSEC_SOCKET_SERVER
51029 + bool "Deny server sockets to group"
51030 + depends on GRKERNSEC_SOCKET
51031 + help
51032 + If you say Y here, you will be able to choose a GID of whose users will
51033 + be unable to run server applications from your machine. If the sysctl
51034 + option is enabled, a sysctl option with name "socket_server" is created.
51035 +
51036 +config GRKERNSEC_SOCKET_SERVER_GID
51037 + int "GID to deny server sockets for"
51038 + depends on GRKERNSEC_SOCKET_SERVER
51039 + default 1002
51040 + help
51041 + Here you can choose the GID to disable server socket access for.
51042 + Remember to add the users you want server socket access disabled for to
51043 + the GID specified here. If the sysctl option is enabled, a sysctl
51044 + option with name "socket_server_gid" is created.
51045 +
51046 +endmenu
51047 +menu "Sysctl support"
51048 +depends on GRKERNSEC && SYSCTL
51049 +
51050 +config GRKERNSEC_SYSCTL
51051 + bool "Sysctl support"
51052 + help
51053 + If you say Y here, you will be able to change the options that
51054 + grsecurity runs with at bootup, without having to recompile your
51055 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
51056 + to enable (1) or disable (0) various features. All the sysctl entries
51057 + are mutable until the "grsec_lock" entry is set to a non-zero value.
51058 + All features enabled in the kernel configuration are disabled at boot
51059 + if you do not say Y to the "Turn on features by default" option.
51060 + All options should be set at startup, and the grsec_lock entry should
51061 + be set to a non-zero value after all the options are set.
51062 + *THIS IS EXTREMELY IMPORTANT*
51063 +
51064 +config GRKERNSEC_SYSCTL_DISTRO
51065 + bool "Extra sysctl support for distro makers (READ HELP)"
51066 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
51067 + help
51068 + If you say Y here, additional sysctl options will be created
51069 + for features that affect processes running as root. Therefore,
51070 + it is critical when using this option that the grsec_lock entry be
51071 + enabled after boot. Only distros with prebuilt kernel packages
51072 + with this option enabled that can ensure grsec_lock is enabled
51073 + after boot should use this option.
51074 + *Failure to set grsec_lock after boot makes all grsec features
51075 + this option covers useless*
51076 +
51077 + Currently this option creates the following sysctl entries:
51078 + "Disable Privileged I/O": "disable_priv_io"
51079 +
51080 +config GRKERNSEC_SYSCTL_ON
51081 + bool "Turn on features by default"
51082 + depends on GRKERNSEC_SYSCTL
51083 + help
51084 + If you say Y here, instead of having all features enabled in the
51085 + kernel configuration disabled at boot time, the features will be
51086 + enabled at boot time. It is recommended you say Y here unless
51087 + there is some reason you would want all sysctl-tunable features to
51088 + be disabled by default. As mentioned elsewhere, it is important
51089 + to enable the grsec_lock entry once you have finished modifying
51090 + the sysctl entries.
51091 +
51092 +endmenu
51093 +menu "Logging Options"
51094 +depends on GRKERNSEC
51095 +
51096 +config GRKERNSEC_FLOODTIME
51097 + int "Seconds in between log messages (minimum)"
51098 + default 10
51099 + help
51100 + This option allows you to enforce the number of seconds between
51101 + grsecurity log messages. The default should be suitable for most
51102 + people, however, if you choose to change it, choose a value small enough
51103 + to allow informative logs to be produced, but large enough to
51104 + prevent flooding.
51105 +
51106 +config GRKERNSEC_FLOODBURST
51107 + int "Number of messages in a burst (maximum)"
51108 + default 4
51109 + help
51110 + This option allows you to choose the maximum number of messages allowed
51111 + within the flood time interval you chose in a separate option. The
51112 + default should be suitable for most people, however if you find that
51113 + many of your logs are being interpreted as flooding, you may want to
51114 + raise this value.
51115 +
51116 +endmenu
51117 +
51118 +endmenu
51119 diff -urNp linux-2.6.39.4/grsecurity/Makefile linux-2.6.39.4/grsecurity/Makefile
51120 --- linux-2.6.39.4/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
51121 +++ linux-2.6.39.4/grsecurity/Makefile 2011-08-21 18:54:57.000000000 -0400
51122 @@ -0,0 +1,34 @@
51123 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
51124 +# during 2001-2009 it has been completely redesigned by Brad Spengler
51125 +# into an RBAC system
51126 +#
51127 +# All code in this directory and various hooks inserted throughout the kernel
51128 +# are copyright Brad Spengler - Open Source Security, Inc., and released
51129 +# under the GPL v2 or higher
51130 +
51131 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
51132 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
51133 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
51134 +
51135 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
51136 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
51137 + gracl_learn.o grsec_log.o
51138 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
51139 +
51140 +ifdef CONFIG_NET
51141 +obj-y += grsec_sock.o
51142 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
51143 +endif
51144 +
51145 +ifndef CONFIG_GRKERNSEC
51146 +obj-y += grsec_disabled.o
51147 +endif
51148 +
51149 +ifdef CONFIG_GRKERNSEC_HIDESYM
51150 +extra-y := grsec_hidesym.o
51151 +$(obj)/grsec_hidesym.o:
51152 + @-chmod -f 500 /boot
51153 + @-chmod -f 500 /lib/modules
51154 + @-chmod -f 700 .
51155 + @echo ' grsec: protected kernel image paths'
51156 +endif
51157 diff -urNp linux-2.6.39.4/include/acpi/acpi_bus.h linux-2.6.39.4/include/acpi/acpi_bus.h
51158 --- linux-2.6.39.4/include/acpi/acpi_bus.h 2011-05-19 00:06:34.000000000 -0400
51159 +++ linux-2.6.39.4/include/acpi/acpi_bus.h 2011-08-05 20:34:06.000000000 -0400
51160 @@ -107,7 +107,7 @@ struct acpi_device_ops {
51161 acpi_op_bind bind;
51162 acpi_op_unbind unbind;
51163 acpi_op_notify notify;
51164 -};
51165 +} __no_const;
51166
51167 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
51168
51169 diff -urNp linux-2.6.39.4/include/asm-generic/atomic-long.h linux-2.6.39.4/include/asm-generic/atomic-long.h
51170 --- linux-2.6.39.4/include/asm-generic/atomic-long.h 2011-05-19 00:06:34.000000000 -0400
51171 +++ linux-2.6.39.4/include/asm-generic/atomic-long.h 2011-08-05 20:34:06.000000000 -0400
51172 @@ -22,6 +22,12 @@
51173
51174 typedef atomic64_t atomic_long_t;
51175
51176 +#ifdef CONFIG_PAX_REFCOUNT
51177 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
51178 +#else
51179 +typedef atomic64_t atomic_long_unchecked_t;
51180 +#endif
51181 +
51182 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
51183
51184 static inline long atomic_long_read(atomic_long_t *l)
51185 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
51186 return (long)atomic64_read(v);
51187 }
51188
51189 +#ifdef CONFIG_PAX_REFCOUNT
51190 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
51191 +{
51192 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51193 +
51194 + return (long)atomic64_read_unchecked(v);
51195 +}
51196 +#endif
51197 +
51198 static inline void atomic_long_set(atomic_long_t *l, long i)
51199 {
51200 atomic64_t *v = (atomic64_t *)l;
51201 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
51202 atomic64_set(v, i);
51203 }
51204
51205 +#ifdef CONFIG_PAX_REFCOUNT
51206 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
51207 +{
51208 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51209 +
51210 + atomic64_set_unchecked(v, i);
51211 +}
51212 +#endif
51213 +
51214 static inline void atomic_long_inc(atomic_long_t *l)
51215 {
51216 atomic64_t *v = (atomic64_t *)l;
51217 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
51218 atomic64_inc(v);
51219 }
51220
51221 +#ifdef CONFIG_PAX_REFCOUNT
51222 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
51223 +{
51224 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51225 +
51226 + atomic64_inc_unchecked(v);
51227 +}
51228 +#endif
51229 +
51230 static inline void atomic_long_dec(atomic_long_t *l)
51231 {
51232 atomic64_t *v = (atomic64_t *)l;
51233 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
51234 atomic64_dec(v);
51235 }
51236
51237 +#ifdef CONFIG_PAX_REFCOUNT
51238 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
51239 +{
51240 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51241 +
51242 + atomic64_dec_unchecked(v);
51243 +}
51244 +#endif
51245 +
51246 static inline void atomic_long_add(long i, atomic_long_t *l)
51247 {
51248 atomic64_t *v = (atomic64_t *)l;
51249 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
51250 atomic64_add(i, v);
51251 }
51252
51253 +#ifdef CONFIG_PAX_REFCOUNT
51254 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
51255 +{
51256 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51257 +
51258 + atomic64_add_unchecked(i, v);
51259 +}
51260 +#endif
51261 +
51262 static inline void atomic_long_sub(long i, atomic_long_t *l)
51263 {
51264 atomic64_t *v = (atomic64_t *)l;
51265 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
51266 atomic64_sub(i, v);
51267 }
51268
51269 +#ifdef CONFIG_PAX_REFCOUNT
51270 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
51271 +{
51272 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51273 +
51274 + atomic64_sub_unchecked(i, v);
51275 +}
51276 +#endif
51277 +
51278 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
51279 {
51280 atomic64_t *v = (atomic64_t *)l;
51281 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
51282 return (long)atomic64_inc_return(v);
51283 }
51284
51285 +#ifdef CONFIG_PAX_REFCOUNT
51286 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
51287 +{
51288 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51289 +
51290 + return (long)atomic64_inc_return_unchecked(v);
51291 +}
51292 +#endif
51293 +
51294 static inline long atomic_long_dec_return(atomic_long_t *l)
51295 {
51296 atomic64_t *v = (atomic64_t *)l;
51297 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
51298
51299 typedef atomic_t atomic_long_t;
51300
51301 +#ifdef CONFIG_PAX_REFCOUNT
51302 +typedef atomic_unchecked_t atomic_long_unchecked_t;
51303 +#else
51304 +typedef atomic_t atomic_long_unchecked_t;
51305 +#endif
51306 +
51307 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
51308 static inline long atomic_long_read(atomic_long_t *l)
51309 {
51310 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
51311 return (long)atomic_read(v);
51312 }
51313
51314 +#ifdef CONFIG_PAX_REFCOUNT
51315 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
51316 +{
51317 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51318 +
51319 + return (long)atomic_read_unchecked(v);
51320 +}
51321 +#endif
51322 +
51323 static inline void atomic_long_set(atomic_long_t *l, long i)
51324 {
51325 atomic_t *v = (atomic_t *)l;
51326 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
51327 atomic_set(v, i);
51328 }
51329
51330 +#ifdef CONFIG_PAX_REFCOUNT
51331 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
51332 +{
51333 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51334 +
51335 + atomic_set_unchecked(v, i);
51336 +}
51337 +#endif
51338 +
51339 static inline void atomic_long_inc(atomic_long_t *l)
51340 {
51341 atomic_t *v = (atomic_t *)l;
51342 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
51343 atomic_inc(v);
51344 }
51345
51346 +#ifdef CONFIG_PAX_REFCOUNT
51347 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
51348 +{
51349 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51350 +
51351 + atomic_inc_unchecked(v);
51352 +}
51353 +#endif
51354 +
51355 static inline void atomic_long_dec(atomic_long_t *l)
51356 {
51357 atomic_t *v = (atomic_t *)l;
51358 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
51359 atomic_dec(v);
51360 }
51361
51362 +#ifdef CONFIG_PAX_REFCOUNT
51363 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
51364 +{
51365 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51366 +
51367 + atomic_dec_unchecked(v);
51368 +}
51369 +#endif
51370 +
51371 static inline void atomic_long_add(long i, atomic_long_t *l)
51372 {
51373 atomic_t *v = (atomic_t *)l;
51374 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long
51375 atomic_add(i, v);
51376 }
51377
51378 +#ifdef CONFIG_PAX_REFCOUNT
51379 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
51380 +{
51381 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51382 +
51383 + atomic_add_unchecked(i, v);
51384 +}
51385 +#endif
51386 +
51387 static inline void atomic_long_sub(long i, atomic_long_t *l)
51388 {
51389 atomic_t *v = (atomic_t *)l;
51390 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
51391 atomic_sub(i, v);
51392 }
51393
51394 +#ifdef CONFIG_PAX_REFCOUNT
51395 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
51396 +{
51397 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51398 +
51399 + atomic_sub_unchecked(i, v);
51400 +}
51401 +#endif
51402 +
51403 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
51404 {
51405 atomic_t *v = (atomic_t *)l;
51406 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
51407 return (long)atomic_inc_return(v);
51408 }
51409
51410 +#ifdef CONFIG_PAX_REFCOUNT
51411 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
51412 +{
51413 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51414 +
51415 + return (long)atomic_inc_return_unchecked(v);
51416 +}
51417 +#endif
51418 +
51419 static inline long atomic_long_dec_return(atomic_long_t *l)
51420 {
51421 atomic_t *v = (atomic_t *)l;
51422 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
51423
51424 #endif /* BITS_PER_LONG == 64 */
51425
51426 +#ifdef CONFIG_PAX_REFCOUNT
51427 +static inline void pax_refcount_needs_these_functions(void)
51428 +{
51429 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
51430 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
51431 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
51432 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
51433 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
51434 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
51435 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
51436 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
51437 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
51438 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
51439 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
51440 +
51441 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
51442 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
51443 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
51444 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
51445 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
51446 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
51447 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
51448 +}
51449 +#else
51450 +#define atomic_read_unchecked(v) atomic_read(v)
51451 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
51452 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
51453 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
51454 +#define atomic_inc_unchecked(v) atomic_inc(v)
51455 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
51456 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
51457 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
51458 +#define atomic_dec_unchecked(v) atomic_dec(v)
51459 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
51460 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
51461 +
51462 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
51463 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
51464 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
51465 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
51466 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
51467 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
51468 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
51469 +#endif
51470 +
51471 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
51472 diff -urNp linux-2.6.39.4/include/asm-generic/cache.h linux-2.6.39.4/include/asm-generic/cache.h
51473 --- linux-2.6.39.4/include/asm-generic/cache.h 2011-05-19 00:06:34.000000000 -0400
51474 +++ linux-2.6.39.4/include/asm-generic/cache.h 2011-08-05 19:44:37.000000000 -0400
51475 @@ -6,7 +6,7 @@
51476 * cache lines need to provide their own cache.h.
51477 */
51478
51479 -#define L1_CACHE_SHIFT 5
51480 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
51481 +#define L1_CACHE_SHIFT 5UL
51482 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
51483
51484 #endif /* __ASM_GENERIC_CACHE_H */
51485 diff -urNp linux-2.6.39.4/include/asm-generic/int-l64.h linux-2.6.39.4/include/asm-generic/int-l64.h
51486 --- linux-2.6.39.4/include/asm-generic/int-l64.h 2011-05-19 00:06:34.000000000 -0400
51487 +++ linux-2.6.39.4/include/asm-generic/int-l64.h 2011-08-05 19:44:37.000000000 -0400
51488 @@ -46,6 +46,8 @@ typedef unsigned int u32;
51489 typedef signed long s64;
51490 typedef unsigned long u64;
51491
51492 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
51493 +
51494 #define S8_C(x) x
51495 #define U8_C(x) x ## U
51496 #define S16_C(x) x
51497 diff -urNp linux-2.6.39.4/include/asm-generic/int-ll64.h linux-2.6.39.4/include/asm-generic/int-ll64.h
51498 --- linux-2.6.39.4/include/asm-generic/int-ll64.h 2011-05-19 00:06:34.000000000 -0400
51499 +++ linux-2.6.39.4/include/asm-generic/int-ll64.h 2011-08-05 19:44:37.000000000 -0400
51500 @@ -51,6 +51,8 @@ typedef unsigned int u32;
51501 typedef signed long long s64;
51502 typedef unsigned long long u64;
51503
51504 +typedef unsigned long long intoverflow_t;
51505 +
51506 #define S8_C(x) x
51507 #define U8_C(x) x ## U
51508 #define S16_C(x) x
51509 diff -urNp linux-2.6.39.4/include/asm-generic/kmap_types.h linux-2.6.39.4/include/asm-generic/kmap_types.h
51510 --- linux-2.6.39.4/include/asm-generic/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
51511 +++ linux-2.6.39.4/include/asm-generic/kmap_types.h 2011-08-05 19:44:37.000000000 -0400
51512 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
51513 KMAP_D(17) KM_NMI,
51514 KMAP_D(18) KM_NMI_PTE,
51515 KMAP_D(19) KM_KDB,
51516 +KMAP_D(20) KM_CLEARPAGE,
51517 /*
51518 * Remember to update debug_kmap_atomic() when adding new kmap types!
51519 */
51520 -KMAP_D(20) KM_TYPE_NR
51521 +KMAP_D(21) KM_TYPE_NR
51522 };
51523
51524 #undef KMAP_D
51525 diff -urNp linux-2.6.39.4/include/asm-generic/pgtable.h linux-2.6.39.4/include/asm-generic/pgtable.h
51526 --- linux-2.6.39.4/include/asm-generic/pgtable.h 2011-05-19 00:06:34.000000000 -0400
51527 +++ linux-2.6.39.4/include/asm-generic/pgtable.h 2011-08-05 19:44:37.000000000 -0400
51528 @@ -447,6 +447,14 @@ static inline int pmd_write(pmd_t pmd)
51529 #endif /* __HAVE_ARCH_PMD_WRITE */
51530 #endif
51531
51532 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
51533 +static inline unsigned long pax_open_kernel(void) { return 0; }
51534 +#endif
51535 +
51536 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
51537 +static inline unsigned long pax_close_kernel(void) { return 0; }
51538 +#endif
51539 +
51540 #endif /* !__ASSEMBLY__ */
51541
51542 #endif /* _ASM_GENERIC_PGTABLE_H */
51543 diff -urNp linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h
51544 --- linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h 2011-05-19 00:06:34.000000000 -0400
51545 +++ linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h 2011-08-05 19:44:37.000000000 -0400
51546 @@ -1,14 +1,19 @@
51547 #ifndef _PGTABLE_NOPMD_H
51548 #define _PGTABLE_NOPMD_H
51549
51550 -#ifndef __ASSEMBLY__
51551 -
51552 #include <asm-generic/pgtable-nopud.h>
51553
51554 -struct mm_struct;
51555 -
51556 #define __PAGETABLE_PMD_FOLDED
51557
51558 +#define PMD_SHIFT PUD_SHIFT
51559 +#define PTRS_PER_PMD 1
51560 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
51561 +#define PMD_MASK (~(PMD_SIZE-1))
51562 +
51563 +#ifndef __ASSEMBLY__
51564 +
51565 +struct mm_struct;
51566 +
51567 /*
51568 * Having the pmd type consist of a pud gets the size right, and allows
51569 * us to conceptually access the pud entry that this pmd is folded into
51570 @@ -16,11 +21,6 @@ struct mm_struct;
51571 */
51572 typedef struct { pud_t pud; } pmd_t;
51573
51574 -#define PMD_SHIFT PUD_SHIFT
51575 -#define PTRS_PER_PMD 1
51576 -#define PMD_SIZE (1UL << PMD_SHIFT)
51577 -#define PMD_MASK (~(PMD_SIZE-1))
51578 -
51579 /*
51580 * The "pud_xxx()" functions here are trivial for a folded two-level
51581 * setup: the pmd is never bad, and a pmd always exists (as it's folded
51582 diff -urNp linux-2.6.39.4/include/asm-generic/pgtable-nopud.h linux-2.6.39.4/include/asm-generic/pgtable-nopud.h
51583 --- linux-2.6.39.4/include/asm-generic/pgtable-nopud.h 2011-05-19 00:06:34.000000000 -0400
51584 +++ linux-2.6.39.4/include/asm-generic/pgtable-nopud.h 2011-08-05 19:44:37.000000000 -0400
51585 @@ -1,10 +1,15 @@
51586 #ifndef _PGTABLE_NOPUD_H
51587 #define _PGTABLE_NOPUD_H
51588
51589 -#ifndef __ASSEMBLY__
51590 -
51591 #define __PAGETABLE_PUD_FOLDED
51592
51593 +#define PUD_SHIFT PGDIR_SHIFT
51594 +#define PTRS_PER_PUD 1
51595 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
51596 +#define PUD_MASK (~(PUD_SIZE-1))
51597 +
51598 +#ifndef __ASSEMBLY__
51599 +
51600 /*
51601 * Having the pud type consist of a pgd gets the size right, and allows
51602 * us to conceptually access the pgd entry that this pud is folded into
51603 @@ -12,11 +17,6 @@
51604 */
51605 typedef struct { pgd_t pgd; } pud_t;
51606
51607 -#define PUD_SHIFT PGDIR_SHIFT
51608 -#define PTRS_PER_PUD 1
51609 -#define PUD_SIZE (1UL << PUD_SHIFT)
51610 -#define PUD_MASK (~(PUD_SIZE-1))
51611 -
51612 /*
51613 * The "pgd_xxx()" functions here are trivial for a folded two-level
51614 * setup: the pud is never bad, and a pud always exists (as it's folded
51615 diff -urNp linux-2.6.39.4/include/asm-generic/vmlinux.lds.h linux-2.6.39.4/include/asm-generic/vmlinux.lds.h
51616 --- linux-2.6.39.4/include/asm-generic/vmlinux.lds.h 2011-05-19 00:06:34.000000000 -0400
51617 +++ linux-2.6.39.4/include/asm-generic/vmlinux.lds.h 2011-08-05 19:44:37.000000000 -0400
51618 @@ -213,6 +213,7 @@
51619 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
51620 VMLINUX_SYMBOL(__start_rodata) = .; \
51621 *(.rodata) *(.rodata.*) \
51622 + *(.data..read_only) \
51623 *(__vermagic) /* Kernel version magic */ \
51624 . = ALIGN(8); \
51625 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
51626 @@ -707,14 +708,15 @@
51627 * section in the linker script will go there too. @phdr should have
51628 * a leading colon.
51629 *
51630 - * Note that this macros defines __per_cpu_load as an absolute symbol.
51631 + * Note that this macros defines per_cpu_load as an absolute symbol.
51632 * If there is no need to put the percpu section at a predetermined
51633 * address, use PERCPU().
51634 */
51635 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
51636 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
51637 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
51638 + per_cpu_load = .; \
51639 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
51640 - LOAD_OFFSET) { \
51641 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
51642 VMLINUX_SYMBOL(__per_cpu_start) = .; \
51643 *(.data..percpu..first) \
51644 . = ALIGN(PAGE_SIZE); \
51645 @@ -726,7 +728,7 @@
51646 *(.data..percpu..shared_aligned) \
51647 VMLINUX_SYMBOL(__per_cpu_end) = .; \
51648 } phdr \
51649 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
51650 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
51651
51652 /**
51653 * PERCPU - define output section for percpu area, simple version
51654 diff -urNp linux-2.6.39.4/include/drm/drm_crtc_helper.h linux-2.6.39.4/include/drm/drm_crtc_helper.h
51655 --- linux-2.6.39.4/include/drm/drm_crtc_helper.h 2011-05-19 00:06:34.000000000 -0400
51656 +++ linux-2.6.39.4/include/drm/drm_crtc_helper.h 2011-08-05 20:34:06.000000000 -0400
51657 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
51658
51659 /* disable crtc when not in use - more explicit than dpms off */
51660 void (*disable)(struct drm_crtc *crtc);
51661 -};
51662 +} __no_const;
51663
51664 struct drm_encoder_helper_funcs {
51665 void (*dpms)(struct drm_encoder *encoder, int mode);
51666 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
51667 struct drm_connector *connector);
51668 /* disable encoder when not in use - more explicit than dpms off */
51669 void (*disable)(struct drm_encoder *encoder);
51670 -};
51671 +} __no_const;
51672
51673 struct drm_connector_helper_funcs {
51674 int (*get_modes)(struct drm_connector *connector);
51675 diff -urNp linux-2.6.39.4/include/drm/drmP.h linux-2.6.39.4/include/drm/drmP.h
51676 --- linux-2.6.39.4/include/drm/drmP.h 2011-05-19 00:06:34.000000000 -0400
51677 +++ linux-2.6.39.4/include/drm/drmP.h 2011-08-05 20:34:06.000000000 -0400
51678 @@ -73,6 +73,7 @@
51679 #include <linux/workqueue.h>
51680 #include <linux/poll.h>
51681 #include <asm/pgalloc.h>
51682 +#include <asm/local.h>
51683 #include "drm.h"
51684
51685 #include <linux/idr.h>
51686 @@ -1023,7 +1024,7 @@ struct drm_device {
51687
51688 /** \name Usage Counters */
51689 /*@{ */
51690 - int open_count; /**< Outstanding files open */
51691 + local_t open_count; /**< Outstanding files open */
51692 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
51693 atomic_t vma_count; /**< Outstanding vma areas open */
51694 int buf_use; /**< Buffers in use -- cannot alloc */
51695 @@ -1034,7 +1035,7 @@ struct drm_device {
51696 /*@{ */
51697 unsigned long counters;
51698 enum drm_stat_type types[15];
51699 - atomic_t counts[15];
51700 + atomic_unchecked_t counts[15];
51701 /*@} */
51702
51703 struct list_head filelist;
51704 diff -urNp linux-2.6.39.4/include/drm/ttm/ttm_memory.h linux-2.6.39.4/include/drm/ttm/ttm_memory.h
51705 --- linux-2.6.39.4/include/drm/ttm/ttm_memory.h 2011-05-19 00:06:34.000000000 -0400
51706 +++ linux-2.6.39.4/include/drm/ttm/ttm_memory.h 2011-08-05 20:34:06.000000000 -0400
51707 @@ -47,7 +47,7 @@
51708
51709 struct ttm_mem_shrink {
51710 int (*do_shrink) (struct ttm_mem_shrink *);
51711 -};
51712 +} __no_const;
51713
51714 /**
51715 * struct ttm_mem_global - Global memory accounting structure.
51716 diff -urNp linux-2.6.39.4/include/linux/a.out.h linux-2.6.39.4/include/linux/a.out.h
51717 --- linux-2.6.39.4/include/linux/a.out.h 2011-05-19 00:06:34.000000000 -0400
51718 +++ linux-2.6.39.4/include/linux/a.out.h 2011-08-05 19:44:37.000000000 -0400
51719 @@ -39,6 +39,14 @@ enum machine_type {
51720 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
51721 };
51722
51723 +/* Constants for the N_FLAGS field */
51724 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
51725 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
51726 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
51727 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
51728 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
51729 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
51730 +
51731 #if !defined (N_MAGIC)
51732 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
51733 #endif
51734 diff -urNp linux-2.6.39.4/include/linux/atmdev.h linux-2.6.39.4/include/linux/atmdev.h
51735 --- linux-2.6.39.4/include/linux/atmdev.h 2011-05-19 00:06:34.000000000 -0400
51736 +++ linux-2.6.39.4/include/linux/atmdev.h 2011-08-05 19:44:37.000000000 -0400
51737 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
51738 #endif
51739
51740 struct k_atm_aal_stats {
51741 -#define __HANDLE_ITEM(i) atomic_t i
51742 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
51743 __AAL_STAT_ITEMS
51744 #undef __HANDLE_ITEM
51745 };
51746 diff -urNp linux-2.6.39.4/include/linux/binfmts.h linux-2.6.39.4/include/linux/binfmts.h
51747 --- linux-2.6.39.4/include/linux/binfmts.h 2011-05-19 00:06:34.000000000 -0400
51748 +++ linux-2.6.39.4/include/linux/binfmts.h 2011-08-05 19:44:37.000000000 -0400
51749 @@ -92,6 +92,7 @@ struct linux_binfmt {
51750 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
51751 int (*load_shlib)(struct file *);
51752 int (*core_dump)(struct coredump_params *cprm);
51753 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
51754 unsigned long min_coredump; /* minimal dump size */
51755 };
51756
51757 diff -urNp linux-2.6.39.4/include/linux/blkdev.h linux-2.6.39.4/include/linux/blkdev.h
51758 --- linux-2.6.39.4/include/linux/blkdev.h 2011-06-03 00:04:14.000000000 -0400
51759 +++ linux-2.6.39.4/include/linux/blkdev.h 2011-08-05 20:34:06.000000000 -0400
51760 @@ -1307,7 +1307,7 @@ struct block_device_operations {
51761 int (*getgeo)(struct block_device *, struct hd_geometry *);
51762 /* this callback is with swap_lock and sometimes page table lock held */
51763 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
51764 - struct module *owner;
51765 + struct module * const owner;
51766 };
51767
51768 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
51769 diff -urNp linux-2.6.39.4/include/linux/blktrace_api.h linux-2.6.39.4/include/linux/blktrace_api.h
51770 --- linux-2.6.39.4/include/linux/blktrace_api.h 2011-05-19 00:06:34.000000000 -0400
51771 +++ linux-2.6.39.4/include/linux/blktrace_api.h 2011-08-05 19:44:37.000000000 -0400
51772 @@ -161,7 +161,7 @@ struct blk_trace {
51773 struct dentry *dir;
51774 struct dentry *dropped_file;
51775 struct dentry *msg_file;
51776 - atomic_t dropped;
51777 + atomic_unchecked_t dropped;
51778 };
51779
51780 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
51781 diff -urNp linux-2.6.39.4/include/linux/byteorder/little_endian.h linux-2.6.39.4/include/linux/byteorder/little_endian.h
51782 --- linux-2.6.39.4/include/linux/byteorder/little_endian.h 2011-05-19 00:06:34.000000000 -0400
51783 +++ linux-2.6.39.4/include/linux/byteorder/little_endian.h 2011-08-05 19:44:37.000000000 -0400
51784 @@ -42,51 +42,51 @@
51785
51786 static inline __le64 __cpu_to_le64p(const __u64 *p)
51787 {
51788 - return (__force __le64)*p;
51789 + return (__force const __le64)*p;
51790 }
51791 static inline __u64 __le64_to_cpup(const __le64 *p)
51792 {
51793 - return (__force __u64)*p;
51794 + return (__force const __u64)*p;
51795 }
51796 static inline __le32 __cpu_to_le32p(const __u32 *p)
51797 {
51798 - return (__force __le32)*p;
51799 + return (__force const __le32)*p;
51800 }
51801 static inline __u32 __le32_to_cpup(const __le32 *p)
51802 {
51803 - return (__force __u32)*p;
51804 + return (__force const __u32)*p;
51805 }
51806 static inline __le16 __cpu_to_le16p(const __u16 *p)
51807 {
51808 - return (__force __le16)*p;
51809 + return (__force const __le16)*p;
51810 }
51811 static inline __u16 __le16_to_cpup(const __le16 *p)
51812 {
51813 - return (__force __u16)*p;
51814 + return (__force const __u16)*p;
51815 }
51816 static inline __be64 __cpu_to_be64p(const __u64 *p)
51817 {
51818 - return (__force __be64)__swab64p(p);
51819 + return (__force const __be64)__swab64p(p);
51820 }
51821 static inline __u64 __be64_to_cpup(const __be64 *p)
51822 {
51823 - return __swab64p((__u64 *)p);
51824 + return __swab64p((const __u64 *)p);
51825 }
51826 static inline __be32 __cpu_to_be32p(const __u32 *p)
51827 {
51828 - return (__force __be32)__swab32p(p);
51829 + return (__force const __be32)__swab32p(p);
51830 }
51831 static inline __u32 __be32_to_cpup(const __be32 *p)
51832 {
51833 - return __swab32p((__u32 *)p);
51834 + return __swab32p((const __u32 *)p);
51835 }
51836 static inline __be16 __cpu_to_be16p(const __u16 *p)
51837 {
51838 - return (__force __be16)__swab16p(p);
51839 + return (__force const __be16)__swab16p(p);
51840 }
51841 static inline __u16 __be16_to_cpup(const __be16 *p)
51842 {
51843 - return __swab16p((__u16 *)p);
51844 + return __swab16p((const __u16 *)p);
51845 }
51846 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
51847 #define __le64_to_cpus(x) do { (void)(x); } while (0)
51848 diff -urNp linux-2.6.39.4/include/linux/cache.h linux-2.6.39.4/include/linux/cache.h
51849 --- linux-2.6.39.4/include/linux/cache.h 2011-05-19 00:06:34.000000000 -0400
51850 +++ linux-2.6.39.4/include/linux/cache.h 2011-08-05 19:44:37.000000000 -0400
51851 @@ -16,6 +16,10 @@
51852 #define __read_mostly
51853 #endif
51854
51855 +#ifndef __read_only
51856 +#define __read_only __read_mostly
51857 +#endif
51858 +
51859 #ifndef ____cacheline_aligned
51860 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
51861 #endif
51862 diff -urNp linux-2.6.39.4/include/linux/capability.h linux-2.6.39.4/include/linux/capability.h
51863 --- linux-2.6.39.4/include/linux/capability.h 2011-05-19 00:06:34.000000000 -0400
51864 +++ linux-2.6.39.4/include/linux/capability.h 2011-08-05 19:44:37.000000000 -0400
51865 @@ -547,6 +547,9 @@ extern bool capable(int cap);
51866 extern bool ns_capable(struct user_namespace *ns, int cap);
51867 extern bool task_ns_capable(struct task_struct *t, int cap);
51868 extern bool nsown_capable(int cap);
51869 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
51870 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
51871 +extern bool capable_nolog(int cap);
51872
51873 /* audit system wants to get cap info from files as well */
51874 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
51875 diff -urNp linux-2.6.39.4/include/linux/compiler-gcc4.h linux-2.6.39.4/include/linux/compiler-gcc4.h
51876 --- linux-2.6.39.4/include/linux/compiler-gcc4.h 2011-05-19 00:06:34.000000000 -0400
51877 +++ linux-2.6.39.4/include/linux/compiler-gcc4.h 2011-08-05 20:34:06.000000000 -0400
51878 @@ -31,6 +31,9 @@
51879
51880
51881 #if __GNUC_MINOR__ >= 5
51882 +
51883 +#define __no_const __attribute__((no_const))
51884 +
51885 /*
51886 * Mark a position in code as unreachable. This can be used to
51887 * suppress control flow warnings after asm blocks that transfer
51888 @@ -46,6 +49,11 @@
51889 #define __noclone __attribute__((__noclone__))
51890
51891 #endif
51892 +
51893 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
51894 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
51895 +#define __bos0(ptr) __bos((ptr), 0)
51896 +#define __bos1(ptr) __bos((ptr), 1)
51897 #endif
51898
51899 #if __GNUC_MINOR__ > 0
51900 diff -urNp linux-2.6.39.4/include/linux/compiler.h linux-2.6.39.4/include/linux/compiler.h
51901 --- linux-2.6.39.4/include/linux/compiler.h 2011-05-19 00:06:34.000000000 -0400
51902 +++ linux-2.6.39.4/include/linux/compiler.h 2011-08-05 20:34:06.000000000 -0400
51903 @@ -264,6 +264,10 @@ void ftrace_likely_update(struct ftrace_
51904 # define __attribute_const__ /* unimplemented */
51905 #endif
51906
51907 +#ifndef __no_const
51908 +# define __no_const
51909 +#endif
51910 +
51911 /*
51912 * Tell gcc if a function is cold. The compiler will assume any path
51913 * directly leading to the call is unlikely.
51914 @@ -273,6 +277,22 @@ void ftrace_likely_update(struct ftrace_
51915 #define __cold
51916 #endif
51917
51918 +#ifndef __alloc_size
51919 +#define __alloc_size(...)
51920 +#endif
51921 +
51922 +#ifndef __bos
51923 +#define __bos(ptr, arg)
51924 +#endif
51925 +
51926 +#ifndef __bos0
51927 +#define __bos0(ptr)
51928 +#endif
51929 +
51930 +#ifndef __bos1
51931 +#define __bos1(ptr)
51932 +#endif
51933 +
51934 /* Simple shorthand for a section definition */
51935 #ifndef __section
51936 # define __section(S) __attribute__ ((__section__(#S)))
51937 @@ -306,6 +326,7 @@ void ftrace_likely_update(struct ftrace_
51938 * use is to mediate communication between process-level code and irq/NMI
51939 * handlers, all running on the same CPU.
51940 */
51941 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
51942 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
51943 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
51944
51945 #endif /* __LINUX_COMPILER_H */
51946 diff -urNp linux-2.6.39.4/include/linux/cpuset.h linux-2.6.39.4/include/linux/cpuset.h
51947 --- linux-2.6.39.4/include/linux/cpuset.h 2011-05-19 00:06:34.000000000 -0400
51948 +++ linux-2.6.39.4/include/linux/cpuset.h 2011-08-05 19:44:37.000000000 -0400
51949 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
51950 * nodemask.
51951 */
51952 smp_mb();
51953 - --ACCESS_ONCE(current->mems_allowed_change_disable);
51954 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
51955 }
51956
51957 static inline void set_mems_allowed(nodemask_t nodemask)
51958 diff -urNp linux-2.6.39.4/include/linux/crypto.h linux-2.6.39.4/include/linux/crypto.h
51959 --- linux-2.6.39.4/include/linux/crypto.h 2011-05-19 00:06:34.000000000 -0400
51960 +++ linux-2.6.39.4/include/linux/crypto.h 2011-08-05 20:34:06.000000000 -0400
51961 @@ -361,7 +361,7 @@ struct cipher_tfm {
51962 const u8 *key, unsigned int keylen);
51963 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51964 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51965 -};
51966 +} __no_const;
51967
51968 struct hash_tfm {
51969 int (*init)(struct hash_desc *desc);
51970 @@ -382,13 +382,13 @@ struct compress_tfm {
51971 int (*cot_decompress)(struct crypto_tfm *tfm,
51972 const u8 *src, unsigned int slen,
51973 u8 *dst, unsigned int *dlen);
51974 -};
51975 +} __no_const;
51976
51977 struct rng_tfm {
51978 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
51979 unsigned int dlen);
51980 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
51981 -};
51982 +} __no_const;
51983
51984 #define crt_ablkcipher crt_u.ablkcipher
51985 #define crt_aead crt_u.aead
51986 diff -urNp linux-2.6.39.4/include/linux/decompress/mm.h linux-2.6.39.4/include/linux/decompress/mm.h
51987 --- linux-2.6.39.4/include/linux/decompress/mm.h 2011-05-19 00:06:34.000000000 -0400
51988 +++ linux-2.6.39.4/include/linux/decompress/mm.h 2011-08-05 19:44:37.000000000 -0400
51989 @@ -77,7 +77,7 @@ static void free(void *where)
51990 * warnings when not needed (indeed large_malloc / large_free are not
51991 * needed by inflate */
51992
51993 -#define malloc(a) kmalloc(a, GFP_KERNEL)
51994 +#define malloc(a) kmalloc((a), GFP_KERNEL)
51995 #define free(a) kfree(a)
51996
51997 #define large_malloc(a) vmalloc(a)
51998 diff -urNp linux-2.6.39.4/include/linux/dma-mapping.h linux-2.6.39.4/include/linux/dma-mapping.h
51999 --- linux-2.6.39.4/include/linux/dma-mapping.h 2011-05-19 00:06:34.000000000 -0400
52000 +++ linux-2.6.39.4/include/linux/dma-mapping.h 2011-08-05 20:34:06.000000000 -0400
52001 @@ -49,7 +49,7 @@ struct dma_map_ops {
52002 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
52003 int (*dma_supported)(struct device *dev, u64 mask);
52004 int (*set_dma_mask)(struct device *dev, u64 mask);
52005 - int is_phys;
52006 + const int is_phys;
52007 };
52008
52009 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
52010 diff -urNp linux-2.6.39.4/include/linux/efi.h linux-2.6.39.4/include/linux/efi.h
52011 --- linux-2.6.39.4/include/linux/efi.h 2011-06-25 12:55:23.000000000 -0400
52012 +++ linux-2.6.39.4/include/linux/efi.h 2011-08-05 20:34:06.000000000 -0400
52013 @@ -409,7 +409,7 @@ struct efivar_operations {
52014 efi_get_variable_t *get_variable;
52015 efi_get_next_variable_t *get_next_variable;
52016 efi_set_variable_t *set_variable;
52017 -};
52018 +} __no_const;
52019
52020 struct efivars {
52021 /*
52022 diff -urNp linux-2.6.39.4/include/linux/elf.h linux-2.6.39.4/include/linux/elf.h
52023 --- linux-2.6.39.4/include/linux/elf.h 2011-05-19 00:06:34.000000000 -0400
52024 +++ linux-2.6.39.4/include/linux/elf.h 2011-08-05 19:44:37.000000000 -0400
52025 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
52026 #define PT_GNU_EH_FRAME 0x6474e550
52027
52028 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
52029 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
52030 +
52031 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
52032 +
52033 +/* Constants for the e_flags field */
52034 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
52035 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
52036 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
52037 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
52038 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
52039 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
52040
52041 /*
52042 * Extended Numbering
52043 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
52044 #define DT_DEBUG 21
52045 #define DT_TEXTREL 22
52046 #define DT_JMPREL 23
52047 +#define DT_FLAGS 30
52048 + #define DF_TEXTREL 0x00000004
52049 #define DT_ENCODING 32
52050 #define OLD_DT_LOOS 0x60000000
52051 #define DT_LOOS 0x6000000d
52052 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
52053 #define PF_W 0x2
52054 #define PF_X 0x1
52055
52056 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
52057 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
52058 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
52059 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
52060 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
52061 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
52062 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
52063 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
52064 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
52065 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
52066 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
52067 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
52068 +
52069 typedef struct elf32_phdr{
52070 Elf32_Word p_type;
52071 Elf32_Off p_offset;
52072 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
52073 #define EI_OSABI 7
52074 #define EI_PAD 8
52075
52076 +#define EI_PAX 14
52077 +
52078 #define ELFMAG0 0x7f /* EI_MAG */
52079 #define ELFMAG1 'E'
52080 #define ELFMAG2 'L'
52081 @@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
52082 #define elf_note elf32_note
52083 #define elf_addr_t Elf32_Off
52084 #define Elf_Half Elf32_Half
52085 +#define elf_dyn Elf32_Dyn
52086
52087 #else
52088
52089 @@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
52090 #define elf_note elf64_note
52091 #define elf_addr_t Elf64_Off
52092 #define Elf_Half Elf64_Half
52093 +#define elf_dyn Elf64_Dyn
52094
52095 #endif
52096
52097 diff -urNp linux-2.6.39.4/include/linux/firewire.h linux-2.6.39.4/include/linux/firewire.h
52098 --- linux-2.6.39.4/include/linux/firewire.h 2011-05-19 00:06:34.000000000 -0400
52099 +++ linux-2.6.39.4/include/linux/firewire.h 2011-08-05 20:34:06.000000000 -0400
52100 @@ -429,7 +429,7 @@ struct fw_iso_context {
52101 union {
52102 fw_iso_callback_t sc;
52103 fw_iso_mc_callback_t mc;
52104 - } callback;
52105 + } __no_const callback;
52106 void *callback_data;
52107 };
52108
52109 diff -urNp linux-2.6.39.4/include/linux/fscache-cache.h linux-2.6.39.4/include/linux/fscache-cache.h
52110 --- linux-2.6.39.4/include/linux/fscache-cache.h 2011-05-19 00:06:34.000000000 -0400
52111 +++ linux-2.6.39.4/include/linux/fscache-cache.h 2011-08-05 19:44:37.000000000 -0400
52112 @@ -113,7 +113,7 @@ struct fscache_operation {
52113 #endif
52114 };
52115
52116 -extern atomic_t fscache_op_debug_id;
52117 +extern atomic_unchecked_t fscache_op_debug_id;
52118 extern void fscache_op_work_func(struct work_struct *work);
52119
52120 extern void fscache_enqueue_operation(struct fscache_operation *);
52121 @@ -133,7 +133,7 @@ static inline void fscache_operation_ini
52122 {
52123 INIT_WORK(&op->work, fscache_op_work_func);
52124 atomic_set(&op->usage, 1);
52125 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
52126 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
52127 op->processor = processor;
52128 op->release = release;
52129 INIT_LIST_HEAD(&op->pend_link);
52130 diff -urNp linux-2.6.39.4/include/linux/fs.h linux-2.6.39.4/include/linux/fs.h
52131 --- linux-2.6.39.4/include/linux/fs.h 2011-05-19 00:06:34.000000000 -0400
52132 +++ linux-2.6.39.4/include/linux/fs.h 2011-08-05 20:34:06.000000000 -0400
52133 @@ -108,6 +108,11 @@ struct inodes_stat_t {
52134 /* File was opened by fanotify and shouldn't generate fanotify events */
52135 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
52136
52137 +/* Hack for grsec so as not to require read permission simply to execute
52138 + * a binary
52139 + */
52140 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
52141 +
52142 /*
52143 * The below are the various read and write types that we support. Some of
52144 * them include behavioral modifiers that send information down to the
52145 @@ -1535,7 +1540,7 @@ struct block_device_operations;
52146 * the big kernel lock held in all filesystems.
52147 */
52148 struct file_operations {
52149 - struct module *owner;
52150 + struct module * const owner;
52151 loff_t (*llseek) (struct file *, loff_t, int);
52152 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
52153 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
52154 @@ -1563,6 +1568,7 @@ struct file_operations {
52155 long (*fallocate)(struct file *file, int mode, loff_t offset,
52156 loff_t len);
52157 };
52158 +typedef struct file_operations __no_const file_operations_no_const;
52159
52160 #define IPERM_FLAG_RCU 0x0001
52161
52162 diff -urNp linux-2.6.39.4/include/linux/fs_struct.h linux-2.6.39.4/include/linux/fs_struct.h
52163 --- linux-2.6.39.4/include/linux/fs_struct.h 2011-05-19 00:06:34.000000000 -0400
52164 +++ linux-2.6.39.4/include/linux/fs_struct.h 2011-08-05 19:44:37.000000000 -0400
52165 @@ -6,7 +6,7 @@
52166 #include <linux/seqlock.h>
52167
52168 struct fs_struct {
52169 - int users;
52170 + atomic_t users;
52171 spinlock_t lock;
52172 seqcount_t seq;
52173 int umask;
52174 diff -urNp linux-2.6.39.4/include/linux/ftrace_event.h linux-2.6.39.4/include/linux/ftrace_event.h
52175 --- linux-2.6.39.4/include/linux/ftrace_event.h 2011-05-19 00:06:34.000000000 -0400
52176 +++ linux-2.6.39.4/include/linux/ftrace_event.h 2011-08-05 20:34:06.000000000 -0400
52177 @@ -84,7 +84,7 @@ struct trace_event_functions {
52178 trace_print_func raw;
52179 trace_print_func hex;
52180 trace_print_func binary;
52181 -};
52182 +} __no_const;
52183
52184 struct trace_event {
52185 struct hlist_node node;
52186 @@ -235,7 +235,7 @@ extern int trace_define_field(struct ftr
52187 extern int trace_add_event_call(struct ftrace_event_call *call);
52188 extern void trace_remove_event_call(struct ftrace_event_call *call);
52189
52190 -#define is_signed_type(type) (((type)(-1)) < 0)
52191 +#define is_signed_type(type) (((type)(-1)) < (type)1)
52192
52193 int trace_set_clr_event(const char *system, const char *event, int set);
52194
52195 diff -urNp linux-2.6.39.4/include/linux/genhd.h linux-2.6.39.4/include/linux/genhd.h
52196 --- linux-2.6.39.4/include/linux/genhd.h 2011-06-03 00:04:14.000000000 -0400
52197 +++ linux-2.6.39.4/include/linux/genhd.h 2011-08-05 19:44:37.000000000 -0400
52198 @@ -184,7 +184,7 @@ struct gendisk {
52199 struct kobject *slave_dir;
52200
52201 struct timer_rand_state *random;
52202 - atomic_t sync_io; /* RAID */
52203 + atomic_unchecked_t sync_io; /* RAID */
52204 struct disk_events *ev;
52205 #ifdef CONFIG_BLK_DEV_INTEGRITY
52206 struct blk_integrity *integrity;
52207 diff -urNp linux-2.6.39.4/include/linux/gracl.h linux-2.6.39.4/include/linux/gracl.h
52208 --- linux-2.6.39.4/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
52209 +++ linux-2.6.39.4/include/linux/gracl.h 2011-08-05 19:44:37.000000000 -0400
52210 @@ -0,0 +1,317 @@
52211 +#ifndef GR_ACL_H
52212 +#define GR_ACL_H
52213 +
52214 +#include <linux/grdefs.h>
52215 +#include <linux/resource.h>
52216 +#include <linux/capability.h>
52217 +#include <linux/dcache.h>
52218 +#include <asm/resource.h>
52219 +
52220 +/* Major status information */
52221 +
52222 +#define GR_VERSION "grsecurity 2.2.2"
52223 +#define GRSECURITY_VERSION 0x2202
52224 +
52225 +enum {
52226 + GR_SHUTDOWN = 0,
52227 + GR_ENABLE = 1,
52228 + GR_SPROLE = 2,
52229 + GR_RELOAD = 3,
52230 + GR_SEGVMOD = 4,
52231 + GR_STATUS = 5,
52232 + GR_UNSPROLE = 6,
52233 + GR_PASSSET = 7,
52234 + GR_SPROLEPAM = 8,
52235 +};
52236 +
52237 +/* Password setup definitions
52238 + * kernel/grhash.c */
52239 +enum {
52240 + GR_PW_LEN = 128,
52241 + GR_SALT_LEN = 16,
52242 + GR_SHA_LEN = 32,
52243 +};
52244 +
52245 +enum {
52246 + GR_SPROLE_LEN = 64,
52247 +};
52248 +
52249 +enum {
52250 + GR_NO_GLOB = 0,
52251 + GR_REG_GLOB,
52252 + GR_CREATE_GLOB
52253 +};
52254 +
52255 +#define GR_NLIMITS 32
52256 +
52257 +/* Begin Data Structures */
52258 +
52259 +struct sprole_pw {
52260 + unsigned char *rolename;
52261 + unsigned char salt[GR_SALT_LEN];
52262 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
52263 +};
52264 +
52265 +struct name_entry {
52266 + __u32 key;
52267 + ino_t inode;
52268 + dev_t device;
52269 + char *name;
52270 + __u16 len;
52271 + __u8 deleted;
52272 + struct name_entry *prev;
52273 + struct name_entry *next;
52274 +};
52275 +
52276 +struct inodev_entry {
52277 + struct name_entry *nentry;
52278 + struct inodev_entry *prev;
52279 + struct inodev_entry *next;
52280 +};
52281 +
52282 +struct acl_role_db {
52283 + struct acl_role_label **r_hash;
52284 + __u32 r_size;
52285 +};
52286 +
52287 +struct inodev_db {
52288 + struct inodev_entry **i_hash;
52289 + __u32 i_size;
52290 +};
52291 +
52292 +struct name_db {
52293 + struct name_entry **n_hash;
52294 + __u32 n_size;
52295 +};
52296 +
52297 +struct crash_uid {
52298 + uid_t uid;
52299 + unsigned long expires;
52300 +};
52301 +
52302 +struct gr_hash_struct {
52303 + void **table;
52304 + void **nametable;
52305 + void *first;
52306 + __u32 table_size;
52307 + __u32 used_size;
52308 + int type;
52309 +};
52310 +
52311 +/* Userspace Grsecurity ACL data structures */
52312 +
52313 +struct acl_subject_label {
52314 + char *filename;
52315 + ino_t inode;
52316 + dev_t device;
52317 + __u32 mode;
52318 + kernel_cap_t cap_mask;
52319 + kernel_cap_t cap_lower;
52320 + kernel_cap_t cap_invert_audit;
52321 +
52322 + struct rlimit res[GR_NLIMITS];
52323 + __u32 resmask;
52324 +
52325 + __u8 user_trans_type;
52326 + __u8 group_trans_type;
52327 + uid_t *user_transitions;
52328 + gid_t *group_transitions;
52329 + __u16 user_trans_num;
52330 + __u16 group_trans_num;
52331 +
52332 + __u32 sock_families[2];
52333 + __u32 ip_proto[8];
52334 + __u32 ip_type;
52335 + struct acl_ip_label **ips;
52336 + __u32 ip_num;
52337 + __u32 inaddr_any_override;
52338 +
52339 + __u32 crashes;
52340 + unsigned long expires;
52341 +
52342 + struct acl_subject_label *parent_subject;
52343 + struct gr_hash_struct *hash;
52344 + struct acl_subject_label *prev;
52345 + struct acl_subject_label *next;
52346 +
52347 + struct acl_object_label **obj_hash;
52348 + __u32 obj_hash_size;
52349 + __u16 pax_flags;
52350 +};
52351 +
52352 +struct role_allowed_ip {
52353 + __u32 addr;
52354 + __u32 netmask;
52355 +
52356 + struct role_allowed_ip *prev;
52357 + struct role_allowed_ip *next;
52358 +};
52359 +
52360 +struct role_transition {
52361 + char *rolename;
52362 +
52363 + struct role_transition *prev;
52364 + struct role_transition *next;
52365 +};
52366 +
52367 +struct acl_role_label {
52368 + char *rolename;
52369 + uid_t uidgid;
52370 + __u16 roletype;
52371 +
52372 + __u16 auth_attempts;
52373 + unsigned long expires;
52374 +
52375 + struct acl_subject_label *root_label;
52376 + struct gr_hash_struct *hash;
52377 +
52378 + struct acl_role_label *prev;
52379 + struct acl_role_label *next;
52380 +
52381 + struct role_transition *transitions;
52382 + struct role_allowed_ip *allowed_ips;
52383 + uid_t *domain_children;
52384 + __u16 domain_child_num;
52385 +
52386 + struct acl_subject_label **subj_hash;
52387 + __u32 subj_hash_size;
52388 +};
52389 +
52390 +struct user_acl_role_db {
52391 + struct acl_role_label **r_table;
52392 + __u32 num_pointers; /* Number of allocations to track */
52393 + __u32 num_roles; /* Number of roles */
52394 + __u32 num_domain_children; /* Number of domain children */
52395 + __u32 num_subjects; /* Number of subjects */
52396 + __u32 num_objects; /* Number of objects */
52397 +};
52398 +
52399 +struct acl_object_label {
52400 + char *filename;
52401 + ino_t inode;
52402 + dev_t device;
52403 + __u32 mode;
52404 +
52405 + struct acl_subject_label *nested;
52406 + struct acl_object_label *globbed;
52407 +
52408 + /* next two structures not used */
52409 +
52410 + struct acl_object_label *prev;
52411 + struct acl_object_label *next;
52412 +};
52413 +
52414 +struct acl_ip_label {
52415 + char *iface;
52416 + __u32 addr;
52417 + __u32 netmask;
52418 + __u16 low, high;
52419 + __u8 mode;
52420 + __u32 type;
52421 + __u32 proto[8];
52422 +
52423 + /* next two structures not used */
52424 +
52425 + struct acl_ip_label *prev;
52426 + struct acl_ip_label *next;
52427 +};
52428 +
52429 +struct gr_arg {
52430 + struct user_acl_role_db role_db;
52431 + unsigned char pw[GR_PW_LEN];
52432 + unsigned char salt[GR_SALT_LEN];
52433 + unsigned char sum[GR_SHA_LEN];
52434 + unsigned char sp_role[GR_SPROLE_LEN];
52435 + struct sprole_pw *sprole_pws;
52436 + dev_t segv_device;
52437 + ino_t segv_inode;
52438 + uid_t segv_uid;
52439 + __u16 num_sprole_pws;
52440 + __u16 mode;
52441 +};
52442 +
52443 +struct gr_arg_wrapper {
52444 + struct gr_arg *arg;
52445 + __u32 version;
52446 + __u32 size;
52447 +};
52448 +
52449 +struct subject_map {
52450 + struct acl_subject_label *user;
52451 + struct acl_subject_label *kernel;
52452 + struct subject_map *prev;
52453 + struct subject_map *next;
52454 +};
52455 +
52456 +struct acl_subj_map_db {
52457 + struct subject_map **s_hash;
52458 + __u32 s_size;
52459 +};
52460 +
52461 +/* End Data Structures Section */
52462 +
52463 +/* Hash functions generated by empirical testing by Brad Spengler
52464 + Makes good use of the low bits of the inode. Generally 0-1 times
52465 + in loop for successful match. 0-3 for unsuccessful match.
52466 + Shift/add algorithm with modulus of table size and an XOR*/
52467 +
52468 +static __inline__ unsigned int
52469 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
52470 +{
52471 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
52472 +}
52473 +
52474 + static __inline__ unsigned int
52475 +shash(const struct acl_subject_label *userp, const unsigned int sz)
52476 +{
52477 + return ((const unsigned long)userp % sz);
52478 +}
52479 +
52480 +static __inline__ unsigned int
52481 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
52482 +{
52483 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
52484 +}
52485 +
52486 +static __inline__ unsigned int
52487 +nhash(const char *name, const __u16 len, const unsigned int sz)
52488 +{
52489 + return full_name_hash((const unsigned char *)name, len) % sz;
52490 +}
52491 +
52492 +#define FOR_EACH_ROLE_START(role) \
52493 + role = role_list; \
52494 + while (role) {
52495 +
52496 +#define FOR_EACH_ROLE_END(role) \
52497 + role = role->prev; \
52498 + }
52499 +
52500 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
52501 + subj = NULL; \
52502 + iter = 0; \
52503 + while (iter < role->subj_hash_size) { \
52504 + if (subj == NULL) \
52505 + subj = role->subj_hash[iter]; \
52506 + if (subj == NULL) { \
52507 + iter++; \
52508 + continue; \
52509 + }
52510 +
52511 +#define FOR_EACH_SUBJECT_END(subj,iter) \
52512 + subj = subj->next; \
52513 + if (subj == NULL) \
52514 + iter++; \
52515 + }
52516 +
52517 +
52518 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
52519 + subj = role->hash->first; \
52520 + while (subj != NULL) {
52521 +
52522 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
52523 + subj = subj->next; \
52524 + }
52525 +
52526 +#endif
52527 +
52528 diff -urNp linux-2.6.39.4/include/linux/gralloc.h linux-2.6.39.4/include/linux/gralloc.h
52529 --- linux-2.6.39.4/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
52530 +++ linux-2.6.39.4/include/linux/gralloc.h 2011-08-05 19:44:37.000000000 -0400
52531 @@ -0,0 +1,9 @@
52532 +#ifndef __GRALLOC_H
52533 +#define __GRALLOC_H
52534 +
52535 +void acl_free_all(void);
52536 +int acl_alloc_stack_init(unsigned long size);
52537 +void *acl_alloc(unsigned long len);
52538 +void *acl_alloc_num(unsigned long num, unsigned long len);
52539 +
52540 +#endif
52541 diff -urNp linux-2.6.39.4/include/linux/grdefs.h linux-2.6.39.4/include/linux/grdefs.h
52542 --- linux-2.6.39.4/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
52543 +++ linux-2.6.39.4/include/linux/grdefs.h 2011-08-05 19:44:37.000000000 -0400
52544 @@ -0,0 +1,140 @@
52545 +#ifndef GRDEFS_H
52546 +#define GRDEFS_H
52547 +
52548 +/* Begin grsecurity status declarations */
52549 +
52550 +enum {
52551 + GR_READY = 0x01,
52552 + GR_STATUS_INIT = 0x00 // disabled state
52553 +};
52554 +
52555 +/* Begin ACL declarations */
52556 +
52557 +/* Role flags */
52558 +
52559 +enum {
52560 + GR_ROLE_USER = 0x0001,
52561 + GR_ROLE_GROUP = 0x0002,
52562 + GR_ROLE_DEFAULT = 0x0004,
52563 + GR_ROLE_SPECIAL = 0x0008,
52564 + GR_ROLE_AUTH = 0x0010,
52565 + GR_ROLE_NOPW = 0x0020,
52566 + GR_ROLE_GOD = 0x0040,
52567 + GR_ROLE_LEARN = 0x0080,
52568 + GR_ROLE_TPE = 0x0100,
52569 + GR_ROLE_DOMAIN = 0x0200,
52570 + GR_ROLE_PAM = 0x0400,
52571 + GR_ROLE_PERSIST = 0x0800
52572 +};
52573 +
52574 +/* ACL Subject and Object mode flags */
52575 +enum {
52576 + GR_DELETED = 0x80000000
52577 +};
52578 +
52579 +/* ACL Object-only mode flags */
52580 +enum {
52581 + GR_READ = 0x00000001,
52582 + GR_APPEND = 0x00000002,
52583 + GR_WRITE = 0x00000004,
52584 + GR_EXEC = 0x00000008,
52585 + GR_FIND = 0x00000010,
52586 + GR_INHERIT = 0x00000020,
52587 + GR_SETID = 0x00000040,
52588 + GR_CREATE = 0x00000080,
52589 + GR_DELETE = 0x00000100,
52590 + GR_LINK = 0x00000200,
52591 + GR_AUDIT_READ = 0x00000400,
52592 + GR_AUDIT_APPEND = 0x00000800,
52593 + GR_AUDIT_WRITE = 0x00001000,
52594 + GR_AUDIT_EXEC = 0x00002000,
52595 + GR_AUDIT_FIND = 0x00004000,
52596 + GR_AUDIT_INHERIT= 0x00008000,
52597 + GR_AUDIT_SETID = 0x00010000,
52598 + GR_AUDIT_CREATE = 0x00020000,
52599 + GR_AUDIT_DELETE = 0x00040000,
52600 + GR_AUDIT_LINK = 0x00080000,
52601 + GR_PTRACERD = 0x00100000,
52602 + GR_NOPTRACE = 0x00200000,
52603 + GR_SUPPRESS = 0x00400000,
52604 + GR_NOLEARN = 0x00800000,
52605 + GR_INIT_TRANSFER= 0x01000000
52606 +};
52607 +
52608 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
52609 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
52610 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
52611 +
52612 +/* ACL subject-only mode flags */
52613 +enum {
52614 + GR_KILL = 0x00000001,
52615 + GR_VIEW = 0x00000002,
52616 + GR_PROTECTED = 0x00000004,
52617 + GR_LEARN = 0x00000008,
52618 + GR_OVERRIDE = 0x00000010,
52619 + /* just a placeholder, this mode is only used in userspace */
52620 + GR_DUMMY = 0x00000020,
52621 + GR_PROTSHM = 0x00000040,
52622 + GR_KILLPROC = 0x00000080,
52623 + GR_KILLIPPROC = 0x00000100,
52624 + /* just a placeholder, this mode is only used in userspace */
52625 + GR_NOTROJAN = 0x00000200,
52626 + GR_PROTPROCFD = 0x00000400,
52627 + GR_PROCACCT = 0x00000800,
52628 + GR_RELAXPTRACE = 0x00001000,
52629 + GR_NESTED = 0x00002000,
52630 + GR_INHERITLEARN = 0x00004000,
52631 + GR_PROCFIND = 0x00008000,
52632 + GR_POVERRIDE = 0x00010000,
52633 + GR_KERNELAUTH = 0x00020000,
52634 + GR_ATSECURE = 0x00040000,
52635 + GR_SHMEXEC = 0x00080000
52636 +};
52637 +
52638 +enum {
52639 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
52640 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
52641 + GR_PAX_ENABLE_MPROTECT = 0x0004,
52642 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
52643 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
52644 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
52645 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
52646 + GR_PAX_DISABLE_MPROTECT = 0x0400,
52647 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
52648 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
52649 +};
52650 +
52651 +enum {
52652 + GR_ID_USER = 0x01,
52653 + GR_ID_GROUP = 0x02,
52654 +};
52655 +
52656 +enum {
52657 + GR_ID_ALLOW = 0x01,
52658 + GR_ID_DENY = 0x02,
52659 +};
52660 +
52661 +#define GR_CRASH_RES 31
52662 +#define GR_UIDTABLE_MAX 500
52663 +
52664 +/* begin resource learning section */
52665 +enum {
52666 + GR_RLIM_CPU_BUMP = 60,
52667 + GR_RLIM_FSIZE_BUMP = 50000,
52668 + GR_RLIM_DATA_BUMP = 10000,
52669 + GR_RLIM_STACK_BUMP = 1000,
52670 + GR_RLIM_CORE_BUMP = 10000,
52671 + GR_RLIM_RSS_BUMP = 500000,
52672 + GR_RLIM_NPROC_BUMP = 1,
52673 + GR_RLIM_NOFILE_BUMP = 5,
52674 + GR_RLIM_MEMLOCK_BUMP = 50000,
52675 + GR_RLIM_AS_BUMP = 500000,
52676 + GR_RLIM_LOCKS_BUMP = 2,
52677 + GR_RLIM_SIGPENDING_BUMP = 5,
52678 + GR_RLIM_MSGQUEUE_BUMP = 10000,
52679 + GR_RLIM_NICE_BUMP = 1,
52680 + GR_RLIM_RTPRIO_BUMP = 1,
52681 + GR_RLIM_RTTIME_BUMP = 1000000
52682 +};
52683 +
52684 +#endif
52685 diff -urNp linux-2.6.39.4/include/linux/grinternal.h linux-2.6.39.4/include/linux/grinternal.h
52686 --- linux-2.6.39.4/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
52687 +++ linux-2.6.39.4/include/linux/grinternal.h 2011-08-05 19:44:37.000000000 -0400
52688 @@ -0,0 +1,219 @@
52689 +#ifndef __GRINTERNAL_H
52690 +#define __GRINTERNAL_H
52691 +
52692 +#ifdef CONFIG_GRKERNSEC
52693 +
52694 +#include <linux/fs.h>
52695 +#include <linux/mnt_namespace.h>
52696 +#include <linux/nsproxy.h>
52697 +#include <linux/gracl.h>
52698 +#include <linux/grdefs.h>
52699 +#include <linux/grmsg.h>
52700 +
52701 +void gr_add_learn_entry(const char *fmt, ...)
52702 + __attribute__ ((format (printf, 1, 2)));
52703 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
52704 + const struct vfsmount *mnt);
52705 +__u32 gr_check_create(const struct dentry *new_dentry,
52706 + const struct dentry *parent,
52707 + const struct vfsmount *mnt, const __u32 mode);
52708 +int gr_check_protected_task(const struct task_struct *task);
52709 +__u32 to_gr_audit(const __u32 reqmode);
52710 +int gr_set_acls(const int type);
52711 +int gr_apply_subject_to_task(struct task_struct *task);
52712 +int gr_acl_is_enabled(void);
52713 +char gr_roletype_to_char(void);
52714 +
52715 +void gr_handle_alertkill(struct task_struct *task);
52716 +char *gr_to_filename(const struct dentry *dentry,
52717 + const struct vfsmount *mnt);
52718 +char *gr_to_filename1(const struct dentry *dentry,
52719 + const struct vfsmount *mnt);
52720 +char *gr_to_filename2(const struct dentry *dentry,
52721 + const struct vfsmount *mnt);
52722 +char *gr_to_filename3(const struct dentry *dentry,
52723 + const struct vfsmount *mnt);
52724 +
52725 +extern int grsec_enable_harden_ptrace;
52726 +extern int grsec_enable_link;
52727 +extern int grsec_enable_fifo;
52728 +extern int grsec_enable_execve;
52729 +extern int grsec_enable_shm;
52730 +extern int grsec_enable_execlog;
52731 +extern int grsec_enable_signal;
52732 +extern int grsec_enable_audit_ptrace;
52733 +extern int grsec_enable_forkfail;
52734 +extern int grsec_enable_time;
52735 +extern int grsec_enable_rofs;
52736 +extern int grsec_enable_chroot_shmat;
52737 +extern int grsec_enable_chroot_mount;
52738 +extern int grsec_enable_chroot_double;
52739 +extern int grsec_enable_chroot_pivot;
52740 +extern int grsec_enable_chroot_chdir;
52741 +extern int grsec_enable_chroot_chmod;
52742 +extern int grsec_enable_chroot_mknod;
52743 +extern int grsec_enable_chroot_fchdir;
52744 +extern int grsec_enable_chroot_nice;
52745 +extern int grsec_enable_chroot_execlog;
52746 +extern int grsec_enable_chroot_caps;
52747 +extern int grsec_enable_chroot_sysctl;
52748 +extern int grsec_enable_chroot_unix;
52749 +extern int grsec_enable_tpe;
52750 +extern int grsec_tpe_gid;
52751 +extern int grsec_enable_tpe_all;
52752 +extern int grsec_enable_tpe_invert;
52753 +extern int grsec_enable_socket_all;
52754 +extern int grsec_socket_all_gid;
52755 +extern int grsec_enable_socket_client;
52756 +extern int grsec_socket_client_gid;
52757 +extern int grsec_enable_socket_server;
52758 +extern int grsec_socket_server_gid;
52759 +extern int grsec_audit_gid;
52760 +extern int grsec_enable_group;
52761 +extern int grsec_enable_audit_textrel;
52762 +extern int grsec_enable_log_rwxmaps;
52763 +extern int grsec_enable_mount;
52764 +extern int grsec_enable_chdir;
52765 +extern int grsec_resource_logging;
52766 +extern int grsec_enable_blackhole;
52767 +extern int grsec_lastack_retries;
52768 +extern int grsec_enable_brute;
52769 +extern int grsec_lock;
52770 +
52771 +extern spinlock_t grsec_alert_lock;
52772 +extern unsigned long grsec_alert_wtime;
52773 +extern unsigned long grsec_alert_fyet;
52774 +
52775 +extern spinlock_t grsec_audit_lock;
52776 +
52777 +extern rwlock_t grsec_exec_file_lock;
52778 +
52779 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
52780 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
52781 + (tsk)->exec_file->f_vfsmnt) : "/")
52782 +
52783 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
52784 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
52785 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52786 +
52787 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
52788 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
52789 + (tsk)->exec_file->f_vfsmnt) : "/")
52790 +
52791 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
52792 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
52793 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52794 +
52795 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
52796 +
52797 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
52798 +
52799 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
52800 + (task)->pid, (cred)->uid, \
52801 + (cred)->euid, (cred)->gid, (cred)->egid, \
52802 + gr_parent_task_fullpath(task), \
52803 + (task)->real_parent->comm, (task)->real_parent->pid, \
52804 + (pcred)->uid, (pcred)->euid, \
52805 + (pcred)->gid, (pcred)->egid
52806 +
52807 +#define GR_CHROOT_CAPS {{ \
52808 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
52809 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
52810 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
52811 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
52812 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
52813 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
52814 +
52815 +#define security_learn(normal_msg,args...) \
52816 +({ \
52817 + read_lock(&grsec_exec_file_lock); \
52818 + gr_add_learn_entry(normal_msg "\n", ## args); \
52819 + read_unlock(&grsec_exec_file_lock); \
52820 +})
52821 +
52822 +enum {
52823 + GR_DO_AUDIT,
52824 + GR_DONT_AUDIT,
52825 + /* used for non-audit messages that we shouldn't kill the task on */
52826 + GR_DONT_AUDIT_GOOD
52827 +};
52828 +
52829 +enum {
52830 + GR_TTYSNIFF,
52831 + GR_RBAC,
52832 + GR_RBAC_STR,
52833 + GR_STR_RBAC,
52834 + GR_RBAC_MODE2,
52835 + GR_RBAC_MODE3,
52836 + GR_FILENAME,
52837 + GR_SYSCTL_HIDDEN,
52838 + GR_NOARGS,
52839 + GR_ONE_INT,
52840 + GR_ONE_INT_TWO_STR,
52841 + GR_ONE_STR,
52842 + GR_STR_INT,
52843 + GR_TWO_STR_INT,
52844 + GR_TWO_INT,
52845 + GR_TWO_U64,
52846 + GR_THREE_INT,
52847 + GR_FIVE_INT_TWO_STR,
52848 + GR_TWO_STR,
52849 + GR_THREE_STR,
52850 + GR_FOUR_STR,
52851 + GR_STR_FILENAME,
52852 + GR_FILENAME_STR,
52853 + GR_FILENAME_TWO_INT,
52854 + GR_FILENAME_TWO_INT_STR,
52855 + GR_TEXTREL,
52856 + GR_PTRACE,
52857 + GR_RESOURCE,
52858 + GR_CAP,
52859 + GR_SIG,
52860 + GR_SIG2,
52861 + GR_CRASH1,
52862 + GR_CRASH2,
52863 + GR_PSACCT,
52864 + GR_RWXMAP
52865 +};
52866 +
52867 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
52868 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
52869 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
52870 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
52871 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
52872 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
52873 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
52874 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
52875 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
52876 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
52877 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
52878 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
52879 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
52880 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
52881 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
52882 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
52883 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
52884 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
52885 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
52886 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
52887 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
52888 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
52889 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
52890 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
52891 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
52892 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
52893 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
52894 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
52895 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
52896 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
52897 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
52898 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
52899 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
52900 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
52901 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
52902 +
52903 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
52904 +
52905 +#endif
52906 +
52907 +#endif
52908 diff -urNp linux-2.6.39.4/include/linux/grmsg.h linux-2.6.39.4/include/linux/grmsg.h
52909 --- linux-2.6.39.4/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
52910 +++ linux-2.6.39.4/include/linux/grmsg.h 2011-08-05 19:44:37.000000000 -0400
52911 @@ -0,0 +1,108 @@
52912 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
52913 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
52914 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
52915 +#define GR_STOPMOD_MSG "denied modification of module state by "
52916 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
52917 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
52918 +#define GR_IOPERM_MSG "denied use of ioperm() by "
52919 +#define GR_IOPL_MSG "denied use of iopl() by "
52920 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
52921 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
52922 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
52923 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
52924 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
52925 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
52926 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
52927 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
52928 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
52929 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
52930 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
52931 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
52932 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
52933 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
52934 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
52935 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
52936 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
52937 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
52938 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
52939 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
52940 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
52941 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
52942 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
52943 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
52944 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
52945 +#define GR_NPROC_MSG "denied overstep of process limit by "
52946 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
52947 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
52948 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
52949 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
52950 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
52951 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
52952 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
52953 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
52954 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
52955 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
52956 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
52957 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
52958 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
52959 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
52960 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
52961 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
52962 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
52963 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
52964 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
52965 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
52966 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
52967 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
52968 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
52969 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
52970 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
52971 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
52972 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
52973 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
52974 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
52975 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
52976 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
52977 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
52978 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
52979 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
52980 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
52981 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
52982 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
52983 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
52984 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
52985 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
52986 +#define GR_NICE_CHROOT_MSG "denied priority change by "
52987 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
52988 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
52989 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
52990 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
52991 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
52992 +#define GR_TIME_MSG "time set by "
52993 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
52994 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
52995 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
52996 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
52997 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
52998 +#define GR_BIND_MSG "denied bind() by "
52999 +#define GR_CONNECT_MSG "denied connect() by "
53000 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
53001 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
53002 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
53003 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
53004 +#define GR_CAP_ACL_MSG "use of %s denied for "
53005 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
53006 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
53007 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
53008 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
53009 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
53010 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
53011 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
53012 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
53013 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
53014 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
53015 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
53016 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
53017 +#define GR_VM86_MSG "denied use of vm86 by "
53018 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
53019 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
53020 diff -urNp linux-2.6.39.4/include/linux/grsecurity.h linux-2.6.39.4/include/linux/grsecurity.h
53021 --- linux-2.6.39.4/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
53022 +++ linux-2.6.39.4/include/linux/grsecurity.h 2011-08-05 19:54:17.000000000 -0400
53023 @@ -0,0 +1,218 @@
53024 +#ifndef GR_SECURITY_H
53025 +#define GR_SECURITY_H
53026 +#include <linux/fs.h>
53027 +#include <linux/fs_struct.h>
53028 +#include <linux/binfmts.h>
53029 +#include <linux/gracl.h>
53030 +#include <linux/compat.h>
53031 +
53032 +/* notify of brain-dead configs */
53033 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53034 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
53035 +#endif
53036 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
53037 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
53038 +#endif
53039 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
53040 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
53041 +#endif
53042 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
53043 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
53044 +#endif
53045 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
53046 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
53047 +#endif
53048 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
53049 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
53050 +#endif
53051 +
53052 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
53053 +void gr_handle_brute_check(void);
53054 +void gr_handle_kernel_exploit(void);
53055 +int gr_process_user_ban(void);
53056 +
53057 +char gr_roletype_to_char(void);
53058 +
53059 +int gr_acl_enable_at_secure(void);
53060 +
53061 +int gr_check_user_change(int real, int effective, int fs);
53062 +int gr_check_group_change(int real, int effective, int fs);
53063 +
53064 +void gr_del_task_from_ip_table(struct task_struct *p);
53065 +
53066 +int gr_pid_is_chrooted(struct task_struct *p);
53067 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
53068 +int gr_handle_chroot_nice(void);
53069 +int gr_handle_chroot_sysctl(const int op);
53070 +int gr_handle_chroot_setpriority(struct task_struct *p,
53071 + const int niceval);
53072 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
53073 +int gr_handle_chroot_chroot(const struct dentry *dentry,
53074 + const struct vfsmount *mnt);
53075 +int gr_handle_chroot_caps(struct path *path);
53076 +void gr_handle_chroot_chdir(struct path *path);
53077 +int gr_handle_chroot_chmod(const struct dentry *dentry,
53078 + const struct vfsmount *mnt, const int mode);
53079 +int gr_handle_chroot_mknod(const struct dentry *dentry,
53080 + const struct vfsmount *mnt, const int mode);
53081 +int gr_handle_chroot_mount(const struct dentry *dentry,
53082 + const struct vfsmount *mnt,
53083 + const char *dev_name);
53084 +int gr_handle_chroot_pivot(void);
53085 +int gr_handle_chroot_unix(const pid_t pid);
53086 +
53087 +int gr_handle_rawio(const struct inode *inode);
53088 +int gr_handle_nproc(void);
53089 +
53090 +void gr_handle_ioperm(void);
53091 +void gr_handle_iopl(void);
53092 +
53093 +int gr_tpe_allow(const struct file *file);
53094 +
53095 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
53096 +void gr_clear_chroot_entries(struct task_struct *task);
53097 +
53098 +void gr_log_forkfail(const int retval);
53099 +void gr_log_timechange(void);
53100 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
53101 +void gr_log_chdir(const struct dentry *dentry,
53102 + const struct vfsmount *mnt);
53103 +void gr_log_chroot_exec(const struct dentry *dentry,
53104 + const struct vfsmount *mnt);
53105 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
53106 +#ifdef CONFIG_COMPAT
53107 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
53108 +#endif
53109 +void gr_log_remount(const char *devname, const int retval);
53110 +void gr_log_unmount(const char *devname, const int retval);
53111 +void gr_log_mount(const char *from, const char *to, const int retval);
53112 +void gr_log_textrel(struct vm_area_struct *vma);
53113 +void gr_log_rwxmmap(struct file *file);
53114 +void gr_log_rwxmprotect(struct file *file);
53115 +
53116 +int gr_handle_follow_link(const struct inode *parent,
53117 + const struct inode *inode,
53118 + const struct dentry *dentry,
53119 + const struct vfsmount *mnt);
53120 +int gr_handle_fifo(const struct dentry *dentry,
53121 + const struct vfsmount *mnt,
53122 + const struct dentry *dir, const int flag,
53123 + const int acc_mode);
53124 +int gr_handle_hardlink(const struct dentry *dentry,
53125 + const struct vfsmount *mnt,
53126 + struct inode *inode,
53127 + const int mode, const char *to);
53128 +
53129 +int gr_is_capable(const int cap);
53130 +int gr_is_capable_nolog(const int cap);
53131 +void gr_learn_resource(const struct task_struct *task, const int limit,
53132 + const unsigned long wanted, const int gt);
53133 +void gr_copy_label(struct task_struct *tsk);
53134 +void gr_handle_crash(struct task_struct *task, const int sig);
53135 +int gr_handle_signal(const struct task_struct *p, const int sig);
53136 +int gr_check_crash_uid(const uid_t uid);
53137 +int gr_check_protected_task(const struct task_struct *task);
53138 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
53139 +int gr_acl_handle_mmap(const struct file *file,
53140 + const unsigned long prot);
53141 +int gr_acl_handle_mprotect(const struct file *file,
53142 + const unsigned long prot);
53143 +int gr_check_hidden_task(const struct task_struct *tsk);
53144 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
53145 + const struct vfsmount *mnt);
53146 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
53147 + const struct vfsmount *mnt);
53148 +__u32 gr_acl_handle_access(const struct dentry *dentry,
53149 + const struct vfsmount *mnt, const int fmode);
53150 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
53151 + const struct vfsmount *mnt, mode_t mode);
53152 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
53153 + const struct vfsmount *mnt, mode_t mode);
53154 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
53155 + const struct vfsmount *mnt);
53156 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
53157 + const struct vfsmount *mnt);
53158 +int gr_handle_ptrace(struct task_struct *task, const long request);
53159 +int gr_handle_proc_ptrace(struct task_struct *task);
53160 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
53161 + const struct vfsmount *mnt);
53162 +int gr_check_crash_exec(const struct file *filp);
53163 +int gr_acl_is_enabled(void);
53164 +void gr_set_kernel_label(struct task_struct *task);
53165 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
53166 + const gid_t gid);
53167 +int gr_set_proc_label(const struct dentry *dentry,
53168 + const struct vfsmount *mnt,
53169 + const int unsafe_share);
53170 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
53171 + const struct vfsmount *mnt);
53172 +__u32 gr_acl_handle_open(const struct dentry *dentry,
53173 + const struct vfsmount *mnt, const int fmode);
53174 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
53175 + const struct dentry *p_dentry,
53176 + const struct vfsmount *p_mnt, const int fmode,
53177 + const int imode);
53178 +void gr_handle_create(const struct dentry *dentry,
53179 + const struct vfsmount *mnt);
53180 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
53181 + const struct dentry *parent_dentry,
53182 + const struct vfsmount *parent_mnt,
53183 + const int mode);
53184 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
53185 + const struct dentry *parent_dentry,
53186 + const struct vfsmount *parent_mnt);
53187 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
53188 + const struct vfsmount *mnt);
53189 +void gr_handle_delete(const ino_t ino, const dev_t dev);
53190 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
53191 + const struct vfsmount *mnt);
53192 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
53193 + const struct dentry *parent_dentry,
53194 + const struct vfsmount *parent_mnt,
53195 + const char *from);
53196 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
53197 + const struct dentry *parent_dentry,
53198 + const struct vfsmount *parent_mnt,
53199 + const struct dentry *old_dentry,
53200 + const struct vfsmount *old_mnt, const char *to);
53201 +int gr_acl_handle_rename(struct dentry *new_dentry,
53202 + struct dentry *parent_dentry,
53203 + const struct vfsmount *parent_mnt,
53204 + struct dentry *old_dentry,
53205 + struct inode *old_parent_inode,
53206 + struct vfsmount *old_mnt, const char *newname);
53207 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53208 + struct dentry *old_dentry,
53209 + struct dentry *new_dentry,
53210 + struct vfsmount *mnt, const __u8 replace);
53211 +__u32 gr_check_link(const struct dentry *new_dentry,
53212 + const struct dentry *parent_dentry,
53213 + const struct vfsmount *parent_mnt,
53214 + const struct dentry *old_dentry,
53215 + const struct vfsmount *old_mnt);
53216 +int gr_acl_handle_filldir(const struct file *file, const char *name,
53217 + const unsigned int namelen, const ino_t ino);
53218 +
53219 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
53220 + const struct vfsmount *mnt);
53221 +void gr_acl_handle_exit(void);
53222 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
53223 +int gr_acl_handle_procpidmem(const struct task_struct *task);
53224 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
53225 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
53226 +void gr_audit_ptrace(struct task_struct *task);
53227 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
53228 +
53229 +#ifdef CONFIG_GRKERNSEC
53230 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
53231 +void gr_handle_vm86(void);
53232 +void gr_handle_mem_readwrite(u64 from, u64 to);
53233 +
53234 +extern int grsec_enable_dmesg;
53235 +extern int grsec_disable_privio;
53236 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53237 +extern int grsec_enable_chroot_findtask;
53238 +#endif
53239 +#endif
53240 +
53241 +#endif
53242 diff -urNp linux-2.6.39.4/include/linux/grsock.h linux-2.6.39.4/include/linux/grsock.h
53243 --- linux-2.6.39.4/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
53244 +++ linux-2.6.39.4/include/linux/grsock.h 2011-08-05 19:44:37.000000000 -0400
53245 @@ -0,0 +1,19 @@
53246 +#ifndef __GRSOCK_H
53247 +#define __GRSOCK_H
53248 +
53249 +extern void gr_attach_curr_ip(const struct sock *sk);
53250 +extern int gr_handle_sock_all(const int family, const int type,
53251 + const int protocol);
53252 +extern int gr_handle_sock_server(const struct sockaddr *sck);
53253 +extern int gr_handle_sock_server_other(const struct sock *sck);
53254 +extern int gr_handle_sock_client(const struct sockaddr *sck);
53255 +extern int gr_search_connect(struct socket * sock,
53256 + struct sockaddr_in * addr);
53257 +extern int gr_search_bind(struct socket * sock,
53258 + struct sockaddr_in * addr);
53259 +extern int gr_search_listen(struct socket * sock);
53260 +extern int gr_search_accept(struct socket * sock);
53261 +extern int gr_search_socket(const int domain, const int type,
53262 + const int protocol);
53263 +
53264 +#endif
53265 diff -urNp linux-2.6.39.4/include/linux/highmem.h linux-2.6.39.4/include/linux/highmem.h
53266 --- linux-2.6.39.4/include/linux/highmem.h 2011-05-19 00:06:34.000000000 -0400
53267 +++ linux-2.6.39.4/include/linux/highmem.h 2011-08-05 19:44:37.000000000 -0400
53268 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct
53269 kunmap_atomic(kaddr, KM_USER0);
53270 }
53271
53272 +static inline void sanitize_highpage(struct page *page)
53273 +{
53274 + void *kaddr;
53275 + unsigned long flags;
53276 +
53277 + local_irq_save(flags);
53278 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
53279 + clear_page(kaddr);
53280 + kunmap_atomic(kaddr, KM_CLEARPAGE);
53281 + local_irq_restore(flags);
53282 +}
53283 +
53284 static inline void zero_user_segments(struct page *page,
53285 unsigned start1, unsigned end1,
53286 unsigned start2, unsigned end2)
53287 diff -urNp linux-2.6.39.4/include/linux/i2c.h linux-2.6.39.4/include/linux/i2c.h
53288 --- linux-2.6.39.4/include/linux/i2c.h 2011-05-19 00:06:34.000000000 -0400
53289 +++ linux-2.6.39.4/include/linux/i2c.h 2011-08-05 20:34:06.000000000 -0400
53290 @@ -346,6 +346,7 @@ struct i2c_algorithm {
53291 /* To determine what the adapter supports */
53292 u32 (*functionality) (struct i2c_adapter *);
53293 };
53294 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
53295
53296 /*
53297 * i2c_adapter is the structure used to identify a physical i2c bus along
53298 diff -urNp linux-2.6.39.4/include/linux/i2o.h linux-2.6.39.4/include/linux/i2o.h
53299 --- linux-2.6.39.4/include/linux/i2o.h 2011-05-19 00:06:34.000000000 -0400
53300 +++ linux-2.6.39.4/include/linux/i2o.h 2011-08-05 19:44:37.000000000 -0400
53301 @@ -564,7 +564,7 @@ struct i2o_controller {
53302 struct i2o_device *exec; /* Executive */
53303 #if BITS_PER_LONG == 64
53304 spinlock_t context_list_lock; /* lock for context_list */
53305 - atomic_t context_list_counter; /* needed for unique contexts */
53306 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
53307 struct list_head context_list; /* list of context id's
53308 and pointers */
53309 #endif
53310 diff -urNp linux-2.6.39.4/include/linux/init.h linux-2.6.39.4/include/linux/init.h
53311 --- linux-2.6.39.4/include/linux/init.h 2011-05-19 00:06:34.000000000 -0400
53312 +++ linux-2.6.39.4/include/linux/init.h 2011-08-05 19:44:37.000000000 -0400
53313 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
53314
53315 /* Each module must use one module_init(). */
53316 #define module_init(initfn) \
53317 - static inline initcall_t __inittest(void) \
53318 + static inline __used initcall_t __inittest(void) \
53319 { return initfn; } \
53320 int init_module(void) __attribute__((alias(#initfn)));
53321
53322 /* This is only required if you want to be unloadable. */
53323 #define module_exit(exitfn) \
53324 - static inline exitcall_t __exittest(void) \
53325 + static inline __used exitcall_t __exittest(void) \
53326 { return exitfn; } \
53327 void cleanup_module(void) __attribute__((alias(#exitfn)));
53328
53329 diff -urNp linux-2.6.39.4/include/linux/init_task.h linux-2.6.39.4/include/linux/init_task.h
53330 --- linux-2.6.39.4/include/linux/init_task.h 2011-05-19 00:06:34.000000000 -0400
53331 +++ linux-2.6.39.4/include/linux/init_task.h 2011-08-05 19:44:37.000000000 -0400
53332 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
53333 #define INIT_IDS
53334 #endif
53335
53336 +#ifdef CONFIG_X86
53337 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
53338 +#else
53339 +#define INIT_TASK_THREAD_INFO
53340 +#endif
53341 +
53342 /*
53343 * Because of the reduced scope of CAP_SETPCAP when filesystem
53344 * capabilities are in effect, it is safe to allow CAP_SETPCAP to
53345 @@ -163,6 +169,7 @@ extern struct cred init_cred;
53346 RCU_INIT_POINTER(.cred, &init_cred), \
53347 .comm = "swapper", \
53348 .thread = INIT_THREAD, \
53349 + INIT_TASK_THREAD_INFO \
53350 .fs = &init_fs, \
53351 .files = &init_files, \
53352 .signal = &init_signals, \
53353 diff -urNp linux-2.6.39.4/include/linux/intel-iommu.h linux-2.6.39.4/include/linux/intel-iommu.h
53354 --- linux-2.6.39.4/include/linux/intel-iommu.h 2011-05-19 00:06:34.000000000 -0400
53355 +++ linux-2.6.39.4/include/linux/intel-iommu.h 2011-08-05 20:34:06.000000000 -0400
53356 @@ -296,7 +296,7 @@ struct iommu_flush {
53357 u8 fm, u64 type);
53358 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
53359 unsigned int size_order, u64 type);
53360 -};
53361 +} __no_const;
53362
53363 enum {
53364 SR_DMAR_FECTL_REG,
53365 diff -urNp linux-2.6.39.4/include/linux/interrupt.h linux-2.6.39.4/include/linux/interrupt.h
53366 --- linux-2.6.39.4/include/linux/interrupt.h 2011-05-19 00:06:34.000000000 -0400
53367 +++ linux-2.6.39.4/include/linux/interrupt.h 2011-08-05 19:44:37.000000000 -0400
53368 @@ -422,7 +422,7 @@ enum
53369 /* map softirq index to softirq name. update 'softirq_to_name' in
53370 * kernel/softirq.c when adding a new softirq.
53371 */
53372 -extern char *softirq_to_name[NR_SOFTIRQS];
53373 +extern const char * const softirq_to_name[NR_SOFTIRQS];
53374
53375 /* softirq mask and active fields moved to irq_cpustat_t in
53376 * asm/hardirq.h to get better cache usage. KAO
53377 @@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
53378
53379 struct softirq_action
53380 {
53381 - void (*action)(struct softirq_action *);
53382 + void (*action)(void);
53383 };
53384
53385 asmlinkage void do_softirq(void);
53386 asmlinkage void __do_softirq(void);
53387 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
53388 +extern void open_softirq(int nr, void (*action)(void));
53389 extern void softirq_init(void);
53390 static inline void __raise_softirq_irqoff(unsigned int nr)
53391 {
53392 diff -urNp linux-2.6.39.4/include/linux/kallsyms.h linux-2.6.39.4/include/linux/kallsyms.h
53393 --- linux-2.6.39.4/include/linux/kallsyms.h 2011-05-19 00:06:34.000000000 -0400
53394 +++ linux-2.6.39.4/include/linux/kallsyms.h 2011-08-05 19:44:37.000000000 -0400
53395 @@ -15,7 +15,8 @@
53396
53397 struct module;
53398
53399 -#ifdef CONFIG_KALLSYMS
53400 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
53401 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53402 /* Lookup the address for a symbol. Returns 0 if not found. */
53403 unsigned long kallsyms_lookup_name(const char *name);
53404
53405 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
53406 /* Stupid that this does nothing, but I didn't create this mess. */
53407 #define __print_symbol(fmt, addr)
53408 #endif /*CONFIG_KALLSYMS*/
53409 +#else /* when included by kallsyms.c, vsnprintf.c, or
53410 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
53411 +extern void __print_symbol(const char *fmt, unsigned long address);
53412 +extern int sprint_backtrace(char *buffer, unsigned long address);
53413 +extern int sprint_symbol(char *buffer, unsigned long address);
53414 +const char *kallsyms_lookup(unsigned long addr,
53415 + unsigned long *symbolsize,
53416 + unsigned long *offset,
53417 + char **modname, char *namebuf);
53418 +#endif
53419
53420 /* This macro allows us to keep printk typechecking */
53421 static void __check_printsym_format(const char *fmt, ...)
53422 diff -urNp linux-2.6.39.4/include/linux/kgdb.h linux-2.6.39.4/include/linux/kgdb.h
53423 --- linux-2.6.39.4/include/linux/kgdb.h 2011-05-19 00:06:34.000000000 -0400
53424 +++ linux-2.6.39.4/include/linux/kgdb.h 2011-08-05 20:34:06.000000000 -0400
53425 @@ -53,7 +53,7 @@ extern int kgdb_connected;
53426 extern int kgdb_io_module_registered;
53427
53428 extern atomic_t kgdb_setting_breakpoint;
53429 -extern atomic_t kgdb_cpu_doing_single_step;
53430 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
53431
53432 extern struct task_struct *kgdb_usethread;
53433 extern struct task_struct *kgdb_contthread;
53434 @@ -241,8 +241,8 @@ extern void kgdb_arch_late(void);
53435 * hardware debug registers.
53436 */
53437 struct kgdb_arch {
53438 - unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
53439 - unsigned long flags;
53440 + const unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
53441 + const unsigned long flags;
53442
53443 int (*set_breakpoint)(unsigned long, char *);
53444 int (*remove_breakpoint)(unsigned long, char *);
53445 @@ -268,14 +268,14 @@ struct kgdb_arch {
53446 * not a console
53447 */
53448 struct kgdb_io {
53449 - const char *name;
53450 + const char * const name;
53451 int (*read_char) (void);
53452 void (*write_char) (u8);
53453 void (*flush) (void);
53454 int (*init) (void);
53455 void (*pre_exception) (void);
53456 void (*post_exception) (void);
53457 - int is_console;
53458 + const int is_console;
53459 };
53460
53461 extern struct kgdb_arch arch_kgdb_ops;
53462 diff -urNp linux-2.6.39.4/include/linux/kmod.h linux-2.6.39.4/include/linux/kmod.h
53463 --- linux-2.6.39.4/include/linux/kmod.h 2011-05-19 00:06:34.000000000 -0400
53464 +++ linux-2.6.39.4/include/linux/kmod.h 2011-08-05 19:44:37.000000000 -0400
53465 @@ -33,6 +33,8 @@ extern char modprobe_path[]; /* for sysc
53466 * usually useless though. */
53467 extern int __request_module(bool wait, const char *name, ...) \
53468 __attribute__((format(printf, 2, 3)));
53469 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
53470 + __attribute__((format(printf, 3, 4)));
53471 #define request_module(mod...) __request_module(true, mod)
53472 #define request_module_nowait(mod...) __request_module(false, mod)
53473 #define try_then_request_module(x, mod...) \
53474 diff -urNp linux-2.6.39.4/include/linux/kvm_host.h linux-2.6.39.4/include/linux/kvm_host.h
53475 --- linux-2.6.39.4/include/linux/kvm_host.h 2011-05-19 00:06:34.000000000 -0400
53476 +++ linux-2.6.39.4/include/linux/kvm_host.h 2011-08-05 19:44:37.000000000 -0400
53477 @@ -302,7 +302,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
53478 void vcpu_load(struct kvm_vcpu *vcpu);
53479 void vcpu_put(struct kvm_vcpu *vcpu);
53480
53481 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53482 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53483 struct module *module);
53484 void kvm_exit(void);
53485
53486 @@ -442,7 +442,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
53487 struct kvm_guest_debug *dbg);
53488 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
53489
53490 -int kvm_arch_init(void *opaque);
53491 +int kvm_arch_init(const void *opaque);
53492 void kvm_arch_exit(void);
53493
53494 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
53495 diff -urNp linux-2.6.39.4/include/linux/libata.h linux-2.6.39.4/include/linux/libata.h
53496 --- linux-2.6.39.4/include/linux/libata.h 2011-05-19 00:06:34.000000000 -0400
53497 +++ linux-2.6.39.4/include/linux/libata.h 2011-08-05 20:34:06.000000000 -0400
53498 @@ -898,7 +898,7 @@ struct ata_port_operations {
53499 * ->inherits must be the last field and all the preceding
53500 * fields must be pointers.
53501 */
53502 - const struct ata_port_operations *inherits;
53503 + const struct ata_port_operations * const inherits;
53504 };
53505
53506 struct ata_port_info {
53507 diff -urNp linux-2.6.39.4/include/linux/mca.h linux-2.6.39.4/include/linux/mca.h
53508 --- linux-2.6.39.4/include/linux/mca.h 2011-05-19 00:06:34.000000000 -0400
53509 +++ linux-2.6.39.4/include/linux/mca.h 2011-08-05 20:34:06.000000000 -0400
53510 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
53511 int region);
53512 void * (*mca_transform_memory)(struct mca_device *,
53513 void *memory);
53514 -};
53515 +} __no_const;
53516
53517 struct mca_bus {
53518 u64 default_dma_mask;
53519 diff -urNp linux-2.6.39.4/include/linux/memory.h linux-2.6.39.4/include/linux/memory.h
53520 --- linux-2.6.39.4/include/linux/memory.h 2011-05-19 00:06:34.000000000 -0400
53521 +++ linux-2.6.39.4/include/linux/memory.h 2011-08-05 20:34:06.000000000 -0400
53522 @@ -142,7 +142,7 @@ struct memory_accessor {
53523 size_t count);
53524 ssize_t (*write)(struct memory_accessor *, const char *buf,
53525 off_t offset, size_t count);
53526 -};
53527 +} __no_const;
53528
53529 /*
53530 * Kernel text modification mutex, used for code patching. Users of this lock
53531 diff -urNp linux-2.6.39.4/include/linux/mfd/abx500.h linux-2.6.39.4/include/linux/mfd/abx500.h
53532 --- linux-2.6.39.4/include/linux/mfd/abx500.h 2011-05-19 00:06:34.000000000 -0400
53533 +++ linux-2.6.39.4/include/linux/mfd/abx500.h 2011-08-05 20:34:06.000000000 -0400
53534 @@ -226,6 +226,7 @@ struct abx500_ops {
53535 int (*event_registers_startup_state_get) (struct device *, u8 *);
53536 int (*startup_irq_enabled) (struct device *, unsigned int);
53537 };
53538 +typedef struct abx500_ops __no_const abx500_ops_no_const;
53539
53540 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
53541 void abx500_remove_ops(struct device *dev);
53542 diff -urNp linux-2.6.39.4/include/linux/mm.h linux-2.6.39.4/include/linux/mm.h
53543 --- linux-2.6.39.4/include/linux/mm.h 2011-05-19 00:06:34.000000000 -0400
53544 +++ linux-2.6.39.4/include/linux/mm.h 2011-08-05 19:44:37.000000000 -0400
53545 @@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
53546
53547 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
53548 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
53549 +
53550 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
53551 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
53552 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
53553 +#else
53554 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
53555 +#endif
53556 +
53557 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
53558 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
53559
53560 @@ -1010,34 +1017,6 @@ int set_page_dirty(struct page *page);
53561 int set_page_dirty_lock(struct page *page);
53562 int clear_page_dirty_for_io(struct page *page);
53563
53564 -/* Is the vma a continuation of the stack vma above it? */
53565 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
53566 -{
53567 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
53568 -}
53569 -
53570 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
53571 - unsigned long addr)
53572 -{
53573 - return (vma->vm_flags & VM_GROWSDOWN) &&
53574 - (vma->vm_start == addr) &&
53575 - !vma_growsdown(vma->vm_prev, addr);
53576 -}
53577 -
53578 -/* Is the vma a continuation of the stack vma below it? */
53579 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
53580 -{
53581 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
53582 -}
53583 -
53584 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
53585 - unsigned long addr)
53586 -{
53587 - return (vma->vm_flags & VM_GROWSUP) &&
53588 - (vma->vm_end == addr) &&
53589 - !vma_growsup(vma->vm_next, addr);
53590 -}
53591 -
53592 extern unsigned long move_page_tables(struct vm_area_struct *vma,
53593 unsigned long old_addr, struct vm_area_struct *new_vma,
53594 unsigned long new_addr, unsigned long len);
53595 @@ -1189,6 +1168,15 @@ struct shrinker {
53596 extern void register_shrinker(struct shrinker *);
53597 extern void unregister_shrinker(struct shrinker *);
53598
53599 +#ifdef CONFIG_MMU
53600 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
53601 +#else
53602 +static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
53603 +{
53604 + return __pgprot(0);
53605 +}
53606 +#endif
53607 +
53608 int vma_wants_writenotify(struct vm_area_struct *vma);
53609
53610 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
53611 @@ -1476,6 +1464,7 @@ out:
53612 }
53613
53614 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
53615 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
53616
53617 extern unsigned long do_brk(unsigned long, unsigned long);
53618
53619 @@ -1532,6 +1521,10 @@ extern struct vm_area_struct * find_vma(
53620 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
53621 struct vm_area_struct **pprev);
53622
53623 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
53624 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
53625 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
53626 +
53627 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
53628 NULL if none. Assume start_addr < end_addr. */
53629 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
53630 @@ -1548,15 +1541,6 @@ static inline unsigned long vma_pages(st
53631 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
53632 }
53633
53634 -#ifdef CONFIG_MMU
53635 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
53636 -#else
53637 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
53638 -{
53639 - return __pgprot(0);
53640 -}
53641 -#endif
53642 -
53643 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
53644 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
53645 unsigned long pfn, unsigned long size, pgprot_t);
53646 @@ -1668,7 +1652,7 @@ extern int unpoison_memory(unsigned long
53647 extern int sysctl_memory_failure_early_kill;
53648 extern int sysctl_memory_failure_recovery;
53649 extern void shake_page(struct page *p, int access);
53650 -extern atomic_long_t mce_bad_pages;
53651 +extern atomic_long_unchecked_t mce_bad_pages;
53652 extern int soft_offline_page(struct page *page, int flags);
53653
53654 extern void dump_page(struct page *page);
53655 @@ -1682,5 +1666,11 @@ extern void copy_user_huge_page(struct p
53656 unsigned int pages_per_huge_page);
53657 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
53658
53659 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53660 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
53661 +#else
53662 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
53663 +#endif
53664 +
53665 #endif /* __KERNEL__ */
53666 #endif /* _LINUX_MM_H */
53667 diff -urNp linux-2.6.39.4/include/linux/mm_types.h linux-2.6.39.4/include/linux/mm_types.h
53668 --- linux-2.6.39.4/include/linux/mm_types.h 2011-05-19 00:06:34.000000000 -0400
53669 +++ linux-2.6.39.4/include/linux/mm_types.h 2011-08-05 19:44:37.000000000 -0400
53670 @@ -183,6 +183,8 @@ struct vm_area_struct {
53671 #ifdef CONFIG_NUMA
53672 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
53673 #endif
53674 +
53675 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
53676 };
53677
53678 struct core_thread {
53679 @@ -317,6 +319,24 @@ struct mm_struct {
53680 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
53681 pgtable_t pmd_huge_pte; /* protected by page_table_lock */
53682 #endif
53683 +
53684 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53685 + unsigned long pax_flags;
53686 +#endif
53687 +
53688 +#ifdef CONFIG_PAX_DLRESOLVE
53689 + unsigned long call_dl_resolve;
53690 +#endif
53691 +
53692 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
53693 + unsigned long call_syscall;
53694 +#endif
53695 +
53696 +#ifdef CONFIG_PAX_ASLR
53697 + unsigned long delta_mmap; /* randomized offset */
53698 + unsigned long delta_stack; /* randomized offset */
53699 +#endif
53700 +
53701 };
53702
53703 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
53704 diff -urNp linux-2.6.39.4/include/linux/mmu_notifier.h linux-2.6.39.4/include/linux/mmu_notifier.h
53705 --- linux-2.6.39.4/include/linux/mmu_notifier.h 2011-05-19 00:06:34.000000000 -0400
53706 +++ linux-2.6.39.4/include/linux/mmu_notifier.h 2011-08-05 19:44:37.000000000 -0400
53707 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
53708 */
53709 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
53710 ({ \
53711 - pte_t __pte; \
53712 + pte_t ___pte; \
53713 struct vm_area_struct *___vma = __vma; \
53714 unsigned long ___address = __address; \
53715 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
53716 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
53717 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
53718 - __pte; \
53719 + ___pte; \
53720 })
53721
53722 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
53723 diff -urNp linux-2.6.39.4/include/linux/mmzone.h linux-2.6.39.4/include/linux/mmzone.h
53724 --- linux-2.6.39.4/include/linux/mmzone.h 2011-05-19 00:06:34.000000000 -0400
53725 +++ linux-2.6.39.4/include/linux/mmzone.h 2011-08-05 19:44:37.000000000 -0400
53726 @@ -355,7 +355,7 @@ struct zone {
53727 unsigned long flags; /* zone flags, see below */
53728
53729 /* Zone statistics */
53730 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53731 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53732
53733 /*
53734 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
53735 diff -urNp linux-2.6.39.4/include/linux/mod_devicetable.h linux-2.6.39.4/include/linux/mod_devicetable.h
53736 --- linux-2.6.39.4/include/linux/mod_devicetable.h 2011-05-19 00:06:34.000000000 -0400
53737 +++ linux-2.6.39.4/include/linux/mod_devicetable.h 2011-08-05 19:44:37.000000000 -0400
53738 @@ -12,7 +12,7 @@
53739 typedef unsigned long kernel_ulong_t;
53740 #endif
53741
53742 -#define PCI_ANY_ID (~0)
53743 +#define PCI_ANY_ID ((__u16)~0)
53744
53745 struct pci_device_id {
53746 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
53747 @@ -131,7 +131,7 @@ struct usb_device_id {
53748 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
53749 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
53750
53751 -#define HID_ANY_ID (~0)
53752 +#define HID_ANY_ID (~0U)
53753
53754 struct hid_device_id {
53755 __u16 bus;
53756 diff -urNp linux-2.6.39.4/include/linux/module.h linux-2.6.39.4/include/linux/module.h
53757 --- linux-2.6.39.4/include/linux/module.h 2011-05-19 00:06:34.000000000 -0400
53758 +++ linux-2.6.39.4/include/linux/module.h 2011-08-05 20:34:06.000000000 -0400
53759 @@ -16,6 +16,7 @@
53760 #include <linux/kobject.h>
53761 #include <linux/moduleparam.h>
53762 #include <linux/tracepoint.h>
53763 +#include <linux/fs.h>
53764
53765 #include <linux/percpu.h>
53766 #include <asm/module.h>
53767 @@ -324,19 +325,16 @@ struct module
53768 int (*init)(void);
53769
53770 /* If this is non-NULL, vfree after init() returns */
53771 - void *module_init;
53772 + void *module_init_rx, *module_init_rw;
53773
53774 /* Here is the actual code + data, vfree'd on unload. */
53775 - void *module_core;
53776 + void *module_core_rx, *module_core_rw;
53777
53778 /* Here are the sizes of the init and core sections */
53779 - unsigned int init_size, core_size;
53780 + unsigned int init_size_rw, core_size_rw;
53781
53782 /* The size of the executable code in each section. */
53783 - unsigned int init_text_size, core_text_size;
53784 -
53785 - /* Size of RO sections of the module (text+rodata) */
53786 - unsigned int init_ro_size, core_ro_size;
53787 + unsigned int init_size_rx, core_size_rx;
53788
53789 /* Arch-specific module values */
53790 struct mod_arch_specific arch;
53791 @@ -391,6 +389,10 @@ struct module
53792 #ifdef CONFIG_EVENT_TRACING
53793 struct ftrace_event_call **trace_events;
53794 unsigned int num_trace_events;
53795 + struct file_operations trace_id;
53796 + struct file_operations trace_enable;
53797 + struct file_operations trace_format;
53798 + struct file_operations trace_filter;
53799 #endif
53800 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
53801 unsigned long *ftrace_callsites;
53802 @@ -441,16 +443,46 @@ bool is_module_address(unsigned long add
53803 bool is_module_percpu_address(unsigned long addr);
53804 bool is_module_text_address(unsigned long addr);
53805
53806 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
53807 +{
53808 +
53809 +#ifdef CONFIG_PAX_KERNEXEC
53810 + if (ktla_ktva(addr) >= (unsigned long)start &&
53811 + ktla_ktva(addr) < (unsigned long)start + size)
53812 + return 1;
53813 +#endif
53814 +
53815 + return ((void *)addr >= start && (void *)addr < start + size);
53816 +}
53817 +
53818 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
53819 +{
53820 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
53821 +}
53822 +
53823 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
53824 +{
53825 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
53826 +}
53827 +
53828 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
53829 +{
53830 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
53831 +}
53832 +
53833 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
53834 +{
53835 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
53836 +}
53837 +
53838 static inline int within_module_core(unsigned long addr, struct module *mod)
53839 {
53840 - return (unsigned long)mod->module_core <= addr &&
53841 - addr < (unsigned long)mod->module_core + mod->core_size;
53842 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
53843 }
53844
53845 static inline int within_module_init(unsigned long addr, struct module *mod)
53846 {
53847 - return (unsigned long)mod->module_init <= addr &&
53848 - addr < (unsigned long)mod->module_init + mod->init_size;
53849 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
53850 }
53851
53852 /* Search for module by name: must hold module_mutex. */
53853 diff -urNp linux-2.6.39.4/include/linux/moduleloader.h linux-2.6.39.4/include/linux/moduleloader.h
53854 --- linux-2.6.39.4/include/linux/moduleloader.h 2011-05-19 00:06:34.000000000 -0400
53855 +++ linux-2.6.39.4/include/linux/moduleloader.h 2011-08-05 19:44:37.000000000 -0400
53856 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
53857 sections. Returns NULL on failure. */
53858 void *module_alloc(unsigned long size);
53859
53860 +#ifdef CONFIG_PAX_KERNEXEC
53861 +void *module_alloc_exec(unsigned long size);
53862 +#else
53863 +#define module_alloc_exec(x) module_alloc(x)
53864 +#endif
53865 +
53866 /* Free memory returned from module_alloc. */
53867 void module_free(struct module *mod, void *module_region);
53868
53869 +#ifdef CONFIG_PAX_KERNEXEC
53870 +void module_free_exec(struct module *mod, void *module_region);
53871 +#else
53872 +#define module_free_exec(x, y) module_free((x), (y))
53873 +#endif
53874 +
53875 /* Apply the given relocation to the (simplified) ELF. Return -error
53876 or 0. */
53877 int apply_relocate(Elf_Shdr *sechdrs,
53878 diff -urNp linux-2.6.39.4/include/linux/moduleparam.h linux-2.6.39.4/include/linux/moduleparam.h
53879 --- linux-2.6.39.4/include/linux/moduleparam.h 2011-05-19 00:06:34.000000000 -0400
53880 +++ linux-2.6.39.4/include/linux/moduleparam.h 2011-08-05 20:34:06.000000000 -0400
53881 @@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
53882 * @len is usually just sizeof(string).
53883 */
53884 #define module_param_string(name, string, len, perm) \
53885 - static const struct kparam_string __param_string_##name \
53886 + static const struct kparam_string __param_string_##name __used \
53887 = { len, string }; \
53888 __module_param_call(MODULE_PARAM_PREFIX, name, \
53889 &param_ops_string, \
53890 @@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
53891 * module_param_named() for why this might be necessary.
53892 */
53893 #define module_param_array_named(name, array, type, nump, perm) \
53894 - static const struct kparam_array __param_arr_##name \
53895 + static const struct kparam_array __param_arr_##name __used \
53896 = { ARRAY_SIZE(array), nump, &param_ops_##type, \
53897 sizeof(array[0]), array }; \
53898 __module_param_call(MODULE_PARAM_PREFIX, name, \
53899 diff -urNp linux-2.6.39.4/include/linux/mutex.h linux-2.6.39.4/include/linux/mutex.h
53900 --- linux-2.6.39.4/include/linux/mutex.h 2011-05-19 00:06:34.000000000 -0400
53901 +++ linux-2.6.39.4/include/linux/mutex.h 2011-08-05 19:44:37.000000000 -0400
53902 @@ -51,7 +51,7 @@ struct mutex {
53903 spinlock_t wait_lock;
53904 struct list_head wait_list;
53905 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
53906 - struct thread_info *owner;
53907 + struct task_struct *owner;
53908 #endif
53909 #ifdef CONFIG_DEBUG_MUTEXES
53910 const char *name;
53911 diff -urNp linux-2.6.39.4/include/linux/namei.h linux-2.6.39.4/include/linux/namei.h
53912 --- linux-2.6.39.4/include/linux/namei.h 2011-05-19 00:06:34.000000000 -0400
53913 +++ linux-2.6.39.4/include/linux/namei.h 2011-08-05 19:44:37.000000000 -0400
53914 @@ -24,7 +24,7 @@ struct nameidata {
53915 unsigned seq;
53916 int last_type;
53917 unsigned depth;
53918 - char *saved_names[MAX_NESTED_LINKS + 1];
53919 + const char *saved_names[MAX_NESTED_LINKS + 1];
53920
53921 /* Intent data */
53922 union {
53923 @@ -91,12 +91,12 @@ extern int follow_up(struct path *);
53924 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
53925 extern void unlock_rename(struct dentry *, struct dentry *);
53926
53927 -static inline void nd_set_link(struct nameidata *nd, char *path)
53928 +static inline void nd_set_link(struct nameidata *nd, const char *path)
53929 {
53930 nd->saved_names[nd->depth] = path;
53931 }
53932
53933 -static inline char *nd_get_link(struct nameidata *nd)
53934 +static inline const char *nd_get_link(const struct nameidata *nd)
53935 {
53936 return nd->saved_names[nd->depth];
53937 }
53938 diff -urNp linux-2.6.39.4/include/linux/netdevice.h linux-2.6.39.4/include/linux/netdevice.h
53939 --- linux-2.6.39.4/include/linux/netdevice.h 2011-08-05 21:11:51.000000000 -0400
53940 +++ linux-2.6.39.4/include/linux/netdevice.h 2011-08-05 21:12:20.000000000 -0400
53941 @@ -979,6 +979,7 @@ struct net_device_ops {
53942 int (*ndo_set_features)(struct net_device *dev,
53943 u32 features);
53944 };
53945 +typedef struct net_device_ops __no_const net_device_ops_no_const;
53946
53947 /*
53948 * The DEVICE structure.
53949 diff -urNp linux-2.6.39.4/include/linux/netfilter/xt_gradm.h linux-2.6.39.4/include/linux/netfilter/xt_gradm.h
53950 --- linux-2.6.39.4/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
53951 +++ linux-2.6.39.4/include/linux/netfilter/xt_gradm.h 2011-08-05 19:44:37.000000000 -0400
53952 @@ -0,0 +1,9 @@
53953 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
53954 +#define _LINUX_NETFILTER_XT_GRADM_H 1
53955 +
53956 +struct xt_gradm_mtinfo {
53957 + __u16 flags;
53958 + __u16 invflags;
53959 +};
53960 +
53961 +#endif
53962 diff -urNp linux-2.6.39.4/include/linux/oprofile.h linux-2.6.39.4/include/linux/oprofile.h
53963 --- linux-2.6.39.4/include/linux/oprofile.h 2011-05-19 00:06:34.000000000 -0400
53964 +++ linux-2.6.39.4/include/linux/oprofile.h 2011-08-05 19:44:37.000000000 -0400
53965 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
53966 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
53967 char const * name, ulong * val);
53968
53969 -/** Create a file for read-only access to an atomic_t. */
53970 +/** Create a file for read-only access to an atomic_unchecked_t. */
53971 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
53972 - char const * name, atomic_t * val);
53973 + char const * name, atomic_unchecked_t * val);
53974
53975 /** create a directory */
53976 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
53977 diff -urNp linux-2.6.39.4/include/linux/padata.h linux-2.6.39.4/include/linux/padata.h
53978 --- linux-2.6.39.4/include/linux/padata.h 2011-05-19 00:06:34.000000000 -0400
53979 +++ linux-2.6.39.4/include/linux/padata.h 2011-08-05 19:44:37.000000000 -0400
53980 @@ -129,7 +129,7 @@ struct parallel_data {
53981 struct padata_instance *pinst;
53982 struct padata_parallel_queue __percpu *pqueue;
53983 struct padata_serial_queue __percpu *squeue;
53984 - atomic_t seq_nr;
53985 + atomic_unchecked_t seq_nr;
53986 atomic_t reorder_objects;
53987 atomic_t refcnt;
53988 unsigned int max_seq_nr;
53989 diff -urNp linux-2.6.39.4/include/linux/perf_event.h linux-2.6.39.4/include/linux/perf_event.h
53990 --- linux-2.6.39.4/include/linux/perf_event.h 2011-05-19 00:06:34.000000000 -0400
53991 +++ linux-2.6.39.4/include/linux/perf_event.h 2011-08-05 20:34:06.000000000 -0400
53992 @@ -759,8 +759,8 @@ struct perf_event {
53993
53994 enum perf_event_active_state state;
53995 unsigned int attach_state;
53996 - local64_t count;
53997 - atomic64_t child_count;
53998 + local64_t count; /* PaX: fix it one day */
53999 + atomic64_unchecked_t child_count;
54000
54001 /*
54002 * These are the total time in nanoseconds that the event
54003 @@ -811,8 +811,8 @@ struct perf_event {
54004 * These accumulate total time (in nanoseconds) that children
54005 * events have been enabled and running, respectively.
54006 */
54007 - atomic64_t child_total_time_enabled;
54008 - atomic64_t child_total_time_running;
54009 + atomic64_unchecked_t child_total_time_enabled;
54010 + atomic64_unchecked_t child_total_time_running;
54011
54012 /*
54013 * Protect attach/detach and child_list:
54014 diff -urNp linux-2.6.39.4/include/linux/pipe_fs_i.h linux-2.6.39.4/include/linux/pipe_fs_i.h
54015 --- linux-2.6.39.4/include/linux/pipe_fs_i.h 2011-05-19 00:06:34.000000000 -0400
54016 +++ linux-2.6.39.4/include/linux/pipe_fs_i.h 2011-08-05 19:44:37.000000000 -0400
54017 @@ -46,9 +46,9 @@ struct pipe_buffer {
54018 struct pipe_inode_info {
54019 wait_queue_head_t wait;
54020 unsigned int nrbufs, curbuf, buffers;
54021 - unsigned int readers;
54022 - unsigned int writers;
54023 - unsigned int waiting_writers;
54024 + atomic_t readers;
54025 + atomic_t writers;
54026 + atomic_t waiting_writers;
54027 unsigned int r_counter;
54028 unsigned int w_counter;
54029 struct page *tmp_page;
54030 diff -urNp linux-2.6.39.4/include/linux/pm_runtime.h linux-2.6.39.4/include/linux/pm_runtime.h
54031 --- linux-2.6.39.4/include/linux/pm_runtime.h 2011-05-19 00:06:34.000000000 -0400
54032 +++ linux-2.6.39.4/include/linux/pm_runtime.h 2011-08-05 19:44:37.000000000 -0400
54033 @@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
54034
54035 static inline void pm_runtime_mark_last_busy(struct device *dev)
54036 {
54037 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
54038 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
54039 }
54040
54041 #else /* !CONFIG_PM_RUNTIME */
54042 diff -urNp linux-2.6.39.4/include/linux/poison.h linux-2.6.39.4/include/linux/poison.h
54043 --- linux-2.6.39.4/include/linux/poison.h 2011-05-19 00:06:34.000000000 -0400
54044 +++ linux-2.6.39.4/include/linux/poison.h 2011-08-05 19:44:37.000000000 -0400
54045 @@ -19,8 +19,8 @@
54046 * under normal circumstances, used to verify that nobody uses
54047 * non-initialized list entries.
54048 */
54049 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
54050 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
54051 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
54052 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
54053
54054 /********** include/linux/timer.h **********/
54055 /*
54056 diff -urNp linux-2.6.39.4/include/linux/preempt.h linux-2.6.39.4/include/linux/preempt.h
54057 --- linux-2.6.39.4/include/linux/preempt.h 2011-05-19 00:06:34.000000000 -0400
54058 +++ linux-2.6.39.4/include/linux/preempt.h 2011-08-05 20:34:06.000000000 -0400
54059 @@ -115,7 +115,7 @@ struct preempt_ops {
54060 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
54061 void (*sched_out)(struct preempt_notifier *notifier,
54062 struct task_struct *next);
54063 -};
54064 +} __no_const;
54065
54066 /**
54067 * preempt_notifier - key for installing preemption notifiers
54068 diff -urNp linux-2.6.39.4/include/linux/proc_fs.h linux-2.6.39.4/include/linux/proc_fs.h
54069 --- linux-2.6.39.4/include/linux/proc_fs.h 2011-05-19 00:06:34.000000000 -0400
54070 +++ linux-2.6.39.4/include/linux/proc_fs.h 2011-08-05 20:34:06.000000000 -0400
54071 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
54072 return proc_create_data(name, mode, parent, proc_fops, NULL);
54073 }
54074
54075 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
54076 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
54077 +{
54078 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54079 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
54080 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54081 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
54082 +#else
54083 + return proc_create_data(name, mode, parent, proc_fops, NULL);
54084 +#endif
54085 +}
54086 +
54087 +
54088 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
54089 mode_t mode, struct proc_dir_entry *base,
54090 read_proc_t *read_proc, void * data)
54091 @@ -258,7 +271,7 @@ union proc_op {
54092 int (*proc_show)(struct seq_file *m,
54093 struct pid_namespace *ns, struct pid *pid,
54094 struct task_struct *task);
54095 -};
54096 +} __no_const;
54097
54098 struct ctl_table_header;
54099 struct ctl_table;
54100 diff -urNp linux-2.6.39.4/include/linux/ptrace.h linux-2.6.39.4/include/linux/ptrace.h
54101 --- linux-2.6.39.4/include/linux/ptrace.h 2011-05-19 00:06:34.000000000 -0400
54102 +++ linux-2.6.39.4/include/linux/ptrace.h 2011-08-05 19:44:37.000000000 -0400
54103 @@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
54104 extern void exit_ptrace(struct task_struct *tracer);
54105 #define PTRACE_MODE_READ 1
54106 #define PTRACE_MODE_ATTACH 2
54107 -/* Returns 0 on success, -errno on denial. */
54108 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
54109 /* Returns true on success, false on denial. */
54110 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
54111 +/* Returns true on success, false on denial. */
54112 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
54113
54114 static inline int ptrace_reparented(struct task_struct *child)
54115 {
54116 diff -urNp linux-2.6.39.4/include/linux/random.h linux-2.6.39.4/include/linux/random.h
54117 --- linux-2.6.39.4/include/linux/random.h 2011-05-19 00:06:34.000000000 -0400
54118 +++ linux-2.6.39.4/include/linux/random.h 2011-08-05 19:44:37.000000000 -0400
54119 @@ -80,12 +80,17 @@ void srandom32(u32 seed);
54120
54121 u32 prandom32(struct rnd_state *);
54122
54123 +static inline unsigned long pax_get_random_long(void)
54124 +{
54125 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
54126 +}
54127 +
54128 /*
54129 * Handle minimum values for seeds
54130 */
54131 static inline u32 __seed(u32 x, u32 m)
54132 {
54133 - return (x < m) ? x + m : x;
54134 + return (x <= m) ? x + m + 1 : x;
54135 }
54136
54137 /**
54138 diff -urNp linux-2.6.39.4/include/linux/reboot.h linux-2.6.39.4/include/linux/reboot.h
54139 --- linux-2.6.39.4/include/linux/reboot.h 2011-05-19 00:06:34.000000000 -0400
54140 +++ linux-2.6.39.4/include/linux/reboot.h 2011-08-05 19:44:37.000000000 -0400
54141 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
54142 * Architecture-specific implementations of sys_reboot commands.
54143 */
54144
54145 -extern void machine_restart(char *cmd);
54146 -extern void machine_halt(void);
54147 -extern void machine_power_off(void);
54148 +extern void machine_restart(char *cmd) __noreturn;
54149 +extern void machine_halt(void) __noreturn;
54150 +extern void machine_power_off(void) __noreturn;
54151
54152 extern void machine_shutdown(void);
54153 struct pt_regs;
54154 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
54155 */
54156
54157 extern void kernel_restart_prepare(char *cmd);
54158 -extern void kernel_restart(char *cmd);
54159 -extern void kernel_halt(void);
54160 -extern void kernel_power_off(void);
54161 +extern void kernel_restart(char *cmd) __noreturn;
54162 +extern void kernel_halt(void) __noreturn;
54163 +extern void kernel_power_off(void) __noreturn;
54164
54165 extern int C_A_D; /* for sysctl */
54166 void ctrl_alt_del(void);
54167 @@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
54168 * Emergency restart, callable from an interrupt handler.
54169 */
54170
54171 -extern void emergency_restart(void);
54172 +extern void emergency_restart(void) __noreturn;
54173 #include <asm/emergency-restart.h>
54174
54175 #endif
54176 diff -urNp linux-2.6.39.4/include/linux/reiserfs_fs.h linux-2.6.39.4/include/linux/reiserfs_fs.h
54177 --- linux-2.6.39.4/include/linux/reiserfs_fs.h 2011-05-19 00:06:34.000000000 -0400
54178 +++ linux-2.6.39.4/include/linux/reiserfs_fs.h 2011-08-05 20:34:06.000000000 -0400
54179 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
54180 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
54181
54182 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
54183 -#define get_generation(s) atomic_read (&fs_generation(s))
54184 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
54185 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
54186 #define __fs_changed(gen,s) (gen != get_generation (s))
54187 #define fs_changed(gen,s) \
54188 diff -urNp linux-2.6.39.4/include/linux/reiserfs_fs_sb.h linux-2.6.39.4/include/linux/reiserfs_fs_sb.h
54189 --- linux-2.6.39.4/include/linux/reiserfs_fs_sb.h 2011-05-19 00:06:34.000000000 -0400
54190 +++ linux-2.6.39.4/include/linux/reiserfs_fs_sb.h 2011-08-05 19:44:37.000000000 -0400
54191 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
54192 /* Comment? -Hans */
54193 wait_queue_head_t s_wait;
54194 /* To be obsoleted soon by per buffer seals.. -Hans */
54195 - atomic_t s_generation_counter; // increased by one every time the
54196 + atomic_unchecked_t s_generation_counter; // increased by one every time the
54197 // tree gets re-balanced
54198 unsigned long s_properties; /* File system properties. Currently holds
54199 on-disk FS format */
54200 diff -urNp linux-2.6.39.4/include/linux/relay.h linux-2.6.39.4/include/linux/relay.h
54201 --- linux-2.6.39.4/include/linux/relay.h 2011-05-19 00:06:34.000000000 -0400
54202 +++ linux-2.6.39.4/include/linux/relay.h 2011-08-05 20:34:06.000000000 -0400
54203 @@ -159,7 +159,7 @@ struct rchan_callbacks
54204 * The callback should return 0 if successful, negative if not.
54205 */
54206 int (*remove_buf_file)(struct dentry *dentry);
54207 -};
54208 +} __no_const;
54209
54210 /*
54211 * CONFIG_RELAY kernel API, kernel/relay.c
54212 diff -urNp linux-2.6.39.4/include/linux/rfkill.h linux-2.6.39.4/include/linux/rfkill.h
54213 --- linux-2.6.39.4/include/linux/rfkill.h 2011-05-19 00:06:34.000000000 -0400
54214 +++ linux-2.6.39.4/include/linux/rfkill.h 2011-08-05 20:34:06.000000000 -0400
54215 @@ -147,6 +147,7 @@ struct rfkill_ops {
54216 void (*query)(struct rfkill *rfkill, void *data);
54217 int (*set_block)(void *data, bool blocked);
54218 };
54219 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
54220
54221 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
54222 /**
54223 diff -urNp linux-2.6.39.4/include/linux/rmap.h linux-2.6.39.4/include/linux/rmap.h
54224 --- linux-2.6.39.4/include/linux/rmap.h 2011-05-19 00:06:34.000000000 -0400
54225 +++ linux-2.6.39.4/include/linux/rmap.h 2011-08-05 19:44:37.000000000 -0400
54226 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
54227 void anon_vma_init(void); /* create anon_vma_cachep */
54228 int anon_vma_prepare(struct vm_area_struct *);
54229 void unlink_anon_vmas(struct vm_area_struct *);
54230 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
54231 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
54232 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
54233 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
54234 void __anon_vma_link(struct vm_area_struct *);
54235
54236 static inline void anon_vma_merge(struct vm_area_struct *vma,
54237 diff -urNp linux-2.6.39.4/include/linux/sched.h linux-2.6.39.4/include/linux/sched.h
54238 --- linux-2.6.39.4/include/linux/sched.h 2011-05-19 00:06:34.000000000 -0400
54239 +++ linux-2.6.39.4/include/linux/sched.h 2011-08-05 20:34:06.000000000 -0400
54240 @@ -100,6 +100,7 @@ struct bio_list;
54241 struct fs_struct;
54242 struct perf_event_context;
54243 struct blk_plug;
54244 +struct linux_binprm;
54245
54246 /*
54247 * List of flags we want to share for kernel threads,
54248 @@ -360,7 +361,7 @@ extern signed long schedule_timeout_inte
54249 extern signed long schedule_timeout_killable(signed long timeout);
54250 extern signed long schedule_timeout_uninterruptible(signed long timeout);
54251 asmlinkage void schedule(void);
54252 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
54253 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
54254
54255 struct nsproxy;
54256 struct user_namespace;
54257 @@ -381,10 +382,13 @@ struct user_namespace;
54258 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
54259
54260 extern int sysctl_max_map_count;
54261 +extern unsigned long sysctl_heap_stack_gap;
54262
54263 #include <linux/aio.h>
54264
54265 #ifdef CONFIG_MMU
54266 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
54267 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
54268 extern void arch_pick_mmap_layout(struct mm_struct *mm);
54269 extern unsigned long
54270 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
54271 @@ -629,6 +633,17 @@ struct signal_struct {
54272 #ifdef CONFIG_TASKSTATS
54273 struct taskstats *stats;
54274 #endif
54275 +
54276 +#ifdef CONFIG_GRKERNSEC
54277 + u32 curr_ip;
54278 + u32 saved_ip;
54279 + u32 gr_saddr;
54280 + u32 gr_daddr;
54281 + u16 gr_sport;
54282 + u16 gr_dport;
54283 + u8 used_accept:1;
54284 +#endif
54285 +
54286 #ifdef CONFIG_AUDIT
54287 unsigned audit_tty;
54288 struct tty_audit_buf *tty_audit_buf;
54289 @@ -701,6 +716,11 @@ struct user_struct {
54290 struct key *session_keyring; /* UID's default session keyring */
54291 #endif
54292
54293 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54294 + unsigned int banned;
54295 + unsigned long ban_expires;
54296 +#endif
54297 +
54298 /* Hash table maintenance information */
54299 struct hlist_node uidhash_node;
54300 uid_t uid;
54301 @@ -1310,8 +1330,8 @@ struct task_struct {
54302 struct list_head thread_group;
54303
54304 struct completion *vfork_done; /* for vfork() */
54305 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
54306 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
54307 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
54308 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
54309
54310 cputime_t utime, stime, utimescaled, stimescaled;
54311 cputime_t gtime;
54312 @@ -1327,13 +1347,6 @@ struct task_struct {
54313 struct task_cputime cputime_expires;
54314 struct list_head cpu_timers[3];
54315
54316 -/* process credentials */
54317 - const struct cred __rcu *real_cred; /* objective and real subjective task
54318 - * credentials (COW) */
54319 - const struct cred __rcu *cred; /* effective (overridable) subjective task
54320 - * credentials (COW) */
54321 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
54322 -
54323 char comm[TASK_COMM_LEN]; /* executable name excluding path
54324 - access with [gs]et_task_comm (which lock
54325 it with task_lock())
54326 @@ -1350,8 +1363,16 @@ struct task_struct {
54327 #endif
54328 /* CPU-specific state of this task */
54329 struct thread_struct thread;
54330 +/* thread_info moved to task_struct */
54331 +#ifdef CONFIG_X86
54332 + struct thread_info tinfo;
54333 +#endif
54334 /* filesystem information */
54335 struct fs_struct *fs;
54336 +
54337 + const struct cred __rcu *cred; /* effective (overridable) subjective task
54338 + * credentials (COW) */
54339 +
54340 /* open file information */
54341 struct files_struct *files;
54342 /* namespaces */
54343 @@ -1398,6 +1419,11 @@ struct task_struct {
54344 struct rt_mutex_waiter *pi_blocked_on;
54345 #endif
54346
54347 +/* process credentials */
54348 + const struct cred __rcu *real_cred; /* objective and real subjective task
54349 + * credentials (COW) */
54350 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
54351 +
54352 #ifdef CONFIG_DEBUG_MUTEXES
54353 /* mutex deadlock detection */
54354 struct mutex_waiter *blocked_on;
54355 @@ -1508,6 +1534,21 @@ struct task_struct {
54356 unsigned long default_timer_slack_ns;
54357
54358 struct list_head *scm_work_list;
54359 +
54360 +#ifdef CONFIG_GRKERNSEC
54361 + /* grsecurity */
54362 + struct dentry *gr_chroot_dentry;
54363 + struct acl_subject_label *acl;
54364 + struct acl_role_label *role;
54365 + struct file *exec_file;
54366 + u16 acl_role_id;
54367 + /* is this the task that authenticated to the special role */
54368 + u8 acl_sp_role;
54369 + u8 is_writable;
54370 + u8 brute;
54371 + u8 gr_is_chrooted;
54372 +#endif
54373 +
54374 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
54375 /* Index of current stored address in ret_stack */
54376 int curr_ret_stack;
54377 @@ -1542,6 +1583,57 @@ struct task_struct {
54378 #endif
54379 };
54380
54381 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
54382 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
54383 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
54384 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
54385 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
54386 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
54387 +
54388 +#ifdef CONFIG_PAX_SOFTMODE
54389 +extern int pax_softmode;
54390 +#endif
54391 +
54392 +extern int pax_check_flags(unsigned long *);
54393 +
54394 +/* if tsk != current then task_lock must be held on it */
54395 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54396 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
54397 +{
54398 + if (likely(tsk->mm))
54399 + return tsk->mm->pax_flags;
54400 + else
54401 + return 0UL;
54402 +}
54403 +
54404 +/* if tsk != current then task_lock must be held on it */
54405 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
54406 +{
54407 + if (likely(tsk->mm)) {
54408 + tsk->mm->pax_flags = flags;
54409 + return 0;
54410 + }
54411 + return -EINVAL;
54412 +}
54413 +#endif
54414 +
54415 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54416 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
54417 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
54418 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
54419 +#endif
54420 +
54421 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
54422 +extern void pax_report_insns(void *pc, void *sp);
54423 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
54424 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
54425 +
54426 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
54427 +extern void pax_track_stack(void);
54428 +#else
54429 +static inline void pax_track_stack(void) {}
54430 +#endif
54431 +
54432 /* Future-safe accessor for struct task_struct's cpus_allowed. */
54433 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
54434
54435 @@ -2009,7 +2101,9 @@ void yield(void);
54436 extern struct exec_domain default_exec_domain;
54437
54438 union thread_union {
54439 +#ifndef CONFIG_X86
54440 struct thread_info thread_info;
54441 +#endif
54442 unsigned long stack[THREAD_SIZE/sizeof(long)];
54443 };
54444
54445 @@ -2042,6 +2136,7 @@ extern struct pid_namespace init_pid_ns;
54446 */
54447
54448 extern struct task_struct *find_task_by_vpid(pid_t nr);
54449 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
54450 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
54451 struct pid_namespace *ns);
54452
54453 @@ -2179,7 +2274,7 @@ extern void __cleanup_sighand(struct sig
54454 extern void exit_itimers(struct signal_struct *);
54455 extern void flush_itimer_signals(void);
54456
54457 -extern NORET_TYPE void do_group_exit(int);
54458 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
54459
54460 extern void daemonize(const char *, ...);
54461 extern int allow_signal(int);
54462 @@ -2320,13 +2415,17 @@ static inline unsigned long *end_of_stac
54463
54464 #endif
54465
54466 -static inline int object_is_on_stack(void *obj)
54467 +static inline int object_starts_on_stack(void *obj)
54468 {
54469 - void *stack = task_stack_page(current);
54470 + const void *stack = task_stack_page(current);
54471
54472 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
54473 }
54474
54475 +#ifdef CONFIG_PAX_USERCOPY
54476 +extern int object_is_on_stack(const void *obj, unsigned long len);
54477 +#endif
54478 +
54479 extern void thread_info_cache_init(void);
54480
54481 #ifdef CONFIG_DEBUG_STACK_USAGE
54482 diff -urNp linux-2.6.39.4/include/linux/screen_info.h linux-2.6.39.4/include/linux/screen_info.h
54483 --- linux-2.6.39.4/include/linux/screen_info.h 2011-05-19 00:06:34.000000000 -0400
54484 +++ linux-2.6.39.4/include/linux/screen_info.h 2011-08-05 19:44:37.000000000 -0400
54485 @@ -43,7 +43,8 @@ struct screen_info {
54486 __u16 pages; /* 0x32 */
54487 __u16 vesa_attributes; /* 0x34 */
54488 __u32 capabilities; /* 0x36 */
54489 - __u8 _reserved[6]; /* 0x3a */
54490 + __u16 vesapm_size; /* 0x3a */
54491 + __u8 _reserved[4]; /* 0x3c */
54492 } __attribute__((packed));
54493
54494 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
54495 diff -urNp linux-2.6.39.4/include/linux/security.h linux-2.6.39.4/include/linux/security.h
54496 --- linux-2.6.39.4/include/linux/security.h 2011-05-19 00:06:34.000000000 -0400
54497 +++ linux-2.6.39.4/include/linux/security.h 2011-08-05 19:44:37.000000000 -0400
54498 @@ -36,6 +36,7 @@
54499 #include <linux/key.h>
54500 #include <linux/xfrm.h>
54501 #include <linux/slab.h>
54502 +#include <linux/grsecurity.h>
54503 #include <net/flow.h>
54504
54505 /* Maximum number of letters for an LSM name string */
54506 diff -urNp linux-2.6.39.4/include/linux/seq_file.h linux-2.6.39.4/include/linux/seq_file.h
54507 --- linux-2.6.39.4/include/linux/seq_file.h 2011-05-19 00:06:34.000000000 -0400
54508 +++ linux-2.6.39.4/include/linux/seq_file.h 2011-08-05 20:34:06.000000000 -0400
54509 @@ -32,6 +32,7 @@ struct seq_operations {
54510 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
54511 int (*show) (struct seq_file *m, void *v);
54512 };
54513 +typedef struct seq_operations __no_const seq_operations_no_const;
54514
54515 #define SEQ_SKIP 1
54516
54517 diff -urNp linux-2.6.39.4/include/linux/shm.h linux-2.6.39.4/include/linux/shm.h
54518 --- linux-2.6.39.4/include/linux/shm.h 2011-05-19 00:06:34.000000000 -0400
54519 +++ linux-2.6.39.4/include/linux/shm.h 2011-08-05 19:44:37.000000000 -0400
54520 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
54521 pid_t shm_cprid;
54522 pid_t shm_lprid;
54523 struct user_struct *mlock_user;
54524 +#ifdef CONFIG_GRKERNSEC
54525 + time_t shm_createtime;
54526 + pid_t shm_lapid;
54527 +#endif
54528 };
54529
54530 /* shm_mode upper byte flags */
54531 diff -urNp linux-2.6.39.4/include/linux/skbuff.h linux-2.6.39.4/include/linux/skbuff.h
54532 --- linux-2.6.39.4/include/linux/skbuff.h 2011-05-19 00:06:34.000000000 -0400
54533 +++ linux-2.6.39.4/include/linux/skbuff.h 2011-08-05 19:44:37.000000000 -0400
54534 @@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
54535 */
54536 static inline int skb_queue_empty(const struct sk_buff_head *list)
54537 {
54538 - return list->next == (struct sk_buff *)list;
54539 + return list->next == (const struct sk_buff *)list;
54540 }
54541
54542 /**
54543 @@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
54544 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
54545 const struct sk_buff *skb)
54546 {
54547 - return skb->next == (struct sk_buff *)list;
54548 + return skb->next == (const struct sk_buff *)list;
54549 }
54550
54551 /**
54552 @@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
54553 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
54554 const struct sk_buff *skb)
54555 {
54556 - return skb->prev == (struct sk_buff *)list;
54557 + return skb->prev == (const struct sk_buff *)list;
54558 }
54559
54560 /**
54561 @@ -1435,7 +1435,7 @@ static inline int pskb_network_may_pull(
54562 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
54563 */
54564 #ifndef NET_SKB_PAD
54565 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
54566 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
54567 #endif
54568
54569 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
54570 diff -urNp linux-2.6.39.4/include/linux/slab_def.h linux-2.6.39.4/include/linux/slab_def.h
54571 --- linux-2.6.39.4/include/linux/slab_def.h 2011-05-19 00:06:34.000000000 -0400
54572 +++ linux-2.6.39.4/include/linux/slab_def.h 2011-08-05 19:44:37.000000000 -0400
54573 @@ -96,10 +96,10 @@ struct kmem_cache {
54574 unsigned long node_allocs;
54575 unsigned long node_frees;
54576 unsigned long node_overflow;
54577 - atomic_t allochit;
54578 - atomic_t allocmiss;
54579 - atomic_t freehit;
54580 - atomic_t freemiss;
54581 + atomic_unchecked_t allochit;
54582 + atomic_unchecked_t allocmiss;
54583 + atomic_unchecked_t freehit;
54584 + atomic_unchecked_t freemiss;
54585
54586 /*
54587 * If debugging is enabled, then the allocator can add additional
54588 diff -urNp linux-2.6.39.4/include/linux/slab.h linux-2.6.39.4/include/linux/slab.h
54589 --- linux-2.6.39.4/include/linux/slab.h 2011-05-19 00:06:34.000000000 -0400
54590 +++ linux-2.6.39.4/include/linux/slab.h 2011-08-05 19:44:37.000000000 -0400
54591 @@ -11,12 +11,20 @@
54592
54593 #include <linux/gfp.h>
54594 #include <linux/types.h>
54595 +#include <linux/err.h>
54596
54597 /*
54598 * Flags to pass to kmem_cache_create().
54599 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
54600 */
54601 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
54602 +
54603 +#ifdef CONFIG_PAX_USERCOPY
54604 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
54605 +#else
54606 +#define SLAB_USERCOPY 0x00000000UL
54607 +#endif
54608 +
54609 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
54610 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
54611 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
54612 @@ -87,10 +95,13 @@
54613 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
54614 * Both make kfree a no-op.
54615 */
54616 -#define ZERO_SIZE_PTR ((void *)16)
54617 +#define ZERO_SIZE_PTR \
54618 +({ \
54619 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
54620 + (void *)(-MAX_ERRNO-1L); \
54621 +})
54622
54623 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
54624 - (unsigned long)ZERO_SIZE_PTR)
54625 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
54626
54627 /*
54628 * struct kmem_cache related prototypes
54629 @@ -141,6 +152,7 @@ void * __must_check krealloc(const void
54630 void kfree(const void *);
54631 void kzfree(const void *);
54632 size_t ksize(const void *);
54633 +void check_object_size(const void *ptr, unsigned long n, bool to);
54634
54635 /*
54636 * Allocator specific definitions. These are mainly used to establish optimized
54637 @@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
54638
54639 void __init kmem_cache_init_late(void);
54640
54641 +#define kmalloc(x, y) \
54642 +({ \
54643 + void *___retval; \
54644 + intoverflow_t ___x = (intoverflow_t)x; \
54645 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
54646 + ___retval = NULL; \
54647 + else \
54648 + ___retval = kmalloc((size_t)___x, (y)); \
54649 + ___retval; \
54650 +})
54651 +
54652 +#define kmalloc_node(x, y, z) \
54653 +({ \
54654 + void *___retval; \
54655 + intoverflow_t ___x = (intoverflow_t)x; \
54656 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
54657 + ___retval = NULL; \
54658 + else \
54659 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
54660 + ___retval; \
54661 +})
54662 +
54663 +#define kzalloc(x, y) \
54664 +({ \
54665 + void *___retval; \
54666 + intoverflow_t ___x = (intoverflow_t)x; \
54667 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
54668 + ___retval = NULL; \
54669 + else \
54670 + ___retval = kzalloc((size_t)___x, (y)); \
54671 + ___retval; \
54672 +})
54673 +
54674 +#define __krealloc(x, y, z) \
54675 +({ \
54676 + void *___retval; \
54677 + intoverflow_t ___y = (intoverflow_t)y; \
54678 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
54679 + ___retval = NULL; \
54680 + else \
54681 + ___retval = __krealloc((x), (size_t)___y, (z)); \
54682 + ___retval; \
54683 +})
54684 +
54685 +#define krealloc(x, y, z) \
54686 +({ \
54687 + void *___retval; \
54688 + intoverflow_t ___y = (intoverflow_t)y; \
54689 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
54690 + ___retval = NULL; \
54691 + else \
54692 + ___retval = krealloc((x), (size_t)___y, (z)); \
54693 + ___retval; \
54694 +})
54695 +
54696 #endif /* _LINUX_SLAB_H */
54697 diff -urNp linux-2.6.39.4/include/linux/slub_def.h linux-2.6.39.4/include/linux/slub_def.h
54698 --- linux-2.6.39.4/include/linux/slub_def.h 2011-05-19 00:06:34.000000000 -0400
54699 +++ linux-2.6.39.4/include/linux/slub_def.h 2011-08-05 20:34:06.000000000 -0400
54700 @@ -84,7 +84,7 @@ struct kmem_cache {
54701 struct kmem_cache_order_objects max;
54702 struct kmem_cache_order_objects min;
54703 gfp_t allocflags; /* gfp flags to use on each alloc */
54704 - int refcount; /* Refcount for slab cache destroy */
54705 + atomic_t refcount; /* Refcount for slab cache destroy */
54706 void (*ctor)(void *);
54707 int inuse; /* Offset to metadata */
54708 int align; /* Alignment */
54709 @@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
54710 }
54711
54712 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
54713 -void *__kmalloc(size_t size, gfp_t flags);
54714 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
54715
54716 static __always_inline void *
54717 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
54718 diff -urNp linux-2.6.39.4/include/linux/sonet.h linux-2.6.39.4/include/linux/sonet.h
54719 --- linux-2.6.39.4/include/linux/sonet.h 2011-05-19 00:06:34.000000000 -0400
54720 +++ linux-2.6.39.4/include/linux/sonet.h 2011-08-05 19:44:37.000000000 -0400
54721 @@ -61,7 +61,7 @@ struct sonet_stats {
54722 #include <asm/atomic.h>
54723
54724 struct k_sonet_stats {
54725 -#define __HANDLE_ITEM(i) atomic_t i
54726 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
54727 __SONET_ITEMS
54728 #undef __HANDLE_ITEM
54729 };
54730 diff -urNp linux-2.6.39.4/include/linux/sunrpc/clnt.h linux-2.6.39.4/include/linux/sunrpc/clnt.h
54731 --- linux-2.6.39.4/include/linux/sunrpc/clnt.h 2011-05-19 00:06:34.000000000 -0400
54732 +++ linux-2.6.39.4/include/linux/sunrpc/clnt.h 2011-08-05 19:44:37.000000000 -0400
54733 @@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
54734 {
54735 switch (sap->sa_family) {
54736 case AF_INET:
54737 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
54738 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
54739 case AF_INET6:
54740 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
54741 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
54742 }
54743 return 0;
54744 }
54745 @@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
54746 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
54747 const struct sockaddr *src)
54748 {
54749 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
54750 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
54751 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
54752
54753 dsin->sin_family = ssin->sin_family;
54754 @@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
54755 if (sa->sa_family != AF_INET6)
54756 return 0;
54757
54758 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
54759 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
54760 }
54761
54762 #endif /* __KERNEL__ */
54763 diff -urNp linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h
54764 --- linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h 2011-05-19 00:06:34.000000000 -0400
54765 +++ linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h 2011-08-05 19:44:37.000000000 -0400
54766 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
54767 extern unsigned int svcrdma_max_requests;
54768 extern unsigned int svcrdma_max_req_size;
54769
54770 -extern atomic_t rdma_stat_recv;
54771 -extern atomic_t rdma_stat_read;
54772 -extern atomic_t rdma_stat_write;
54773 -extern atomic_t rdma_stat_sq_starve;
54774 -extern atomic_t rdma_stat_rq_starve;
54775 -extern atomic_t rdma_stat_rq_poll;
54776 -extern atomic_t rdma_stat_rq_prod;
54777 -extern atomic_t rdma_stat_sq_poll;
54778 -extern atomic_t rdma_stat_sq_prod;
54779 +extern atomic_unchecked_t rdma_stat_recv;
54780 +extern atomic_unchecked_t rdma_stat_read;
54781 +extern atomic_unchecked_t rdma_stat_write;
54782 +extern atomic_unchecked_t rdma_stat_sq_starve;
54783 +extern atomic_unchecked_t rdma_stat_rq_starve;
54784 +extern atomic_unchecked_t rdma_stat_rq_poll;
54785 +extern atomic_unchecked_t rdma_stat_rq_prod;
54786 +extern atomic_unchecked_t rdma_stat_sq_poll;
54787 +extern atomic_unchecked_t rdma_stat_sq_prod;
54788
54789 #define RPCRDMA_VERSION 1
54790
54791 diff -urNp linux-2.6.39.4/include/linux/sysctl.h linux-2.6.39.4/include/linux/sysctl.h
54792 --- linux-2.6.39.4/include/linux/sysctl.h 2011-05-19 00:06:34.000000000 -0400
54793 +++ linux-2.6.39.4/include/linux/sysctl.h 2011-08-05 19:44:37.000000000 -0400
54794 @@ -155,7 +155,11 @@ enum
54795 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
54796 };
54797
54798 -
54799 +#ifdef CONFIG_PAX_SOFTMODE
54800 +enum {
54801 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
54802 +};
54803 +#endif
54804
54805 /* CTL_VM names: */
54806 enum
54807 @@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
54808
54809 extern int proc_dostring(struct ctl_table *, int,
54810 void __user *, size_t *, loff_t *);
54811 +extern int proc_dostring_modpriv(struct ctl_table *, int,
54812 + void __user *, size_t *, loff_t *);
54813 extern int proc_dointvec(struct ctl_table *, int,
54814 void __user *, size_t *, loff_t *);
54815 extern int proc_dointvec_minmax(struct ctl_table *, int,
54816 diff -urNp linux-2.6.39.4/include/linux/tty_ldisc.h linux-2.6.39.4/include/linux/tty_ldisc.h
54817 --- linux-2.6.39.4/include/linux/tty_ldisc.h 2011-05-19 00:06:34.000000000 -0400
54818 +++ linux-2.6.39.4/include/linux/tty_ldisc.h 2011-08-05 19:44:37.000000000 -0400
54819 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
54820
54821 struct module *owner;
54822
54823 - int refcount;
54824 + atomic_t refcount;
54825 };
54826
54827 struct tty_ldisc {
54828 diff -urNp linux-2.6.39.4/include/linux/types.h linux-2.6.39.4/include/linux/types.h
54829 --- linux-2.6.39.4/include/linux/types.h 2011-05-19 00:06:34.000000000 -0400
54830 +++ linux-2.6.39.4/include/linux/types.h 2011-08-05 19:44:37.000000000 -0400
54831 @@ -213,10 +213,26 @@ typedef struct {
54832 int counter;
54833 } atomic_t;
54834
54835 +#ifdef CONFIG_PAX_REFCOUNT
54836 +typedef struct {
54837 + int counter;
54838 +} atomic_unchecked_t;
54839 +#else
54840 +typedef atomic_t atomic_unchecked_t;
54841 +#endif
54842 +
54843 #ifdef CONFIG_64BIT
54844 typedef struct {
54845 long counter;
54846 } atomic64_t;
54847 +
54848 +#ifdef CONFIG_PAX_REFCOUNT
54849 +typedef struct {
54850 + long counter;
54851 +} atomic64_unchecked_t;
54852 +#else
54853 +typedef atomic64_t atomic64_unchecked_t;
54854 +#endif
54855 #endif
54856
54857 struct list_head {
54858 diff -urNp linux-2.6.39.4/include/linux/uaccess.h linux-2.6.39.4/include/linux/uaccess.h
54859 --- linux-2.6.39.4/include/linux/uaccess.h 2011-05-19 00:06:34.000000000 -0400
54860 +++ linux-2.6.39.4/include/linux/uaccess.h 2011-08-05 19:44:37.000000000 -0400
54861 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
54862 long ret; \
54863 mm_segment_t old_fs = get_fs(); \
54864 \
54865 - set_fs(KERNEL_DS); \
54866 pagefault_disable(); \
54867 + set_fs(KERNEL_DS); \
54868 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
54869 - pagefault_enable(); \
54870 set_fs(old_fs); \
54871 + pagefault_enable(); \
54872 ret; \
54873 })
54874
54875 @@ -93,8 +93,8 @@ static inline unsigned long __copy_from_
54876 * Safely read from address @src to the buffer at @dst. If a kernel fault
54877 * happens, handle that and return -EFAULT.
54878 */
54879 -extern long probe_kernel_read(void *dst, void *src, size_t size);
54880 -extern long __probe_kernel_read(void *dst, void *src, size_t size);
54881 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
54882 +extern long __probe_kernel_read(void *dst, const void *src, size_t size);
54883
54884 /*
54885 * probe_kernel_write(): safely attempt to write to a location
54886 @@ -105,7 +105,7 @@ extern long __probe_kernel_read(void *ds
54887 * Safely write to address @dst from the buffer at @src. If a kernel fault
54888 * happens, handle that and return -EFAULT.
54889 */
54890 -extern long notrace probe_kernel_write(void *dst, void *src, size_t size);
54891 -extern long notrace __probe_kernel_write(void *dst, void *src, size_t size);
54892 +extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
54893 +extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
54894
54895 #endif /* __LINUX_UACCESS_H__ */
54896 diff -urNp linux-2.6.39.4/include/linux/unaligned/access_ok.h linux-2.6.39.4/include/linux/unaligned/access_ok.h
54897 --- linux-2.6.39.4/include/linux/unaligned/access_ok.h 2011-05-19 00:06:34.000000000 -0400
54898 +++ linux-2.6.39.4/include/linux/unaligned/access_ok.h 2011-08-05 19:44:37.000000000 -0400
54899 @@ -6,32 +6,32 @@
54900
54901 static inline u16 get_unaligned_le16(const void *p)
54902 {
54903 - return le16_to_cpup((__le16 *)p);
54904 + return le16_to_cpup((const __le16 *)p);
54905 }
54906
54907 static inline u32 get_unaligned_le32(const void *p)
54908 {
54909 - return le32_to_cpup((__le32 *)p);
54910 + return le32_to_cpup((const __le32 *)p);
54911 }
54912
54913 static inline u64 get_unaligned_le64(const void *p)
54914 {
54915 - return le64_to_cpup((__le64 *)p);
54916 + return le64_to_cpup((const __le64 *)p);
54917 }
54918
54919 static inline u16 get_unaligned_be16(const void *p)
54920 {
54921 - return be16_to_cpup((__be16 *)p);
54922 + return be16_to_cpup((const __be16 *)p);
54923 }
54924
54925 static inline u32 get_unaligned_be32(const void *p)
54926 {
54927 - return be32_to_cpup((__be32 *)p);
54928 + return be32_to_cpup((const __be32 *)p);
54929 }
54930
54931 static inline u64 get_unaligned_be64(const void *p)
54932 {
54933 - return be64_to_cpup((__be64 *)p);
54934 + return be64_to_cpup((const __be64 *)p);
54935 }
54936
54937 static inline void put_unaligned_le16(u16 val, void *p)
54938 diff -urNp linux-2.6.39.4/include/linux/vmalloc.h linux-2.6.39.4/include/linux/vmalloc.h
54939 --- linux-2.6.39.4/include/linux/vmalloc.h 2011-05-19 00:06:34.000000000 -0400
54940 +++ linux-2.6.39.4/include/linux/vmalloc.h 2011-08-05 19:44:37.000000000 -0400
54941 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
54942 #define VM_MAP 0x00000004 /* vmap()ed pages */
54943 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
54944 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
54945 +
54946 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
54947 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
54948 +#endif
54949 +
54950 /* bits [20..32] reserved for arch specific ioremap internals */
54951
54952 /*
54953 @@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
54954 # endif
54955 #endif
54956
54957 +#define vmalloc(x) \
54958 +({ \
54959 + void *___retval; \
54960 + intoverflow_t ___x = (intoverflow_t)x; \
54961 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
54962 + ___retval = NULL; \
54963 + else \
54964 + ___retval = vmalloc((unsigned long)___x); \
54965 + ___retval; \
54966 +})
54967 +
54968 +#define vzalloc(x) \
54969 +({ \
54970 + void *___retval; \
54971 + intoverflow_t ___x = (intoverflow_t)x; \
54972 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
54973 + ___retval = NULL; \
54974 + else \
54975 + ___retval = vzalloc((unsigned long)___x); \
54976 + ___retval; \
54977 +})
54978 +
54979 +#define __vmalloc(x, y, z) \
54980 +({ \
54981 + void *___retval; \
54982 + intoverflow_t ___x = (intoverflow_t)x; \
54983 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
54984 + ___retval = NULL; \
54985 + else \
54986 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
54987 + ___retval; \
54988 +})
54989 +
54990 +#define vmalloc_user(x) \
54991 +({ \
54992 + void *___retval; \
54993 + intoverflow_t ___x = (intoverflow_t)x; \
54994 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
54995 + ___retval = NULL; \
54996 + else \
54997 + ___retval = vmalloc_user((unsigned long)___x); \
54998 + ___retval; \
54999 +})
55000 +
55001 +#define vmalloc_exec(x) \
55002 +({ \
55003 + void *___retval; \
55004 + intoverflow_t ___x = (intoverflow_t)x; \
55005 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
55006 + ___retval = NULL; \
55007 + else \
55008 + ___retval = vmalloc_exec((unsigned long)___x); \
55009 + ___retval; \
55010 +})
55011 +
55012 +#define vmalloc_node(x, y) \
55013 +({ \
55014 + void *___retval; \
55015 + intoverflow_t ___x = (intoverflow_t)x; \
55016 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
55017 + ___retval = NULL; \
55018 + else \
55019 + ___retval = vmalloc_node((unsigned long)___x, (y));\
55020 + ___retval; \
55021 +})
55022 +
55023 +#define vzalloc_node(x, y) \
55024 +({ \
55025 + void *___retval; \
55026 + intoverflow_t ___x = (intoverflow_t)x; \
55027 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
55028 + ___retval = NULL; \
55029 + else \
55030 + ___retval = vzalloc_node((unsigned long)___x, (y));\
55031 + ___retval; \
55032 +})
55033 +
55034 +#define vmalloc_32(x) \
55035 +({ \
55036 + void *___retval; \
55037 + intoverflow_t ___x = (intoverflow_t)x; \
55038 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
55039 + ___retval = NULL; \
55040 + else \
55041 + ___retval = vmalloc_32((unsigned long)___x); \
55042 + ___retval; \
55043 +})
55044 +
55045 +#define vmalloc_32_user(x) \
55046 +({ \
55047 +void *___retval; \
55048 + intoverflow_t ___x = (intoverflow_t)x; \
55049 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
55050 + ___retval = NULL; \
55051 + else \
55052 + ___retval = vmalloc_32_user((unsigned long)___x);\
55053 + ___retval; \
55054 +})
55055 +
55056 #endif /* _LINUX_VMALLOC_H */
55057 diff -urNp linux-2.6.39.4/include/linux/vmstat.h linux-2.6.39.4/include/linux/vmstat.h
55058 --- linux-2.6.39.4/include/linux/vmstat.h 2011-05-19 00:06:34.000000000 -0400
55059 +++ linux-2.6.39.4/include/linux/vmstat.h 2011-08-05 19:44:37.000000000 -0400
55060 @@ -147,18 +147,18 @@ static inline void vm_events_fold_cpu(in
55061 /*
55062 * Zone based page accounting with per cpu differentials.
55063 */
55064 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
55065 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
55066
55067 static inline void zone_page_state_add(long x, struct zone *zone,
55068 enum zone_stat_item item)
55069 {
55070 - atomic_long_add(x, &zone->vm_stat[item]);
55071 - atomic_long_add(x, &vm_stat[item]);
55072 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
55073 + atomic_long_add_unchecked(x, &vm_stat[item]);
55074 }
55075
55076 static inline unsigned long global_page_state(enum zone_stat_item item)
55077 {
55078 - long x = atomic_long_read(&vm_stat[item]);
55079 + long x = atomic_long_read_unchecked(&vm_stat[item]);
55080 #ifdef CONFIG_SMP
55081 if (x < 0)
55082 x = 0;
55083 @@ -169,7 +169,7 @@ static inline unsigned long global_page_
55084 static inline unsigned long zone_page_state(struct zone *zone,
55085 enum zone_stat_item item)
55086 {
55087 - long x = atomic_long_read(&zone->vm_stat[item]);
55088 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
55089 #ifdef CONFIG_SMP
55090 if (x < 0)
55091 x = 0;
55092 @@ -186,7 +186,7 @@ static inline unsigned long zone_page_st
55093 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
55094 enum zone_stat_item item)
55095 {
55096 - long x = atomic_long_read(&zone->vm_stat[item]);
55097 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
55098
55099 #ifdef CONFIG_SMP
55100 int cpu;
55101 @@ -280,8 +280,8 @@ static inline void __mod_zone_page_state
55102
55103 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
55104 {
55105 - atomic_long_inc(&zone->vm_stat[item]);
55106 - atomic_long_inc(&vm_stat[item]);
55107 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
55108 + atomic_long_inc_unchecked(&vm_stat[item]);
55109 }
55110
55111 static inline void __inc_zone_page_state(struct page *page,
55112 @@ -292,8 +292,8 @@ static inline void __inc_zone_page_state
55113
55114 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
55115 {
55116 - atomic_long_dec(&zone->vm_stat[item]);
55117 - atomic_long_dec(&vm_stat[item]);
55118 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
55119 + atomic_long_dec_unchecked(&vm_stat[item]);
55120 }
55121
55122 static inline void __dec_zone_page_state(struct page *page,
55123 diff -urNp linux-2.6.39.4/include/media/saa7146_vv.h linux-2.6.39.4/include/media/saa7146_vv.h
55124 --- linux-2.6.39.4/include/media/saa7146_vv.h 2011-05-19 00:06:34.000000000 -0400
55125 +++ linux-2.6.39.4/include/media/saa7146_vv.h 2011-08-05 20:34:06.000000000 -0400
55126 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
55127 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
55128
55129 /* the extension can override this */
55130 - struct v4l2_ioctl_ops ops;
55131 + v4l2_ioctl_ops_no_const ops;
55132 /* pointer to the saa7146 core ops */
55133 const struct v4l2_ioctl_ops *core_ops;
55134
55135 diff -urNp linux-2.6.39.4/include/media/v4l2-dev.h linux-2.6.39.4/include/media/v4l2-dev.h
55136 --- linux-2.6.39.4/include/media/v4l2-dev.h 2011-05-19 00:06:34.000000000 -0400
55137 +++ linux-2.6.39.4/include/media/v4l2-dev.h 2011-08-05 20:34:06.000000000 -0400
55138 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_sta
55139
55140
55141 struct v4l2_file_operations {
55142 - struct module *owner;
55143 + struct module * const owner;
55144 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
55145 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
55146 unsigned int (*poll) (struct file *, struct poll_table_struct *);
55147 diff -urNp linux-2.6.39.4/include/media/v4l2-device.h linux-2.6.39.4/include/media/v4l2-device.h
55148 --- linux-2.6.39.4/include/media/v4l2-device.h 2011-05-19 00:06:34.000000000 -0400
55149 +++ linux-2.6.39.4/include/media/v4l2-device.h 2011-08-05 19:44:37.000000000 -0400
55150 @@ -95,7 +95,7 @@ int __must_check v4l2_device_register(st
55151 this function returns 0. If the name ends with a digit (e.g. cx18),
55152 then the name will be set to cx18-0 since cx180 looks really odd. */
55153 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
55154 - atomic_t *instance);
55155 + atomic_unchecked_t *instance);
55156
55157 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
55158 Since the parent disappears this ensures that v4l2_dev doesn't have an
55159 diff -urNp linux-2.6.39.4/include/media/v4l2-ioctl.h linux-2.6.39.4/include/media/v4l2-ioctl.h
55160 --- linux-2.6.39.4/include/media/v4l2-ioctl.h 2011-05-19 00:06:34.000000000 -0400
55161 +++ linux-2.6.39.4/include/media/v4l2-ioctl.h 2011-08-05 20:34:06.000000000 -0400
55162 @@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
55163 long (*vidioc_default) (struct file *file, void *fh,
55164 bool valid_prio, int cmd, void *arg);
55165 };
55166 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
55167
55168
55169 /* v4l debugging and diagnostics */
55170 diff -urNp linux-2.6.39.4/include/net/caif/cfctrl.h linux-2.6.39.4/include/net/caif/cfctrl.h
55171 --- linux-2.6.39.4/include/net/caif/cfctrl.h 2011-05-19 00:06:34.000000000 -0400
55172 +++ linux-2.6.39.4/include/net/caif/cfctrl.h 2011-08-05 20:34:06.000000000 -0400
55173 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
55174 void (*radioset_rsp)(void);
55175 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
55176 struct cflayer *client_layer);
55177 -};
55178 +} __no_const;
55179
55180 /* Link Setup Parameters for CAIF-Links. */
55181 struct cfctrl_link_param {
55182 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
55183 struct cfctrl {
55184 struct cfsrvl serv;
55185 struct cfctrl_rsp res;
55186 - atomic_t req_seq_no;
55187 - atomic_t rsp_seq_no;
55188 + atomic_unchecked_t req_seq_no;
55189 + atomic_unchecked_t rsp_seq_no;
55190 struct list_head list;
55191 /* Protects from simultaneous access to first_req list */
55192 spinlock_t info_list_lock;
55193 diff -urNp linux-2.6.39.4/include/net/flow.h linux-2.6.39.4/include/net/flow.h
55194 --- linux-2.6.39.4/include/net/flow.h 2011-05-19 00:06:34.000000000 -0400
55195 +++ linux-2.6.39.4/include/net/flow.h 2011-08-05 19:44:37.000000000 -0400
55196 @@ -167,6 +167,6 @@ extern struct flow_cache_object *flow_ca
55197 u8 dir, flow_resolve_t resolver, void *ctx);
55198
55199 extern void flow_cache_flush(void);
55200 -extern atomic_t flow_cache_genid;
55201 +extern atomic_unchecked_t flow_cache_genid;
55202
55203 #endif
55204 diff -urNp linux-2.6.39.4/include/net/inetpeer.h linux-2.6.39.4/include/net/inetpeer.h
55205 --- linux-2.6.39.4/include/net/inetpeer.h 2011-05-19 00:06:34.000000000 -0400
55206 +++ linux-2.6.39.4/include/net/inetpeer.h 2011-08-05 19:44:37.000000000 -0400
55207 @@ -43,8 +43,8 @@ struct inet_peer {
55208 */
55209 union {
55210 struct {
55211 - atomic_t rid; /* Frag reception counter */
55212 - atomic_t ip_id_count; /* IP ID for the next packet */
55213 + atomic_unchecked_t rid; /* Frag reception counter */
55214 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
55215 __u32 tcp_ts;
55216 __u32 tcp_ts_stamp;
55217 u32 metrics[RTAX_MAX];
55218 @@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
55219 {
55220 more++;
55221 inet_peer_refcheck(p);
55222 - return atomic_add_return(more, &p->ip_id_count) - more;
55223 + return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
55224 }
55225
55226 #endif /* _NET_INETPEER_H */
55227 diff -urNp linux-2.6.39.4/include/net/ip_fib.h linux-2.6.39.4/include/net/ip_fib.h
55228 --- linux-2.6.39.4/include/net/ip_fib.h 2011-05-19 00:06:34.000000000 -0400
55229 +++ linux-2.6.39.4/include/net/ip_fib.h 2011-08-05 19:44:37.000000000 -0400
55230 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
55231
55232 #define FIB_RES_SADDR(net, res) \
55233 ((FIB_RES_NH(res).nh_saddr_genid == \
55234 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
55235 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
55236 FIB_RES_NH(res).nh_saddr : \
55237 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
55238 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
55239 diff -urNp linux-2.6.39.4/include/net/ip_vs.h linux-2.6.39.4/include/net/ip_vs.h
55240 --- linux-2.6.39.4/include/net/ip_vs.h 2011-07-09 09:18:51.000000000 -0400
55241 +++ linux-2.6.39.4/include/net/ip_vs.h 2011-08-05 19:44:37.000000000 -0400
55242 @@ -512,7 +512,7 @@ struct ip_vs_conn {
55243 struct ip_vs_conn *control; /* Master control connection */
55244 atomic_t n_control; /* Number of controlled ones */
55245 struct ip_vs_dest *dest; /* real server */
55246 - atomic_t in_pkts; /* incoming packet counter */
55247 + atomic_unchecked_t in_pkts; /* incoming packet counter */
55248
55249 /* packet transmitter for different forwarding methods. If it
55250 mangles the packet, it must return NF_DROP or better NF_STOLEN,
55251 @@ -650,7 +650,7 @@ struct ip_vs_dest {
55252 __be16 port; /* port number of the server */
55253 union nf_inet_addr addr; /* IP address of the server */
55254 volatile unsigned flags; /* dest status flags */
55255 - atomic_t conn_flags; /* flags to copy to conn */
55256 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
55257 atomic_t weight; /* server weight */
55258
55259 atomic_t refcnt; /* reference counter */
55260 diff -urNp linux-2.6.39.4/include/net/irda/ircomm_core.h linux-2.6.39.4/include/net/irda/ircomm_core.h
55261 --- linux-2.6.39.4/include/net/irda/ircomm_core.h 2011-05-19 00:06:34.000000000 -0400
55262 +++ linux-2.6.39.4/include/net/irda/ircomm_core.h 2011-08-05 20:34:06.000000000 -0400
55263 @@ -51,7 +51,7 @@ typedef struct {
55264 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
55265 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
55266 struct ircomm_info *);
55267 -} call_t;
55268 +} __no_const call_t;
55269
55270 struct ircomm_cb {
55271 irda_queue_t queue;
55272 diff -urNp linux-2.6.39.4/include/net/irda/ircomm_tty.h linux-2.6.39.4/include/net/irda/ircomm_tty.h
55273 --- linux-2.6.39.4/include/net/irda/ircomm_tty.h 2011-05-19 00:06:34.000000000 -0400
55274 +++ linux-2.6.39.4/include/net/irda/ircomm_tty.h 2011-08-05 19:44:37.000000000 -0400
55275 @@ -35,6 +35,7 @@
55276 #include <linux/termios.h>
55277 #include <linux/timer.h>
55278 #include <linux/tty.h> /* struct tty_struct */
55279 +#include <asm/local.h>
55280
55281 #include <net/irda/irias_object.h>
55282 #include <net/irda/ircomm_core.h>
55283 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
55284 unsigned short close_delay;
55285 unsigned short closing_wait; /* time to wait before closing */
55286
55287 - int open_count;
55288 - int blocked_open; /* # of blocked opens */
55289 + local_t open_count;
55290 + local_t blocked_open; /* # of blocked opens */
55291
55292 /* Protect concurent access to :
55293 * o self->open_count
55294 diff -urNp linux-2.6.39.4/include/net/iucv/af_iucv.h linux-2.6.39.4/include/net/iucv/af_iucv.h
55295 --- linux-2.6.39.4/include/net/iucv/af_iucv.h 2011-05-19 00:06:34.000000000 -0400
55296 +++ linux-2.6.39.4/include/net/iucv/af_iucv.h 2011-08-05 19:44:37.000000000 -0400
55297 @@ -87,7 +87,7 @@ struct iucv_sock {
55298 struct iucv_sock_list {
55299 struct hlist_head head;
55300 rwlock_t lock;
55301 - atomic_t autobind_name;
55302 + atomic_unchecked_t autobind_name;
55303 };
55304
55305 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
55306 diff -urNp linux-2.6.39.4/include/net/lapb.h linux-2.6.39.4/include/net/lapb.h
55307 --- linux-2.6.39.4/include/net/lapb.h 2011-05-19 00:06:34.000000000 -0400
55308 +++ linux-2.6.39.4/include/net/lapb.h 2011-08-05 20:34:06.000000000 -0400
55309 @@ -95,7 +95,7 @@ struct lapb_cb {
55310 struct sk_buff_head write_queue;
55311 struct sk_buff_head ack_queue;
55312 unsigned char window;
55313 - struct lapb_register_struct callbacks;
55314 + struct lapb_register_struct *callbacks;
55315
55316 /* FRMR control information */
55317 struct lapb_frame frmr_data;
55318 diff -urNp linux-2.6.39.4/include/net/neighbour.h linux-2.6.39.4/include/net/neighbour.h
55319 --- linux-2.6.39.4/include/net/neighbour.h 2011-05-19 00:06:34.000000000 -0400
55320 +++ linux-2.6.39.4/include/net/neighbour.h 2011-08-05 20:34:06.000000000 -0400
55321 @@ -117,7 +117,7 @@ struct neighbour {
55322 };
55323
55324 struct neigh_ops {
55325 - int family;
55326 + const int family;
55327 void (*solicit)(struct neighbour *, struct sk_buff*);
55328 void (*error_report)(struct neighbour *, struct sk_buff*);
55329 int (*output)(struct sk_buff*);
55330 diff -urNp linux-2.6.39.4/include/net/netlink.h linux-2.6.39.4/include/net/netlink.h
55331 --- linux-2.6.39.4/include/net/netlink.h 2011-05-19 00:06:34.000000000 -0400
55332 +++ linux-2.6.39.4/include/net/netlink.h 2011-08-05 19:44:37.000000000 -0400
55333 @@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
55334 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
55335 {
55336 if (mark)
55337 - skb_trim(skb, (unsigned char *) mark - skb->data);
55338 + skb_trim(skb, (const unsigned char *) mark - skb->data);
55339 }
55340
55341 /**
55342 diff -urNp linux-2.6.39.4/include/net/netns/ipv4.h linux-2.6.39.4/include/net/netns/ipv4.h
55343 --- linux-2.6.39.4/include/net/netns/ipv4.h 2011-05-19 00:06:34.000000000 -0400
55344 +++ linux-2.6.39.4/include/net/netns/ipv4.h 2011-08-05 19:44:37.000000000 -0400
55345 @@ -54,8 +54,8 @@ struct netns_ipv4 {
55346 int sysctl_rt_cache_rebuild_count;
55347 int current_rt_cache_rebuild_count;
55348
55349 - atomic_t rt_genid;
55350 - atomic_t dev_addr_genid;
55351 + atomic_unchecked_t rt_genid;
55352 + atomic_unchecked_t dev_addr_genid;
55353
55354 #ifdef CONFIG_IP_MROUTE
55355 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
55356 diff -urNp linux-2.6.39.4/include/net/sctp/sctp.h linux-2.6.39.4/include/net/sctp/sctp.h
55357 --- linux-2.6.39.4/include/net/sctp/sctp.h 2011-05-19 00:06:34.000000000 -0400
55358 +++ linux-2.6.39.4/include/net/sctp/sctp.h 2011-08-05 19:44:37.000000000 -0400
55359 @@ -316,9 +316,9 @@ do { \
55360
55361 #else /* SCTP_DEBUG */
55362
55363 -#define SCTP_DEBUG_PRINTK(whatever...)
55364 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
55365 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
55366 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
55367 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
55368 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
55369 #define SCTP_ENABLE_DEBUG
55370 #define SCTP_DISABLE_DEBUG
55371 #define SCTP_ASSERT(expr, str, func)
55372 diff -urNp linux-2.6.39.4/include/net/sock.h linux-2.6.39.4/include/net/sock.h
55373 --- linux-2.6.39.4/include/net/sock.h 2011-05-19 00:06:34.000000000 -0400
55374 +++ linux-2.6.39.4/include/net/sock.h 2011-08-05 19:44:37.000000000 -0400
55375 @@ -277,7 +277,7 @@ struct sock {
55376 #ifdef CONFIG_RPS
55377 __u32 sk_rxhash;
55378 #endif
55379 - atomic_t sk_drops;
55380 + atomic_unchecked_t sk_drops;
55381 int sk_rcvbuf;
55382
55383 struct sk_filter __rcu *sk_filter;
55384 diff -urNp linux-2.6.39.4/include/net/tcp.h linux-2.6.39.4/include/net/tcp.h
55385 --- linux-2.6.39.4/include/net/tcp.h 2011-05-19 00:06:34.000000000 -0400
55386 +++ linux-2.6.39.4/include/net/tcp.h 2011-08-05 20:34:06.000000000 -0400
55387 @@ -1374,8 +1374,8 @@ enum tcp_seq_states {
55388 struct tcp_seq_afinfo {
55389 char *name;
55390 sa_family_t family;
55391 - struct file_operations seq_fops;
55392 - struct seq_operations seq_ops;
55393 + file_operations_no_const seq_fops;
55394 + seq_operations_no_const seq_ops;
55395 };
55396
55397 struct tcp_iter_state {
55398 diff -urNp linux-2.6.39.4/include/net/udp.h linux-2.6.39.4/include/net/udp.h
55399 --- linux-2.6.39.4/include/net/udp.h 2011-05-19 00:06:34.000000000 -0400
55400 +++ linux-2.6.39.4/include/net/udp.h 2011-08-05 20:34:06.000000000 -0400
55401 @@ -234,8 +234,8 @@ struct udp_seq_afinfo {
55402 char *name;
55403 sa_family_t family;
55404 struct udp_table *udp_table;
55405 - struct file_operations seq_fops;
55406 - struct seq_operations seq_ops;
55407 + file_operations_no_const seq_fops;
55408 + seq_operations_no_const seq_ops;
55409 };
55410
55411 struct udp_iter_state {
55412 diff -urNp linux-2.6.39.4/include/net/xfrm.h linux-2.6.39.4/include/net/xfrm.h
55413 --- linux-2.6.39.4/include/net/xfrm.h 2011-05-19 00:06:34.000000000 -0400
55414 +++ linux-2.6.39.4/include/net/xfrm.h 2011-08-05 19:44:37.000000000 -0400
55415 @@ -505,7 +505,7 @@ struct xfrm_policy {
55416 struct timer_list timer;
55417
55418 struct flow_cache_object flo;
55419 - atomic_t genid;
55420 + atomic_unchecked_t genid;
55421 u32 priority;
55422 u32 index;
55423 struct xfrm_mark mark;
55424 diff -urNp linux-2.6.39.4/include/rdma/iw_cm.h linux-2.6.39.4/include/rdma/iw_cm.h
55425 --- linux-2.6.39.4/include/rdma/iw_cm.h 2011-05-19 00:06:34.000000000 -0400
55426 +++ linux-2.6.39.4/include/rdma/iw_cm.h 2011-08-05 20:34:06.000000000 -0400
55427 @@ -129,7 +129,7 @@ struct iw_cm_verbs {
55428 int backlog);
55429
55430 int (*destroy_listen)(struct iw_cm_id *cm_id);
55431 -};
55432 +} __no_const;
55433
55434 /**
55435 * iw_create_cm_id - Create an IW CM identifier.
55436 diff -urNp linux-2.6.39.4/include/scsi/libfc.h linux-2.6.39.4/include/scsi/libfc.h
55437 --- linux-2.6.39.4/include/scsi/libfc.h 2011-05-19 00:06:34.000000000 -0400
55438 +++ linux-2.6.39.4/include/scsi/libfc.h 2011-08-05 20:34:06.000000000 -0400
55439 @@ -750,6 +750,7 @@ struct libfc_function_template {
55440 */
55441 void (*disc_stop_final) (struct fc_lport *);
55442 };
55443 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
55444
55445 /**
55446 * struct fc_disc - Discovery context
55447 @@ -853,7 +854,7 @@ struct fc_lport {
55448 struct fc_vport *vport;
55449
55450 /* Operational Information */
55451 - struct libfc_function_template tt;
55452 + libfc_function_template_no_const tt;
55453 u8 link_up;
55454 u8 qfull;
55455 enum fc_lport_state state;
55456 diff -urNp linux-2.6.39.4/include/scsi/scsi_device.h linux-2.6.39.4/include/scsi/scsi_device.h
55457 --- linux-2.6.39.4/include/scsi/scsi_device.h 2011-05-19 00:06:34.000000000 -0400
55458 +++ linux-2.6.39.4/include/scsi/scsi_device.h 2011-08-05 19:44:37.000000000 -0400
55459 @@ -161,9 +161,9 @@ struct scsi_device {
55460 unsigned int max_device_blocked; /* what device_blocked counts down from */
55461 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
55462
55463 - atomic_t iorequest_cnt;
55464 - atomic_t iodone_cnt;
55465 - atomic_t ioerr_cnt;
55466 + atomic_unchecked_t iorequest_cnt;
55467 + atomic_unchecked_t iodone_cnt;
55468 + atomic_unchecked_t ioerr_cnt;
55469
55470 struct device sdev_gendev,
55471 sdev_dev;
55472 diff -urNp linux-2.6.39.4/include/scsi/scsi_transport_fc.h linux-2.6.39.4/include/scsi/scsi_transport_fc.h
55473 --- linux-2.6.39.4/include/scsi/scsi_transport_fc.h 2011-05-19 00:06:34.000000000 -0400
55474 +++ linux-2.6.39.4/include/scsi/scsi_transport_fc.h 2011-08-05 20:34:06.000000000 -0400
55475 @@ -666,9 +666,9 @@ struct fc_function_template {
55476 int (*bsg_timeout)(struct fc_bsg_job *);
55477
55478 /* allocation lengths for host-specific data */
55479 - u32 dd_fcrport_size;
55480 - u32 dd_fcvport_size;
55481 - u32 dd_bsg_size;
55482 + const u32 dd_fcrport_size;
55483 + const u32 dd_fcvport_size;
55484 + const u32 dd_bsg_size;
55485
55486 /*
55487 * The driver sets these to tell the transport class it
55488 @@ -678,39 +678,39 @@ struct fc_function_template {
55489 */
55490
55491 /* remote port fixed attributes */
55492 - unsigned long show_rport_maxframe_size:1;
55493 - unsigned long show_rport_supported_classes:1;
55494 - unsigned long show_rport_dev_loss_tmo:1;
55495 + const unsigned long show_rport_maxframe_size:1;
55496 + const unsigned long show_rport_supported_classes:1;
55497 + const unsigned long show_rport_dev_loss_tmo:1;
55498
55499 /*
55500 * target dynamic attributes
55501 * These should all be "1" if the driver uses the remote port
55502 * add/delete functions (so attributes reflect rport values).
55503 */
55504 - unsigned long show_starget_node_name:1;
55505 - unsigned long show_starget_port_name:1;
55506 - unsigned long show_starget_port_id:1;
55507 + const unsigned long show_starget_node_name:1;
55508 + const unsigned long show_starget_port_name:1;
55509 + const unsigned long show_starget_port_id:1;
55510
55511 /* host fixed attributes */
55512 - unsigned long show_host_node_name:1;
55513 - unsigned long show_host_port_name:1;
55514 - unsigned long show_host_permanent_port_name:1;
55515 - unsigned long show_host_supported_classes:1;
55516 - unsigned long show_host_supported_fc4s:1;
55517 - unsigned long show_host_supported_speeds:1;
55518 - unsigned long show_host_maxframe_size:1;
55519 - unsigned long show_host_serial_number:1;
55520 + const unsigned long show_host_node_name:1;
55521 + const unsigned long show_host_port_name:1;
55522 + const unsigned long show_host_permanent_port_name:1;
55523 + const unsigned long show_host_supported_classes:1;
55524 + const unsigned long show_host_supported_fc4s:1;
55525 + const unsigned long show_host_supported_speeds:1;
55526 + const unsigned long show_host_maxframe_size:1;
55527 + const unsigned long show_host_serial_number:1;
55528 /* host dynamic attributes */
55529 - unsigned long show_host_port_id:1;
55530 - unsigned long show_host_port_type:1;
55531 - unsigned long show_host_port_state:1;
55532 - unsigned long show_host_active_fc4s:1;
55533 - unsigned long show_host_speed:1;
55534 - unsigned long show_host_fabric_name:1;
55535 - unsigned long show_host_symbolic_name:1;
55536 - unsigned long show_host_system_hostname:1;
55537 + const unsigned long show_host_port_id:1;
55538 + const unsigned long show_host_port_type:1;
55539 + const unsigned long show_host_port_state:1;
55540 + const unsigned long show_host_active_fc4s:1;
55541 + const unsigned long show_host_speed:1;
55542 + const unsigned long show_host_fabric_name:1;
55543 + const unsigned long show_host_symbolic_name:1;
55544 + const unsigned long show_host_system_hostname:1;
55545
55546 - unsigned long disable_target_scan:1;
55547 + const unsigned long disable_target_scan:1;
55548 };
55549
55550
55551 diff -urNp linux-2.6.39.4/include/sound/ak4xxx-adda.h linux-2.6.39.4/include/sound/ak4xxx-adda.h
55552 --- linux-2.6.39.4/include/sound/ak4xxx-adda.h 2011-05-19 00:06:34.000000000 -0400
55553 +++ linux-2.6.39.4/include/sound/ak4xxx-adda.h 2011-08-05 20:34:06.000000000 -0400
55554 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
55555 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
55556 unsigned char val);
55557 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
55558 -};
55559 +} __no_const;
55560
55561 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
55562
55563 diff -urNp linux-2.6.39.4/include/sound/hwdep.h linux-2.6.39.4/include/sound/hwdep.h
55564 --- linux-2.6.39.4/include/sound/hwdep.h 2011-05-19 00:06:34.000000000 -0400
55565 +++ linux-2.6.39.4/include/sound/hwdep.h 2011-08-05 20:34:06.000000000 -0400
55566 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
55567 struct snd_hwdep_dsp_status *status);
55568 int (*dsp_load)(struct snd_hwdep *hw,
55569 struct snd_hwdep_dsp_image *image);
55570 -};
55571 +} __no_const;
55572
55573 struct snd_hwdep {
55574 struct snd_card *card;
55575 diff -urNp linux-2.6.39.4/include/sound/info.h linux-2.6.39.4/include/sound/info.h
55576 --- linux-2.6.39.4/include/sound/info.h 2011-05-19 00:06:34.000000000 -0400
55577 +++ linux-2.6.39.4/include/sound/info.h 2011-08-05 20:34:06.000000000 -0400
55578 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
55579 struct snd_info_buffer *buffer);
55580 void (*write)(struct snd_info_entry *entry,
55581 struct snd_info_buffer *buffer);
55582 -};
55583 +} __no_const;
55584
55585 struct snd_info_entry_ops {
55586 int (*open)(struct snd_info_entry *entry,
55587 diff -urNp linux-2.6.39.4/include/sound/pcm.h linux-2.6.39.4/include/sound/pcm.h
55588 --- linux-2.6.39.4/include/sound/pcm.h 2011-05-19 00:06:34.000000000 -0400
55589 +++ linux-2.6.39.4/include/sound/pcm.h 2011-08-05 20:34:06.000000000 -0400
55590 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
55591 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
55592 int (*ack)(struct snd_pcm_substream *substream);
55593 };
55594 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
55595
55596 /*
55597 *
55598 diff -urNp linux-2.6.39.4/include/sound/sb16_csp.h linux-2.6.39.4/include/sound/sb16_csp.h
55599 --- linux-2.6.39.4/include/sound/sb16_csp.h 2011-05-19 00:06:34.000000000 -0400
55600 +++ linux-2.6.39.4/include/sound/sb16_csp.h 2011-08-05 20:34:06.000000000 -0400
55601 @@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
55602 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
55603 int (*csp_stop) (struct snd_sb_csp * p);
55604 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
55605 -};
55606 +} __no_const;
55607
55608 /*
55609 * CSP private data
55610 diff -urNp linux-2.6.39.4/include/sound/soc.h linux-2.6.39.4/include/sound/soc.h
55611 --- linux-2.6.39.4/include/sound/soc.h 2011-05-19 00:06:34.000000000 -0400
55612 +++ linux-2.6.39.4/include/sound/soc.h 2011-08-05 20:34:06.000000000 -0400
55613 @@ -624,7 +624,7 @@ struct snd_soc_platform_driver {
55614 struct snd_soc_dai *);
55615
55616 /* platform stream ops */
55617 - struct snd_pcm_ops *ops;
55618 + struct snd_pcm_ops * const ops;
55619 };
55620
55621 struct snd_soc_platform {
55622 diff -urNp linux-2.6.39.4/include/sound/ymfpci.h linux-2.6.39.4/include/sound/ymfpci.h
55623 --- linux-2.6.39.4/include/sound/ymfpci.h 2011-05-19 00:06:34.000000000 -0400
55624 +++ linux-2.6.39.4/include/sound/ymfpci.h 2011-08-05 19:44:37.000000000 -0400
55625 @@ -358,7 +358,7 @@ struct snd_ymfpci {
55626 spinlock_t reg_lock;
55627 spinlock_t voice_lock;
55628 wait_queue_head_t interrupt_sleep;
55629 - atomic_t interrupt_sleep_count;
55630 + atomic_unchecked_t interrupt_sleep_count;
55631 struct snd_info_entry *proc_entry;
55632 const struct firmware *dsp_microcode;
55633 const struct firmware *controller_microcode;
55634 diff -urNp linux-2.6.39.4/include/target/target_core_base.h linux-2.6.39.4/include/target/target_core_base.h
55635 --- linux-2.6.39.4/include/target/target_core_base.h 2011-06-03 00:04:14.000000000 -0400
55636 +++ linux-2.6.39.4/include/target/target_core_base.h 2011-08-05 20:34:06.000000000 -0400
55637 @@ -364,7 +364,7 @@ struct t10_reservation_ops {
55638 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
55639 int (*t10_pr_register)(struct se_cmd *);
55640 int (*t10_pr_clear)(struct se_cmd *);
55641 -};
55642 +} __no_const;
55643
55644 struct t10_reservation_template {
55645 /* Reservation effects all target ports */
55646 @@ -432,8 +432,8 @@ struct se_transport_task {
55647 atomic_t t_task_cdbs_left;
55648 atomic_t t_task_cdbs_ex_left;
55649 atomic_t t_task_cdbs_timeout_left;
55650 - atomic_t t_task_cdbs_sent;
55651 - atomic_t t_transport_aborted;
55652 + atomic_unchecked_t t_task_cdbs_sent;
55653 + atomic_unchecked_t t_transport_aborted;
55654 atomic_t t_transport_active;
55655 atomic_t t_transport_complete;
55656 atomic_t t_transport_queue_active;
55657 @@ -774,7 +774,7 @@ struct se_device {
55658 atomic_t active_cmds;
55659 atomic_t simple_cmds;
55660 atomic_t depth_left;
55661 - atomic_t dev_ordered_id;
55662 + atomic_unchecked_t dev_ordered_id;
55663 atomic_t dev_tur_active;
55664 atomic_t execute_tasks;
55665 atomic_t dev_status_thr_count;
55666 diff -urNp linux-2.6.39.4/include/trace/events/irq.h linux-2.6.39.4/include/trace/events/irq.h
55667 --- linux-2.6.39.4/include/trace/events/irq.h 2011-05-19 00:06:34.000000000 -0400
55668 +++ linux-2.6.39.4/include/trace/events/irq.h 2011-08-05 19:44:37.000000000 -0400
55669 @@ -36,7 +36,7 @@ struct softirq_action;
55670 */
55671 TRACE_EVENT(irq_handler_entry,
55672
55673 - TP_PROTO(int irq, struct irqaction *action),
55674 + TP_PROTO(int irq, const struct irqaction *action),
55675
55676 TP_ARGS(irq, action),
55677
55678 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
55679 */
55680 TRACE_EVENT(irq_handler_exit,
55681
55682 - TP_PROTO(int irq, struct irqaction *action, int ret),
55683 + TP_PROTO(int irq, const struct irqaction *action, int ret),
55684
55685 TP_ARGS(irq, action, ret),
55686
55687 diff -urNp linux-2.6.39.4/include/video/udlfb.h linux-2.6.39.4/include/video/udlfb.h
55688 --- linux-2.6.39.4/include/video/udlfb.h 2011-05-19 00:06:34.000000000 -0400
55689 +++ linux-2.6.39.4/include/video/udlfb.h 2011-08-05 19:44:37.000000000 -0400
55690 @@ -51,10 +51,10 @@ struct dlfb_data {
55691 int base8;
55692 u32 pseudo_palette[256];
55693 /* blit-only rendering path metrics, exposed through sysfs */
55694 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55695 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
55696 - atomic_t bytes_sent; /* to usb, after compression including overhead */
55697 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
55698 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55699 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
55700 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
55701 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
55702 };
55703
55704 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
55705 diff -urNp linux-2.6.39.4/include/video/uvesafb.h linux-2.6.39.4/include/video/uvesafb.h
55706 --- linux-2.6.39.4/include/video/uvesafb.h 2011-05-19 00:06:34.000000000 -0400
55707 +++ linux-2.6.39.4/include/video/uvesafb.h 2011-08-05 19:44:37.000000000 -0400
55708 @@ -177,6 +177,7 @@ struct uvesafb_par {
55709 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
55710 u8 pmi_setpal; /* PMI for palette changes */
55711 u16 *pmi_base; /* protected mode interface location */
55712 + u8 *pmi_code; /* protected mode code location */
55713 void *pmi_start;
55714 void *pmi_pal;
55715 u8 *vbe_state_orig; /*
55716 diff -urNp linux-2.6.39.4/init/do_mounts.c linux-2.6.39.4/init/do_mounts.c
55717 --- linux-2.6.39.4/init/do_mounts.c 2011-05-19 00:06:34.000000000 -0400
55718 +++ linux-2.6.39.4/init/do_mounts.c 2011-08-05 19:44:37.000000000 -0400
55719 @@ -287,7 +287,7 @@ static void __init get_fs_names(char *pa
55720
55721 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
55722 {
55723 - int err = sys_mount(name, "/root", fs, flags, data);
55724 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
55725 if (err)
55726 return err;
55727
55728 @@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
55729 va_start(args, fmt);
55730 vsprintf(buf, fmt, args);
55731 va_end(args);
55732 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
55733 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
55734 if (fd >= 0) {
55735 sys_ioctl(fd, FDEJECT, 0);
55736 sys_close(fd);
55737 }
55738 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
55739 - fd = sys_open("/dev/console", O_RDWR, 0);
55740 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
55741 if (fd >= 0) {
55742 sys_ioctl(fd, TCGETS, (long)&termios);
55743 termios.c_lflag &= ~ICANON;
55744 sys_ioctl(fd, TCSETSF, (long)&termios);
55745 - sys_read(fd, &c, 1);
55746 + sys_read(fd, (char __user *)&c, 1);
55747 termios.c_lflag |= ICANON;
55748 sys_ioctl(fd, TCSETSF, (long)&termios);
55749 sys_close(fd);
55750 @@ -488,6 +488,6 @@ void __init prepare_namespace(void)
55751 mount_root();
55752 out:
55753 devtmpfs_mount("dev");
55754 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
55755 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55756 sys_chroot((const char __user __force *)".");
55757 }
55758 diff -urNp linux-2.6.39.4/init/do_mounts.h linux-2.6.39.4/init/do_mounts.h
55759 --- linux-2.6.39.4/init/do_mounts.h 2011-05-19 00:06:34.000000000 -0400
55760 +++ linux-2.6.39.4/init/do_mounts.h 2011-08-05 19:44:37.000000000 -0400
55761 @@ -15,15 +15,15 @@ extern int root_mountflags;
55762
55763 static inline int create_dev(char *name, dev_t dev)
55764 {
55765 - sys_unlink(name);
55766 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
55767 + sys_unlink((__force char __user *)name);
55768 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
55769 }
55770
55771 #if BITS_PER_LONG == 32
55772 static inline u32 bstat(char *name)
55773 {
55774 struct stat64 stat;
55775 - if (sys_stat64(name, &stat) != 0)
55776 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
55777 return 0;
55778 if (!S_ISBLK(stat.st_mode))
55779 return 0;
55780 diff -urNp linux-2.6.39.4/init/do_mounts_initrd.c linux-2.6.39.4/init/do_mounts_initrd.c
55781 --- linux-2.6.39.4/init/do_mounts_initrd.c 2011-05-19 00:06:34.000000000 -0400
55782 +++ linux-2.6.39.4/init/do_mounts_initrd.c 2011-08-05 19:44:37.000000000 -0400
55783 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
55784 create_dev("/dev/root.old", Root_RAM0);
55785 /* mount initrd on rootfs' /root */
55786 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
55787 - sys_mkdir("/old", 0700);
55788 - root_fd = sys_open("/", 0, 0);
55789 - old_fd = sys_open("/old", 0, 0);
55790 + sys_mkdir((__force const char __user *)"/old", 0700);
55791 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
55792 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
55793 /* move initrd over / and chdir/chroot in initrd root */
55794 - sys_chdir("/root");
55795 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
55796 - sys_chroot(".");
55797 + sys_chdir((__force const char __user *)"/root");
55798 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55799 + sys_chroot((__force const char __user *)".");
55800
55801 /*
55802 * In case that a resume from disk is carried out by linuxrc or one of
55803 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
55804
55805 /* move initrd to rootfs' /old */
55806 sys_fchdir(old_fd);
55807 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
55808 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
55809 /* switch root and cwd back to / of rootfs */
55810 sys_fchdir(root_fd);
55811 - sys_chroot(".");
55812 + sys_chroot((__force const char __user *)".");
55813 sys_close(old_fd);
55814 sys_close(root_fd);
55815
55816 if (new_decode_dev(real_root_dev) == Root_RAM0) {
55817 - sys_chdir("/old");
55818 + sys_chdir((__force const char __user *)"/old");
55819 return;
55820 }
55821
55822 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
55823 mount_root();
55824
55825 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
55826 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
55827 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
55828 if (!error)
55829 printk("okay\n");
55830 else {
55831 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
55832 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
55833 if (error == -ENOENT)
55834 printk("/initrd does not exist. Ignored.\n");
55835 else
55836 printk("failed\n");
55837 printk(KERN_NOTICE "Unmounting old root\n");
55838 - sys_umount("/old", MNT_DETACH);
55839 + sys_umount((__force char __user *)"/old", MNT_DETACH);
55840 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
55841 if (fd < 0) {
55842 error = fd;
55843 @@ -116,11 +116,11 @@ int __init initrd_load(void)
55844 * mounted in the normal path.
55845 */
55846 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
55847 - sys_unlink("/initrd.image");
55848 + sys_unlink((__force const char __user *)"/initrd.image");
55849 handle_initrd();
55850 return 1;
55851 }
55852 }
55853 - sys_unlink("/initrd.image");
55854 + sys_unlink((__force const char __user *)"/initrd.image");
55855 return 0;
55856 }
55857 diff -urNp linux-2.6.39.4/init/do_mounts_md.c linux-2.6.39.4/init/do_mounts_md.c
55858 --- linux-2.6.39.4/init/do_mounts_md.c 2011-05-19 00:06:34.000000000 -0400
55859 +++ linux-2.6.39.4/init/do_mounts_md.c 2011-08-05 19:44:37.000000000 -0400
55860 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
55861 partitioned ? "_d" : "", minor,
55862 md_setup_args[ent].device_names);
55863
55864 - fd = sys_open(name, 0, 0);
55865 + fd = sys_open((__force char __user *)name, 0, 0);
55866 if (fd < 0) {
55867 printk(KERN_ERR "md: open failed - cannot start "
55868 "array %s\n", name);
55869 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
55870 * array without it
55871 */
55872 sys_close(fd);
55873 - fd = sys_open(name, 0, 0);
55874 + fd = sys_open((__force char __user *)name, 0, 0);
55875 sys_ioctl(fd, BLKRRPART, 0);
55876 }
55877 sys_close(fd);
55878 diff -urNp linux-2.6.39.4/init/initramfs.c linux-2.6.39.4/init/initramfs.c
55879 --- linux-2.6.39.4/init/initramfs.c 2011-05-19 00:06:34.000000000 -0400
55880 +++ linux-2.6.39.4/init/initramfs.c 2011-08-05 19:44:37.000000000 -0400
55881 @@ -74,7 +74,7 @@ static void __init free_hash(void)
55882 }
55883 }
55884
55885 -static long __init do_utime(char __user *filename, time_t mtime)
55886 +static long __init do_utime(__force char __user *filename, time_t mtime)
55887 {
55888 struct timespec t[2];
55889
55890 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
55891 struct dir_entry *de, *tmp;
55892 list_for_each_entry_safe(de, tmp, &dir_list, list) {
55893 list_del(&de->list);
55894 - do_utime(de->name, de->mtime);
55895 + do_utime((__force char __user *)de->name, de->mtime);
55896 kfree(de->name);
55897 kfree(de);
55898 }
55899 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
55900 if (nlink >= 2) {
55901 char *old = find_link(major, minor, ino, mode, collected);
55902 if (old)
55903 - return (sys_link(old, collected) < 0) ? -1 : 1;
55904 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
55905 }
55906 return 0;
55907 }
55908 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
55909 {
55910 struct stat st;
55911
55912 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
55913 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
55914 if (S_ISDIR(st.st_mode))
55915 - sys_rmdir(path);
55916 + sys_rmdir((__force char __user *)path);
55917 else
55918 - sys_unlink(path);
55919 + sys_unlink((__force char __user *)path);
55920 }
55921 }
55922
55923 @@ -305,7 +305,7 @@ static int __init do_name(void)
55924 int openflags = O_WRONLY|O_CREAT;
55925 if (ml != 1)
55926 openflags |= O_TRUNC;
55927 - wfd = sys_open(collected, openflags, mode);
55928 + wfd = sys_open((__force char __user *)collected, openflags, mode);
55929
55930 if (wfd >= 0) {
55931 sys_fchown(wfd, uid, gid);
55932 @@ -317,17 +317,17 @@ static int __init do_name(void)
55933 }
55934 }
55935 } else if (S_ISDIR(mode)) {
55936 - sys_mkdir(collected, mode);
55937 - sys_chown(collected, uid, gid);
55938 - sys_chmod(collected, mode);
55939 + sys_mkdir((__force char __user *)collected, mode);
55940 + sys_chown((__force char __user *)collected, uid, gid);
55941 + sys_chmod((__force char __user *)collected, mode);
55942 dir_add(collected, mtime);
55943 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
55944 S_ISFIFO(mode) || S_ISSOCK(mode)) {
55945 if (maybe_link() == 0) {
55946 - sys_mknod(collected, mode, rdev);
55947 - sys_chown(collected, uid, gid);
55948 - sys_chmod(collected, mode);
55949 - do_utime(collected, mtime);
55950 + sys_mknod((__force char __user *)collected, mode, rdev);
55951 + sys_chown((__force char __user *)collected, uid, gid);
55952 + sys_chmod((__force char __user *)collected, mode);
55953 + do_utime((__force char __user *)collected, mtime);
55954 }
55955 }
55956 return 0;
55957 @@ -336,15 +336,15 @@ static int __init do_name(void)
55958 static int __init do_copy(void)
55959 {
55960 if (count >= body_len) {
55961 - sys_write(wfd, victim, body_len);
55962 + sys_write(wfd, (__force char __user *)victim, body_len);
55963 sys_close(wfd);
55964 - do_utime(vcollected, mtime);
55965 + do_utime((__force char __user *)vcollected, mtime);
55966 kfree(vcollected);
55967 eat(body_len);
55968 state = SkipIt;
55969 return 0;
55970 } else {
55971 - sys_write(wfd, victim, count);
55972 + sys_write(wfd, (__force char __user *)victim, count);
55973 body_len -= count;
55974 eat(count);
55975 return 1;
55976 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
55977 {
55978 collected[N_ALIGN(name_len) + body_len] = '\0';
55979 clean_path(collected, 0);
55980 - sys_symlink(collected + N_ALIGN(name_len), collected);
55981 - sys_lchown(collected, uid, gid);
55982 - do_utime(collected, mtime);
55983 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
55984 + sys_lchown((__force char __user *)collected, uid, gid);
55985 + do_utime((__force char __user *)collected, mtime);
55986 state = SkipIt;
55987 next_state = Reset;
55988 return 0;
55989 diff -urNp linux-2.6.39.4/init/Kconfig linux-2.6.39.4/init/Kconfig
55990 --- linux-2.6.39.4/init/Kconfig 2011-05-19 00:06:34.000000000 -0400
55991 +++ linux-2.6.39.4/init/Kconfig 2011-08-05 19:44:37.000000000 -0400
55992 @@ -1202,7 +1202,7 @@ config SLUB_DEBUG
55993
55994 config COMPAT_BRK
55995 bool "Disable heap randomization"
55996 - default y
55997 + default n
55998 help
55999 Randomizing heap placement makes heap exploits harder, but it
56000 also breaks ancient binaries (including anything libc5 based).
56001 diff -urNp linux-2.6.39.4/init/main.c linux-2.6.39.4/init/main.c
56002 --- linux-2.6.39.4/init/main.c 2011-06-03 00:04:14.000000000 -0400
56003 +++ linux-2.6.39.4/init/main.c 2011-08-05 20:34:06.000000000 -0400
56004 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
56005 extern void tc_init(void);
56006 #endif
56007
56008 +extern void grsecurity_init(void);
56009 +
56010 /*
56011 * Debug helper: via this flag we know that we are in 'early bootup code'
56012 * where only the boot processor is running with IRQ disabled. This means
56013 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char
56014
56015 __setup("reset_devices", set_reset_devices);
56016
56017 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
56018 +extern char pax_enter_kernel_user[];
56019 +extern char pax_exit_kernel_user[];
56020 +extern pgdval_t clone_pgd_mask;
56021 +#endif
56022 +
56023 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
56024 +static int __init setup_pax_nouderef(char *str)
56025 +{
56026 +#ifdef CONFIG_X86_32
56027 + unsigned int cpu;
56028 + struct desc_struct *gdt;
56029 +
56030 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
56031 + gdt = get_cpu_gdt_table(cpu);
56032 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
56033 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
56034 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
56035 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
56036 + }
56037 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
56038 +#else
56039 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
56040 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
56041 + clone_pgd_mask = ~(pgdval_t)0UL;
56042 +#endif
56043 +
56044 + return 0;
56045 +}
56046 +early_param("pax_nouderef", setup_pax_nouderef);
56047 +#endif
56048 +
56049 +#ifdef CONFIG_PAX_SOFTMODE
56050 +int pax_softmode;
56051 +
56052 +static int __init setup_pax_softmode(char *str)
56053 +{
56054 + get_option(&str, &pax_softmode);
56055 + return 1;
56056 +}
56057 +__setup("pax_softmode=", setup_pax_softmode);
56058 +#endif
56059 +
56060 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
56061 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
56062 static const char *panic_later, *panic_param;
56063 @@ -663,6 +708,7 @@ int __init_or_module do_one_initcall(ini
56064 {
56065 int count = preempt_count();
56066 int ret;
56067 + const char *msg1 = "", *msg2 = "";
56068
56069 if (initcall_debug)
56070 ret = do_one_initcall_debug(fn);
56071 @@ -675,15 +721,15 @@ int __init_or_module do_one_initcall(ini
56072 sprintf(msgbuf, "error code %d ", ret);
56073
56074 if (preempt_count() != count) {
56075 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
56076 + msg1 = " preemption imbalance";
56077 preempt_count() = count;
56078 }
56079 if (irqs_disabled()) {
56080 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
56081 + msg2 = " disabled interrupts";
56082 local_irq_enable();
56083 }
56084 - if (msgbuf[0]) {
56085 - printk("initcall %pF returned with %s\n", fn, msgbuf);
56086 + if (msgbuf[0] || *msg1 || *msg2) {
56087 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
56088 }
56089
56090 return ret;
56091 @@ -801,7 +847,7 @@ static int __init kernel_init(void * unu
56092 do_basic_setup();
56093
56094 /* Open the /dev/console on the rootfs, this should never fail */
56095 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
56096 + if (sys_open((__force const char __user *) "/dev/console", O_RDWR, 0) < 0)
56097 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
56098
56099 (void) sys_dup(0);
56100 @@ -814,11 +860,13 @@ static int __init kernel_init(void * unu
56101 if (!ramdisk_execute_command)
56102 ramdisk_execute_command = "/init";
56103
56104 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
56105 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
56106 ramdisk_execute_command = NULL;
56107 prepare_namespace();
56108 }
56109
56110 + grsecurity_init();
56111 +
56112 /*
56113 * Ok, we have completed the initial bootup, and
56114 * we're essentially up and running. Get rid of the
56115 diff -urNp linux-2.6.39.4/ipc/mqueue.c linux-2.6.39.4/ipc/mqueue.c
56116 --- linux-2.6.39.4/ipc/mqueue.c 2011-05-19 00:06:34.000000000 -0400
56117 +++ linux-2.6.39.4/ipc/mqueue.c 2011-08-05 19:44:37.000000000 -0400
56118 @@ -154,6 +154,7 @@ static struct inode *mqueue_get_inode(st
56119 mq_bytes = (mq_msg_tblsz +
56120 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
56121
56122 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
56123 spin_lock(&mq_lock);
56124 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
56125 u->mq_bytes + mq_bytes >
56126 diff -urNp linux-2.6.39.4/ipc/msg.c linux-2.6.39.4/ipc/msg.c
56127 --- linux-2.6.39.4/ipc/msg.c 2011-05-19 00:06:34.000000000 -0400
56128 +++ linux-2.6.39.4/ipc/msg.c 2011-08-05 20:34:06.000000000 -0400
56129 @@ -309,18 +309,19 @@ static inline int msg_security(struct ke
56130 return security_msg_queue_associate(msq, msgflg);
56131 }
56132
56133 +static struct ipc_ops msg_ops = {
56134 + .getnew = newque,
56135 + .associate = msg_security,
56136 + .more_checks = NULL
56137 +};
56138 +
56139 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
56140 {
56141 struct ipc_namespace *ns;
56142 - struct ipc_ops msg_ops;
56143 struct ipc_params msg_params;
56144
56145 ns = current->nsproxy->ipc_ns;
56146
56147 - msg_ops.getnew = newque;
56148 - msg_ops.associate = msg_security;
56149 - msg_ops.more_checks = NULL;
56150 -
56151 msg_params.key = key;
56152 msg_params.flg = msgflg;
56153
56154 diff -urNp linux-2.6.39.4/ipc/sem.c linux-2.6.39.4/ipc/sem.c
56155 --- linux-2.6.39.4/ipc/sem.c 2011-05-19 00:06:34.000000000 -0400
56156 +++ linux-2.6.39.4/ipc/sem.c 2011-08-05 20:34:06.000000000 -0400
56157 @@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
56158 return 0;
56159 }
56160
56161 +static struct ipc_ops sem_ops = {
56162 + .getnew = newary,
56163 + .associate = sem_security,
56164 + .more_checks = sem_more_checks
56165 +};
56166 +
56167 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
56168 {
56169 struct ipc_namespace *ns;
56170 - struct ipc_ops sem_ops;
56171 struct ipc_params sem_params;
56172
56173 ns = current->nsproxy->ipc_ns;
56174 @@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
56175 if (nsems < 0 || nsems > ns->sc_semmsl)
56176 return -EINVAL;
56177
56178 - sem_ops.getnew = newary;
56179 - sem_ops.associate = sem_security;
56180 - sem_ops.more_checks = sem_more_checks;
56181 -
56182 sem_params.key = key;
56183 sem_params.flg = semflg;
56184 sem_params.u.nsems = nsems;
56185 @@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
56186 int nsems;
56187 struct list_head tasks;
56188
56189 + pax_track_stack();
56190 +
56191 sma = sem_lock_check(ns, semid);
56192 if (IS_ERR(sma))
56193 return PTR_ERR(sma);
56194 @@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
56195 struct ipc_namespace *ns;
56196 struct list_head tasks;
56197
56198 + pax_track_stack();
56199 +
56200 ns = current->nsproxy->ipc_ns;
56201
56202 if (nsops < 1 || semid < 0)
56203 diff -urNp linux-2.6.39.4/ipc/shm.c linux-2.6.39.4/ipc/shm.c
56204 --- linux-2.6.39.4/ipc/shm.c 2011-05-19 00:06:34.000000000 -0400
56205 +++ linux-2.6.39.4/ipc/shm.c 2011-08-05 20:34:06.000000000 -0400
56206 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
56207 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
56208 #endif
56209
56210 +#ifdef CONFIG_GRKERNSEC
56211 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56212 + const time_t shm_createtime, const uid_t cuid,
56213 + const int shmid);
56214 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56215 + const time_t shm_createtime);
56216 +#endif
56217 +
56218 void shm_init_ns(struct ipc_namespace *ns)
56219 {
56220 ns->shm_ctlmax = SHMMAX;
56221 @@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
56222 shp->shm_lprid = 0;
56223 shp->shm_atim = shp->shm_dtim = 0;
56224 shp->shm_ctim = get_seconds();
56225 +#ifdef CONFIG_GRKERNSEC
56226 + {
56227 + struct timespec timeval;
56228 + do_posix_clock_monotonic_gettime(&timeval);
56229 +
56230 + shp->shm_createtime = timeval.tv_sec;
56231 + }
56232 +#endif
56233 shp->shm_segsz = size;
56234 shp->shm_nattch = 0;
56235 shp->shm_file = file;
56236 @@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
56237 return 0;
56238 }
56239
56240 +static struct ipc_ops shm_ops = {
56241 + .getnew = newseg,
56242 + .associate = shm_security,
56243 + .more_checks = shm_more_checks
56244 +};
56245 +
56246 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
56247 {
56248 struct ipc_namespace *ns;
56249 - struct ipc_ops shm_ops;
56250 struct ipc_params shm_params;
56251
56252 ns = current->nsproxy->ipc_ns;
56253
56254 - shm_ops.getnew = newseg;
56255 - shm_ops.associate = shm_security;
56256 - shm_ops.more_checks = shm_more_checks;
56257 -
56258 shm_params.key = key;
56259 shm_params.flg = shmflg;
56260 shm_params.u.size = size;
56261 @@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
56262 case SHM_LOCK:
56263 case SHM_UNLOCK:
56264 {
56265 - struct file *uninitialized_var(shm_file);
56266 -
56267 lru_add_drain_all(); /* drain pagevecs to lru lists */
56268
56269 shp = shm_lock_check(ns, shmid);
56270 @@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
56271 if (err)
56272 goto out_unlock;
56273
56274 +#ifdef CONFIG_GRKERNSEC
56275 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
56276 + shp->shm_perm.cuid, shmid) ||
56277 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
56278 + err = -EACCES;
56279 + goto out_unlock;
56280 + }
56281 +#endif
56282 +
56283 path = shp->shm_file->f_path;
56284 path_get(&path);
56285 shp->shm_nattch++;
56286 +#ifdef CONFIG_GRKERNSEC
56287 + shp->shm_lapid = current->pid;
56288 +#endif
56289 size = i_size_read(path.dentry->d_inode);
56290 shm_unlock(shp);
56291
56292 diff -urNp linux-2.6.39.4/kernel/acct.c linux-2.6.39.4/kernel/acct.c
56293 --- linux-2.6.39.4/kernel/acct.c 2011-05-19 00:06:34.000000000 -0400
56294 +++ linux-2.6.39.4/kernel/acct.c 2011-08-05 19:44:37.000000000 -0400
56295 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
56296 */
56297 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
56298 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
56299 - file->f_op->write(file, (char *)&ac,
56300 + file->f_op->write(file, (__force char __user *)&ac,
56301 sizeof(acct_t), &file->f_pos);
56302 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
56303 set_fs(fs);
56304 diff -urNp linux-2.6.39.4/kernel/audit.c linux-2.6.39.4/kernel/audit.c
56305 --- linux-2.6.39.4/kernel/audit.c 2011-05-19 00:06:34.000000000 -0400
56306 +++ linux-2.6.39.4/kernel/audit.c 2011-08-05 19:44:37.000000000 -0400
56307 @@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
56308 3) suppressed due to audit_rate_limit
56309 4) suppressed due to audit_backlog_limit
56310 */
56311 -static atomic_t audit_lost = ATOMIC_INIT(0);
56312 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
56313
56314 /* The netlink socket. */
56315 static struct sock *audit_sock;
56316 @@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
56317 unsigned long now;
56318 int print;
56319
56320 - atomic_inc(&audit_lost);
56321 + atomic_inc_unchecked(&audit_lost);
56322
56323 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
56324
56325 @@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
56326 printk(KERN_WARNING
56327 "audit: audit_lost=%d audit_rate_limit=%d "
56328 "audit_backlog_limit=%d\n",
56329 - atomic_read(&audit_lost),
56330 + atomic_read_unchecked(&audit_lost),
56331 audit_rate_limit,
56332 audit_backlog_limit);
56333 audit_panic(message);
56334 @@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
56335 status_set.pid = audit_pid;
56336 status_set.rate_limit = audit_rate_limit;
56337 status_set.backlog_limit = audit_backlog_limit;
56338 - status_set.lost = atomic_read(&audit_lost);
56339 + status_set.lost = atomic_read_unchecked(&audit_lost);
56340 status_set.backlog = skb_queue_len(&audit_skb_queue);
56341 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
56342 &status_set, sizeof(status_set));
56343 diff -urNp linux-2.6.39.4/kernel/auditsc.c linux-2.6.39.4/kernel/auditsc.c
56344 --- linux-2.6.39.4/kernel/auditsc.c 2011-05-19 00:06:34.000000000 -0400
56345 +++ linux-2.6.39.4/kernel/auditsc.c 2011-08-05 19:44:37.000000000 -0400
56346 @@ -2111,7 +2111,7 @@ int auditsc_get_stamp(struct audit_conte
56347 }
56348
56349 /* global counter which is incremented every time something logs in */
56350 -static atomic_t session_id = ATOMIC_INIT(0);
56351 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
56352
56353 /**
56354 * audit_set_loginuid - set a task's audit_context loginuid
56355 @@ -2124,7 +2124,7 @@ static atomic_t session_id = ATOMIC_INIT
56356 */
56357 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
56358 {
56359 - unsigned int sessionid = atomic_inc_return(&session_id);
56360 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
56361 struct audit_context *context = task->audit_context;
56362
56363 if (context && context->in_syscall) {
56364 diff -urNp linux-2.6.39.4/kernel/capability.c linux-2.6.39.4/kernel/capability.c
56365 --- linux-2.6.39.4/kernel/capability.c 2011-05-19 00:06:34.000000000 -0400
56366 +++ linux-2.6.39.4/kernel/capability.c 2011-08-05 19:44:37.000000000 -0400
56367 @@ -206,6 +206,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
56368 * before modification is attempted and the application
56369 * fails.
56370 */
56371 + if (tocopy > ARRAY_SIZE(kdata))
56372 + return -EFAULT;
56373 +
56374 if (copy_to_user(dataptr, kdata, tocopy
56375 * sizeof(struct __user_cap_data_struct))) {
56376 return -EFAULT;
56377 @@ -378,7 +381,7 @@ bool ns_capable(struct user_namespace *n
56378 BUG();
56379 }
56380
56381 - if (security_capable(ns, current_cred(), cap) == 0) {
56382 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
56383 current->flags |= PF_SUPERPRIV;
56384 return true;
56385 }
56386 @@ -386,6 +389,27 @@ bool ns_capable(struct user_namespace *n
56387 }
56388 EXPORT_SYMBOL(ns_capable);
56389
56390 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
56391 +{
56392 + if (unlikely(!cap_valid(cap))) {
56393 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
56394 + BUG();
56395 + }
56396 +
56397 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
56398 + current->flags |= PF_SUPERPRIV;
56399 + return true;
56400 + }
56401 + return false;
56402 +}
56403 +EXPORT_SYMBOL(ns_capable_nolog);
56404 +
56405 +bool capable_nolog(int cap)
56406 +{
56407 + return ns_capable_nolog(&init_user_ns, cap);
56408 +}
56409 +EXPORT_SYMBOL(capable_nolog);
56410 +
56411 /**
56412 * task_ns_capable - Determine whether current task has a superior
56413 * capability targeted at a specific task's user namespace.
56414 @@ -400,6 +424,12 @@ bool task_ns_capable(struct task_struct
56415 }
56416 EXPORT_SYMBOL(task_ns_capable);
56417
56418 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
56419 +{
56420 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
56421 +}
56422 +EXPORT_SYMBOL(task_ns_capable_nolog);
56423 +
56424 /**
56425 * nsown_capable - Check superior capability to one's own user_ns
56426 * @cap: The capability in question
56427 diff -urNp linux-2.6.39.4/kernel/cgroup.c linux-2.6.39.4/kernel/cgroup.c
56428 --- linux-2.6.39.4/kernel/cgroup.c 2011-05-19 00:06:34.000000000 -0400
56429 +++ linux-2.6.39.4/kernel/cgroup.c 2011-08-05 19:44:37.000000000 -0400
56430 @@ -598,6 +598,8 @@ static struct css_set *find_css_set(
56431 struct hlist_head *hhead;
56432 struct cg_cgroup_link *link;
56433
56434 + pax_track_stack();
56435 +
56436 /* First see if we already have a cgroup group that matches
56437 * the desired set */
56438 read_lock(&css_set_lock);
56439 diff -urNp linux-2.6.39.4/kernel/compat.c linux-2.6.39.4/kernel/compat.c
56440 --- linux-2.6.39.4/kernel/compat.c 2011-05-19 00:06:34.000000000 -0400
56441 +++ linux-2.6.39.4/kernel/compat.c 2011-08-05 19:44:37.000000000 -0400
56442 @@ -13,6 +13,7 @@
56443
56444 #include <linux/linkage.h>
56445 #include <linux/compat.h>
56446 +#include <linux/module.h>
56447 #include <linux/errno.h>
56448 #include <linux/time.h>
56449 #include <linux/signal.h>
56450 diff -urNp linux-2.6.39.4/kernel/configs.c linux-2.6.39.4/kernel/configs.c
56451 --- linux-2.6.39.4/kernel/configs.c 2011-05-19 00:06:34.000000000 -0400
56452 +++ linux-2.6.39.4/kernel/configs.c 2011-08-05 19:44:37.000000000 -0400
56453 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
56454 struct proc_dir_entry *entry;
56455
56456 /* create the current config file */
56457 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
56458 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
56459 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
56460 + &ikconfig_file_ops);
56461 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56462 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
56463 + &ikconfig_file_ops);
56464 +#endif
56465 +#else
56466 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
56467 &ikconfig_file_ops);
56468 +#endif
56469 +
56470 if (!entry)
56471 return -ENOMEM;
56472
56473 diff -urNp linux-2.6.39.4/kernel/cred.c linux-2.6.39.4/kernel/cred.c
56474 --- linux-2.6.39.4/kernel/cred.c 2011-05-19 00:06:34.000000000 -0400
56475 +++ linux-2.6.39.4/kernel/cred.c 2011-08-05 19:44:37.000000000 -0400
56476 @@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
56477 */
56478 void __put_cred(struct cred *cred)
56479 {
56480 + pax_track_stack();
56481 +
56482 kdebug("__put_cred(%p{%d,%d})", cred,
56483 atomic_read(&cred->usage),
56484 read_cred_subscribers(cred));
56485 @@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
56486 {
56487 struct cred *cred;
56488
56489 + pax_track_stack();
56490 +
56491 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
56492 atomic_read(&tsk->cred->usage),
56493 read_cred_subscribers(tsk->cred));
56494 @@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
56495 {
56496 const struct cred *cred;
56497
56498 + pax_track_stack();
56499 +
56500 rcu_read_lock();
56501
56502 do {
56503 @@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
56504 {
56505 struct cred *new;
56506
56507 + pax_track_stack();
56508 +
56509 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
56510 if (!new)
56511 return NULL;
56512 @@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
56513 const struct cred *old;
56514 struct cred *new;
56515
56516 + pax_track_stack();
56517 +
56518 validate_process_creds();
56519
56520 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56521 @@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
56522 struct thread_group_cred *tgcred = NULL;
56523 struct cred *new;
56524
56525 + pax_track_stack();
56526 +
56527 #ifdef CONFIG_KEYS
56528 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
56529 if (!tgcred)
56530 @@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
56531 struct cred *new;
56532 int ret;
56533
56534 + pax_track_stack();
56535 +
56536 if (
56537 #ifdef CONFIG_KEYS
56538 !p->cred->thread_keyring &&
56539 @@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
56540 struct task_struct *task = current;
56541 const struct cred *old = task->real_cred;
56542
56543 + pax_track_stack();
56544 +
56545 kdebug("commit_creds(%p{%d,%d})", new,
56546 atomic_read(&new->usage),
56547 read_cred_subscribers(new));
56548 @@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
56549
56550 get_cred(new); /* we will require a ref for the subj creds too */
56551
56552 + gr_set_role_label(task, new->uid, new->gid);
56553 +
56554 /* dumpability changes */
56555 if (old->euid != new->euid ||
56556 old->egid != new->egid ||
56557 @@ -551,6 +569,8 @@ EXPORT_SYMBOL(commit_creds);
56558 */
56559 void abort_creds(struct cred *new)
56560 {
56561 + pax_track_stack();
56562 +
56563 kdebug("abort_creds(%p{%d,%d})", new,
56564 atomic_read(&new->usage),
56565 read_cred_subscribers(new));
56566 @@ -574,6 +594,8 @@ const struct cred *override_creds(const
56567 {
56568 const struct cred *old = current->cred;
56569
56570 + pax_track_stack();
56571 +
56572 kdebug("override_creds(%p{%d,%d})", new,
56573 atomic_read(&new->usage),
56574 read_cred_subscribers(new));
56575 @@ -603,6 +625,8 @@ void revert_creds(const struct cred *old
56576 {
56577 const struct cred *override = current->cred;
56578
56579 + pax_track_stack();
56580 +
56581 kdebug("revert_creds(%p{%d,%d})", old,
56582 atomic_read(&old->usage),
56583 read_cred_subscribers(old));
56584 @@ -649,6 +673,8 @@ struct cred *prepare_kernel_cred(struct
56585 const struct cred *old;
56586 struct cred *new;
56587
56588 + pax_track_stack();
56589 +
56590 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56591 if (!new)
56592 return NULL;
56593 @@ -703,6 +729,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
56594 */
56595 int set_security_override(struct cred *new, u32 secid)
56596 {
56597 + pax_track_stack();
56598 +
56599 return security_kernel_act_as(new, secid);
56600 }
56601 EXPORT_SYMBOL(set_security_override);
56602 @@ -722,6 +750,8 @@ int set_security_override_from_ctx(struc
56603 u32 secid;
56604 int ret;
56605
56606 + pax_track_stack();
56607 +
56608 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
56609 if (ret < 0)
56610 return ret;
56611 diff -urNp linux-2.6.39.4/kernel/debug/debug_core.c linux-2.6.39.4/kernel/debug/debug_core.c
56612 --- linux-2.6.39.4/kernel/debug/debug_core.c 2011-05-19 00:06:34.000000000 -0400
56613 +++ linux-2.6.39.4/kernel/debug/debug_core.c 2011-08-05 20:34:06.000000000 -0400
56614 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
56615 */
56616 static atomic_t masters_in_kgdb;
56617 static atomic_t slaves_in_kgdb;
56618 -static atomic_t kgdb_break_tasklet_var;
56619 +static atomic_unchecked_t kgdb_break_tasklet_var;
56620 atomic_t kgdb_setting_breakpoint;
56621
56622 struct task_struct *kgdb_usethread;
56623 @@ -129,7 +129,7 @@ int kgdb_single_step;
56624 static pid_t kgdb_sstep_pid;
56625
56626 /* to keep track of the CPU which is doing the single stepping*/
56627 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56628 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56629
56630 /*
56631 * If you are debugging a problem where roundup (the collection of
56632 @@ -542,7 +542,7 @@ return_normal:
56633 * kernel will only try for the value of sstep_tries before
56634 * giving up and continuing on.
56635 */
56636 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
56637 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
56638 (kgdb_info[cpu].task &&
56639 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
56640 atomic_set(&kgdb_active, -1);
56641 @@ -636,8 +636,8 @@ cpu_master_loop:
56642 }
56643
56644 kgdb_restore:
56645 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
56646 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
56647 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
56648 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
56649 if (kgdb_info[sstep_cpu].task)
56650 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
56651 else
56652 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
56653 static void kgdb_tasklet_bpt(unsigned long ing)
56654 {
56655 kgdb_breakpoint();
56656 - atomic_set(&kgdb_break_tasklet_var, 0);
56657 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
56658 }
56659
56660 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
56661
56662 void kgdb_schedule_breakpoint(void)
56663 {
56664 - if (atomic_read(&kgdb_break_tasklet_var) ||
56665 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
56666 atomic_read(&kgdb_active) != -1 ||
56667 atomic_read(&kgdb_setting_breakpoint))
56668 return;
56669 - atomic_inc(&kgdb_break_tasklet_var);
56670 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
56671 tasklet_schedule(&kgdb_tasklet_breakpoint);
56672 }
56673 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
56674 diff -urNp linux-2.6.39.4/kernel/debug/kdb/kdb_main.c linux-2.6.39.4/kernel/debug/kdb/kdb_main.c
56675 --- linux-2.6.39.4/kernel/debug/kdb/kdb_main.c 2011-05-19 00:06:34.000000000 -0400
56676 +++ linux-2.6.39.4/kernel/debug/kdb/kdb_main.c 2011-08-05 19:44:37.000000000 -0400
56677 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
56678 list_for_each_entry(mod, kdb_modules, list) {
56679
56680 kdb_printf("%-20s%8u 0x%p ", mod->name,
56681 - mod->core_size, (void *)mod);
56682 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
56683 #ifdef CONFIG_MODULE_UNLOAD
56684 kdb_printf("%4d ", module_refcount(mod));
56685 #endif
56686 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
56687 kdb_printf(" (Loading)");
56688 else
56689 kdb_printf(" (Live)");
56690 - kdb_printf(" 0x%p", mod->module_core);
56691 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
56692
56693 #ifdef CONFIG_MODULE_UNLOAD
56694 {
56695 diff -urNp linux-2.6.39.4/kernel/exit.c linux-2.6.39.4/kernel/exit.c
56696 --- linux-2.6.39.4/kernel/exit.c 2011-05-19 00:06:34.000000000 -0400
56697 +++ linux-2.6.39.4/kernel/exit.c 2011-08-17 19:20:17.000000000 -0400
56698 @@ -57,6 +57,10 @@
56699 #include <asm/pgtable.h>
56700 #include <asm/mmu_context.h>
56701
56702 +#ifdef CONFIG_GRKERNSEC
56703 +extern rwlock_t grsec_exec_file_lock;
56704 +#endif
56705 +
56706 static void exit_mm(struct task_struct * tsk);
56707
56708 static void __unhash_process(struct task_struct *p, bool group_dead)
56709 @@ -169,6 +173,10 @@ void release_task(struct task_struct * p
56710 struct task_struct *leader;
56711 int zap_leader;
56712 repeat:
56713 +#ifdef CONFIG_NET
56714 + gr_del_task_from_ip_table(p);
56715 +#endif
56716 +
56717 tracehook_prepare_release_task(p);
56718 /* don't need to get the RCU readlock here - the process is dead and
56719 * can't be modifying its own credentials. But shut RCU-lockdep up */
56720 @@ -338,11 +346,22 @@ static void reparent_to_kthreadd(void)
56721 {
56722 write_lock_irq(&tasklist_lock);
56723
56724 +#ifdef CONFIG_GRKERNSEC
56725 + write_lock(&grsec_exec_file_lock);
56726 + if (current->exec_file) {
56727 + fput(current->exec_file);
56728 + current->exec_file = NULL;
56729 + }
56730 + write_unlock(&grsec_exec_file_lock);
56731 +#endif
56732 +
56733 ptrace_unlink(current);
56734 /* Reparent to init */
56735 current->real_parent = current->parent = kthreadd_task;
56736 list_move_tail(&current->sibling, &current->real_parent->children);
56737
56738 + gr_set_kernel_label(current);
56739 +
56740 /* Set the exit signal to SIGCHLD so we signal init on exit */
56741 current->exit_signal = SIGCHLD;
56742
56743 @@ -394,7 +413,7 @@ int allow_signal(int sig)
56744 * know it'll be handled, so that they don't get converted to
56745 * SIGKILL or just silently dropped.
56746 */
56747 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
56748 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
56749 recalc_sigpending();
56750 spin_unlock_irq(&current->sighand->siglock);
56751 return 0;
56752 @@ -430,6 +449,17 @@ void daemonize(const char *name, ...)
56753 vsnprintf(current->comm, sizeof(current->comm), name, args);
56754 va_end(args);
56755
56756 +#ifdef CONFIG_GRKERNSEC
56757 + write_lock(&grsec_exec_file_lock);
56758 + if (current->exec_file) {
56759 + fput(current->exec_file);
56760 + current->exec_file = NULL;
56761 + }
56762 + write_unlock(&grsec_exec_file_lock);
56763 +#endif
56764 +
56765 + gr_set_kernel_label(current);
56766 +
56767 /*
56768 * If we were started as result of loading a module, close all of the
56769 * user space pages. We don't need them, and if we didn't close them
56770 @@ -905,15 +935,8 @@ NORET_TYPE void do_exit(long code)
56771 struct task_struct *tsk = current;
56772 int group_dead;
56773
56774 - profile_task_exit(tsk);
56775 -
56776 - WARN_ON(atomic_read(&tsk->fs_excl));
56777 - WARN_ON(blk_needs_flush_plug(tsk));
56778 -
56779 if (unlikely(in_interrupt()))
56780 panic("Aiee, killing interrupt handler!");
56781 - if (unlikely(!tsk->pid))
56782 - panic("Attempted to kill the idle task!");
56783
56784 /*
56785 * If do_exit is called because this processes oopsed, it's possible
56786 @@ -924,6 +947,14 @@ NORET_TYPE void do_exit(long code)
56787 */
56788 set_fs(USER_DS);
56789
56790 + profile_task_exit(tsk);
56791 +
56792 + WARN_ON(atomic_read(&tsk->fs_excl));
56793 + WARN_ON(blk_needs_flush_plug(tsk));
56794 +
56795 + if (unlikely(!tsk->pid))
56796 + panic("Attempted to kill the idle task!");
56797 +
56798 tracehook_report_exit(&code);
56799
56800 validate_creds_for_do_exit(tsk);
56801 @@ -984,6 +1015,9 @@ NORET_TYPE void do_exit(long code)
56802 tsk->exit_code = code;
56803 taskstats_exit(tsk, group_dead);
56804
56805 + gr_acl_handle_psacct(tsk, code);
56806 + gr_acl_handle_exit();
56807 +
56808 exit_mm(tsk);
56809
56810 if (group_dead)
56811 diff -urNp linux-2.6.39.4/kernel/fork.c linux-2.6.39.4/kernel/fork.c
56812 --- linux-2.6.39.4/kernel/fork.c 2011-05-19 00:06:34.000000000 -0400
56813 +++ linux-2.6.39.4/kernel/fork.c 2011-08-05 19:44:37.000000000 -0400
56814 @@ -287,7 +287,7 @@ static struct task_struct *dup_task_stru
56815 *stackend = STACK_END_MAGIC; /* for overflow detection */
56816
56817 #ifdef CONFIG_CC_STACKPROTECTOR
56818 - tsk->stack_canary = get_random_int();
56819 + tsk->stack_canary = pax_get_random_long();
56820 #endif
56821
56822 /* One for us, one for whoever does the "release_task()" (usually parent) */
56823 @@ -309,13 +309,78 @@ out:
56824 }
56825
56826 #ifdef CONFIG_MMU
56827 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
56828 +{
56829 + struct vm_area_struct *tmp;
56830 + unsigned long charge;
56831 + struct mempolicy *pol;
56832 + struct file *file;
56833 +
56834 + charge = 0;
56835 + if (mpnt->vm_flags & VM_ACCOUNT) {
56836 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56837 + if (security_vm_enough_memory(len))
56838 + goto fail_nomem;
56839 + charge = len;
56840 + }
56841 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56842 + if (!tmp)
56843 + goto fail_nomem;
56844 + *tmp = *mpnt;
56845 + tmp->vm_mm = mm;
56846 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
56847 + pol = mpol_dup(vma_policy(mpnt));
56848 + if (IS_ERR(pol))
56849 + goto fail_nomem_policy;
56850 + vma_set_policy(tmp, pol);
56851 + if (anon_vma_fork(tmp, mpnt))
56852 + goto fail_nomem_anon_vma_fork;
56853 + tmp->vm_flags &= ~VM_LOCKED;
56854 + tmp->vm_next = tmp->vm_prev = NULL;
56855 + tmp->vm_mirror = NULL;
56856 + file = tmp->vm_file;
56857 + if (file) {
56858 + struct inode *inode = file->f_path.dentry->d_inode;
56859 + struct address_space *mapping = file->f_mapping;
56860 +
56861 + get_file(file);
56862 + if (tmp->vm_flags & VM_DENYWRITE)
56863 + atomic_dec(&inode->i_writecount);
56864 + spin_lock(&mapping->i_mmap_lock);
56865 + if (tmp->vm_flags & VM_SHARED)
56866 + mapping->i_mmap_writable++;
56867 + tmp->vm_truncate_count = mpnt->vm_truncate_count;
56868 + flush_dcache_mmap_lock(mapping);
56869 + /* insert tmp into the share list, just after mpnt */
56870 + vma_prio_tree_add(tmp, mpnt);
56871 + flush_dcache_mmap_unlock(mapping);
56872 + spin_unlock(&mapping->i_mmap_lock);
56873 + }
56874 +
56875 + /*
56876 + * Clear hugetlb-related page reserves for children. This only
56877 + * affects MAP_PRIVATE mappings. Faults generated by the child
56878 + * are not guaranteed to succeed, even if read-only
56879 + */
56880 + if (is_vm_hugetlb_page(tmp))
56881 + reset_vma_resv_huge_pages(tmp);
56882 +
56883 + return tmp;
56884 +
56885 +fail_nomem_anon_vma_fork:
56886 + mpol_put(pol);
56887 +fail_nomem_policy:
56888 + kmem_cache_free(vm_area_cachep, tmp);
56889 +fail_nomem:
56890 + vm_unacct_memory(charge);
56891 + return NULL;
56892 +}
56893 +
56894 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
56895 {
56896 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
56897 struct rb_node **rb_link, *rb_parent;
56898 int retval;
56899 - unsigned long charge;
56900 - struct mempolicy *pol;
56901
56902 down_write(&oldmm->mmap_sem);
56903 flush_cache_dup_mm(oldmm);
56904 @@ -327,8 +392,8 @@ static int dup_mmap(struct mm_struct *mm
56905 mm->locked_vm = 0;
56906 mm->mmap = NULL;
56907 mm->mmap_cache = NULL;
56908 - mm->free_area_cache = oldmm->mmap_base;
56909 - mm->cached_hole_size = ~0UL;
56910 + mm->free_area_cache = oldmm->free_area_cache;
56911 + mm->cached_hole_size = oldmm->cached_hole_size;
56912 mm->map_count = 0;
56913 cpumask_clear(mm_cpumask(mm));
56914 mm->mm_rb = RB_ROOT;
56915 @@ -344,8 +409,6 @@ static int dup_mmap(struct mm_struct *mm
56916
56917 prev = NULL;
56918 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
56919 - struct file *file;
56920 -
56921 if (mpnt->vm_flags & VM_DONTCOPY) {
56922 long pages = vma_pages(mpnt);
56923 mm->total_vm -= pages;
56924 @@ -353,56 +416,13 @@ static int dup_mmap(struct mm_struct *mm
56925 -pages);
56926 continue;
56927 }
56928 - charge = 0;
56929 - if (mpnt->vm_flags & VM_ACCOUNT) {
56930 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56931 - if (security_vm_enough_memory(len))
56932 - goto fail_nomem;
56933 - charge = len;
56934 - }
56935 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56936 - if (!tmp)
56937 - goto fail_nomem;
56938 - *tmp = *mpnt;
56939 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
56940 - pol = mpol_dup(vma_policy(mpnt));
56941 - retval = PTR_ERR(pol);
56942 - if (IS_ERR(pol))
56943 - goto fail_nomem_policy;
56944 - vma_set_policy(tmp, pol);
56945 - tmp->vm_mm = mm;
56946 - if (anon_vma_fork(tmp, mpnt))
56947 - goto fail_nomem_anon_vma_fork;
56948 - tmp->vm_flags &= ~VM_LOCKED;
56949 - tmp->vm_next = tmp->vm_prev = NULL;
56950 - file = tmp->vm_file;
56951 - if (file) {
56952 - struct inode *inode = file->f_path.dentry->d_inode;
56953 - struct address_space *mapping = file->f_mapping;
56954 -
56955 - get_file(file);
56956 - if (tmp->vm_flags & VM_DENYWRITE)
56957 - atomic_dec(&inode->i_writecount);
56958 - spin_lock(&mapping->i_mmap_lock);
56959 - if (tmp->vm_flags & VM_SHARED)
56960 - mapping->i_mmap_writable++;
56961 - tmp->vm_truncate_count = mpnt->vm_truncate_count;
56962 - flush_dcache_mmap_lock(mapping);
56963 - /* insert tmp into the share list, just after mpnt */
56964 - vma_prio_tree_add(tmp, mpnt);
56965 - flush_dcache_mmap_unlock(mapping);
56966 - spin_unlock(&mapping->i_mmap_lock);
56967 + tmp = dup_vma(mm, mpnt);
56968 + if (!tmp) {
56969 + retval = -ENOMEM;
56970 + goto out;
56971 }
56972
56973 /*
56974 - * Clear hugetlb-related page reserves for children. This only
56975 - * affects MAP_PRIVATE mappings. Faults generated by the child
56976 - * are not guaranteed to succeed, even if read-only
56977 - */
56978 - if (is_vm_hugetlb_page(tmp))
56979 - reset_vma_resv_huge_pages(tmp);
56980 -
56981 - /*
56982 * Link in the new vma and copy the page table entries.
56983 */
56984 *pprev = tmp;
56985 @@ -423,6 +443,31 @@ static int dup_mmap(struct mm_struct *mm
56986 if (retval)
56987 goto out;
56988 }
56989 +
56990 +#ifdef CONFIG_PAX_SEGMEXEC
56991 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
56992 + struct vm_area_struct *mpnt_m;
56993 +
56994 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
56995 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
56996 +
56997 + if (!mpnt->vm_mirror)
56998 + continue;
56999 +
57000 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
57001 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
57002 + mpnt->vm_mirror = mpnt_m;
57003 + } else {
57004 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
57005 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
57006 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
57007 + mpnt->vm_mirror->vm_mirror = mpnt;
57008 + }
57009 + }
57010 + BUG_ON(mpnt_m);
57011 + }
57012 +#endif
57013 +
57014 /* a new mm has just been created */
57015 arch_dup_mmap(oldmm, mm);
57016 retval = 0;
57017 @@ -431,14 +476,6 @@ out:
57018 flush_tlb_mm(oldmm);
57019 up_write(&oldmm->mmap_sem);
57020 return retval;
57021 -fail_nomem_anon_vma_fork:
57022 - mpol_put(pol);
57023 -fail_nomem_policy:
57024 - kmem_cache_free(vm_area_cachep, tmp);
57025 -fail_nomem:
57026 - retval = -ENOMEM;
57027 - vm_unacct_memory(charge);
57028 - goto out;
57029 }
57030
57031 static inline int mm_alloc_pgd(struct mm_struct * mm)
57032 @@ -785,13 +822,14 @@ static int copy_fs(unsigned long clone_f
57033 spin_unlock(&fs->lock);
57034 return -EAGAIN;
57035 }
57036 - fs->users++;
57037 + atomic_inc(&fs->users);
57038 spin_unlock(&fs->lock);
57039 return 0;
57040 }
57041 tsk->fs = copy_fs_struct(fs);
57042 if (!tsk->fs)
57043 return -ENOMEM;
57044 + gr_set_chroot_entries(tsk, &tsk->fs->root);
57045 return 0;
57046 }
57047
57048 @@ -1049,10 +1087,13 @@ static struct task_struct *copy_process(
57049 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
57050 #endif
57051 retval = -EAGAIN;
57052 +
57053 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
57054 +
57055 if (atomic_read(&p->real_cred->user->processes) >=
57056 task_rlimit(p, RLIMIT_NPROC)) {
57057 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
57058 - p->real_cred->user != INIT_USER)
57059 + if (p->real_cred->user != INIT_USER &&
57060 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
57061 goto bad_fork_free;
57062 }
57063
57064 @@ -1200,6 +1241,8 @@ static struct task_struct *copy_process(
57065 goto bad_fork_free_pid;
57066 }
57067
57068 + gr_copy_label(p);
57069 +
57070 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
57071 /*
57072 * Clear TID on mm_release()?
57073 @@ -1360,6 +1403,8 @@ bad_fork_cleanup_count:
57074 bad_fork_free:
57075 free_task(p);
57076 fork_out:
57077 + gr_log_forkfail(retval);
57078 +
57079 return ERR_PTR(retval);
57080 }
57081
57082 @@ -1448,6 +1493,8 @@ long do_fork(unsigned long clone_flags,
57083 if (clone_flags & CLONE_PARENT_SETTID)
57084 put_user(nr, parent_tidptr);
57085
57086 + gr_handle_brute_check();
57087 +
57088 if (clone_flags & CLONE_VFORK) {
57089 p->vfork_done = &vfork;
57090 init_completion(&vfork);
57091 @@ -1549,7 +1596,7 @@ static int unshare_fs(unsigned long unsh
57092 return 0;
57093
57094 /* don't need lock here; in the worst case we'll do useless copy */
57095 - if (fs->users == 1)
57096 + if (atomic_read(&fs->users) == 1)
57097 return 0;
57098
57099 *new_fsp = copy_fs_struct(fs);
57100 @@ -1636,7 +1683,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
57101 fs = current->fs;
57102 spin_lock(&fs->lock);
57103 current->fs = new_fs;
57104 - if (--fs->users)
57105 + gr_set_chroot_entries(current, &current->fs->root);
57106 + if (atomic_dec_return(&fs->users))
57107 new_fs = NULL;
57108 else
57109 new_fs = fs;
57110 diff -urNp linux-2.6.39.4/kernel/futex.c linux-2.6.39.4/kernel/futex.c
57111 --- linux-2.6.39.4/kernel/futex.c 2011-05-19 00:06:34.000000000 -0400
57112 +++ linux-2.6.39.4/kernel/futex.c 2011-08-05 19:44:37.000000000 -0400
57113 @@ -54,6 +54,7 @@
57114 #include <linux/mount.h>
57115 #include <linux/pagemap.h>
57116 #include <linux/syscalls.h>
57117 +#include <linux/ptrace.h>
57118 #include <linux/signal.h>
57119 #include <linux/module.h>
57120 #include <linux/magic.h>
57121 @@ -236,6 +237,11 @@ get_futex_key(u32 __user *uaddr, int fsh
57122 struct page *page, *page_head;
57123 int err;
57124
57125 +#ifdef CONFIG_PAX_SEGMEXEC
57126 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
57127 + return -EFAULT;
57128 +#endif
57129 +
57130 /*
57131 * The futex address must be "naturally" aligned.
57132 */
57133 @@ -1833,6 +1839,8 @@ static int futex_wait(u32 __user *uaddr,
57134 struct futex_q q = futex_q_init;
57135 int ret;
57136
57137 + pax_track_stack();
57138 +
57139 if (!bitset)
57140 return -EINVAL;
57141 q.bitset = bitset;
57142 @@ -2229,6 +2237,8 @@ static int futex_wait_requeue_pi(u32 __u
57143 struct futex_q q = futex_q_init;
57144 int res, ret;
57145
57146 + pax_track_stack();
57147 +
57148 if (!bitset)
57149 return -EINVAL;
57150
57151 @@ -2401,7 +2411,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
57152 {
57153 struct robust_list_head __user *head;
57154 unsigned long ret;
57155 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
57156 const struct cred *cred = current_cred(), *pcred;
57157 +#endif
57158
57159 if (!futex_cmpxchg_enabled)
57160 return -ENOSYS;
57161 @@ -2417,6 +2429,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
57162 if (!p)
57163 goto err_unlock;
57164 ret = -EPERM;
57165 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57166 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
57167 + goto err_unlock;
57168 +#else
57169 pcred = __task_cred(p);
57170 /* If victim is in different user_ns, then uids are not
57171 comparable, so we must have CAP_SYS_PTRACE */
57172 @@ -2431,6 +2447,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
57173 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
57174 goto err_unlock;
57175 ok:
57176 +#endif
57177 head = p->robust_list;
57178 rcu_read_unlock();
57179 }
57180 @@ -2682,6 +2699,7 @@ static int __init futex_init(void)
57181 {
57182 u32 curval;
57183 int i;
57184 + mm_segment_t oldfs;
57185
57186 /*
57187 * This will fail and we want it. Some arch implementations do
57188 @@ -2693,8 +2711,11 @@ static int __init futex_init(void)
57189 * implementation, the non-functional ones will return
57190 * -ENOSYS.
57191 */
57192 + oldfs = get_fs();
57193 + set_fs(USER_DS);
57194 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
57195 futex_cmpxchg_enabled = 1;
57196 + set_fs(oldfs);
57197
57198 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
57199 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
57200 diff -urNp linux-2.6.39.4/kernel/futex_compat.c linux-2.6.39.4/kernel/futex_compat.c
57201 --- linux-2.6.39.4/kernel/futex_compat.c 2011-05-19 00:06:34.000000000 -0400
57202 +++ linux-2.6.39.4/kernel/futex_compat.c 2011-08-05 19:44:37.000000000 -0400
57203 @@ -10,6 +10,7 @@
57204 #include <linux/compat.h>
57205 #include <linux/nsproxy.h>
57206 #include <linux/futex.h>
57207 +#include <linux/ptrace.h>
57208
57209 #include <asm/uaccess.h>
57210
57211 @@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
57212 {
57213 struct compat_robust_list_head __user *head;
57214 unsigned long ret;
57215 - const struct cred *cred = current_cred(), *pcred;
57216 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
57217 + const struct cred *cred = current_cred();
57218 + const struct cred *pcred;
57219 +#endif
57220
57221 if (!futex_cmpxchg_enabled)
57222 return -ENOSYS;
57223 @@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
57224 if (!p)
57225 goto err_unlock;
57226 ret = -EPERM;
57227 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57228 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
57229 + goto err_unlock;
57230 +#else
57231 pcred = __task_cred(p);
57232 /* If victim is in different user_ns, then uids are not
57233 comparable, so we must have CAP_SYS_PTRACE */
57234 @@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
57235 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
57236 goto err_unlock;
57237 ok:
57238 +#endif
57239 head = p->compat_robust_list;
57240 rcu_read_unlock();
57241 }
57242 diff -urNp linux-2.6.39.4/kernel/gcov/base.c linux-2.6.39.4/kernel/gcov/base.c
57243 --- linux-2.6.39.4/kernel/gcov/base.c 2011-05-19 00:06:34.000000000 -0400
57244 +++ linux-2.6.39.4/kernel/gcov/base.c 2011-08-05 19:44:37.000000000 -0400
57245 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
57246 }
57247
57248 #ifdef CONFIG_MODULES
57249 -static inline int within(void *addr, void *start, unsigned long size)
57250 -{
57251 - return ((addr >= start) && (addr < start + size));
57252 -}
57253 -
57254 /* Update list and generate events when modules are unloaded. */
57255 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
57256 void *data)
57257 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
57258 prev = NULL;
57259 /* Remove entries located in module from linked list. */
57260 for (info = gcov_info_head; info; info = info->next) {
57261 - if (within(info, mod->module_core, mod->core_size)) {
57262 + if (within_module_core_rw((unsigned long)info, mod)) {
57263 if (prev)
57264 prev->next = info->next;
57265 else
57266 diff -urNp linux-2.6.39.4/kernel/hrtimer.c linux-2.6.39.4/kernel/hrtimer.c
57267 --- linux-2.6.39.4/kernel/hrtimer.c 2011-05-19 00:06:34.000000000 -0400
57268 +++ linux-2.6.39.4/kernel/hrtimer.c 2011-08-05 19:44:37.000000000 -0400
57269 @@ -1383,7 +1383,7 @@ void hrtimer_peek_ahead_timers(void)
57270 local_irq_restore(flags);
57271 }
57272
57273 -static void run_hrtimer_softirq(struct softirq_action *h)
57274 +static void run_hrtimer_softirq(void)
57275 {
57276 hrtimer_peek_ahead_timers();
57277 }
57278 diff -urNp linux-2.6.39.4/kernel/irq/manage.c linux-2.6.39.4/kernel/irq/manage.c
57279 --- linux-2.6.39.4/kernel/irq/manage.c 2011-05-19 00:06:34.000000000 -0400
57280 +++ linux-2.6.39.4/kernel/irq/manage.c 2011-08-05 19:44:37.000000000 -0400
57281 @@ -491,6 +491,9 @@ int irq_set_irq_wake(unsigned int irq, u
57282 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
57283 int ret = 0;
57284
57285 + if (!desc)
57286 + return -EINVAL;
57287 +
57288 /* wakeup-capable irqs can be shared between drivers that
57289 * don't need to have the same sleep mode behaviors.
57290 */
57291 diff -urNp linux-2.6.39.4/kernel/jump_label.c linux-2.6.39.4/kernel/jump_label.c
57292 --- linux-2.6.39.4/kernel/jump_label.c 2011-05-19 00:06:34.000000000 -0400
57293 +++ linux-2.6.39.4/kernel/jump_label.c 2011-08-05 19:44:37.000000000 -0400
57294 @@ -49,6 +49,17 @@ void jump_label_unlock(void)
57295 mutex_unlock(&jump_label_mutex);
57296 }
57297
57298 +static void jump_label_swap(void *a, void *b, int size)
57299 +{
57300 + struct jump_entry t;
57301 +
57302 + t = *(struct jump_entry *)a;
57303 + pax_open_kernel();
57304 + *(struct jump_entry *)a = *(struct jump_entry *)b;
57305 + *(struct jump_entry *)b = t;
57306 + pax_close_kernel();
57307 +}
57308 +
57309 static int jump_label_cmp(const void *a, const void *b)
57310 {
57311 const struct jump_entry *jea = a;
57312 @@ -70,7 +81,7 @@ sort_jump_label_entries(struct jump_entr
57313
57314 size = (((unsigned long)stop - (unsigned long)start)
57315 / sizeof(struct jump_entry));
57316 - sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
57317 + sort(start, size, sizeof(struct jump_entry), jump_label_cmp, jump_label_swap);
57318 }
57319
57320 static struct jump_label_entry *get_jump_label_entry(jump_label_t key)
57321 @@ -407,8 +418,11 @@ static void remove_jump_label_module_ini
57322 count = e_module->nr_entries;
57323 iter = e_module->table;
57324 while (count--) {
57325 - if (within_module_init(iter->code, mod))
57326 + if (within_module_init(iter->code, mod)) {
57327 + pax_open_kernel();
57328 iter->key = 0;
57329 + pax_close_kernel();
57330 + }
57331 iter++;
57332 }
57333 }
57334 diff -urNp linux-2.6.39.4/kernel/kallsyms.c linux-2.6.39.4/kernel/kallsyms.c
57335 --- linux-2.6.39.4/kernel/kallsyms.c 2011-05-19 00:06:34.000000000 -0400
57336 +++ linux-2.6.39.4/kernel/kallsyms.c 2011-08-05 19:44:37.000000000 -0400
57337 @@ -11,6 +11,9 @@
57338 * Changed the compression method from stem compression to "table lookup"
57339 * compression (see scripts/kallsyms.c for a more complete description)
57340 */
57341 +#ifdef CONFIG_GRKERNSEC_HIDESYM
57342 +#define __INCLUDED_BY_HIDESYM 1
57343 +#endif
57344 #include <linux/kallsyms.h>
57345 #include <linux/module.h>
57346 #include <linux/init.h>
57347 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
57348
57349 static inline int is_kernel_inittext(unsigned long addr)
57350 {
57351 + if (system_state != SYSTEM_BOOTING)
57352 + return 0;
57353 +
57354 if (addr >= (unsigned long)_sinittext
57355 && addr <= (unsigned long)_einittext)
57356 return 1;
57357 return 0;
57358 }
57359
57360 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57361 +#ifdef CONFIG_MODULES
57362 +static inline int is_module_text(unsigned long addr)
57363 +{
57364 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
57365 + return 1;
57366 +
57367 + addr = ktla_ktva(addr);
57368 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
57369 +}
57370 +#else
57371 +static inline int is_module_text(unsigned long addr)
57372 +{
57373 + return 0;
57374 +}
57375 +#endif
57376 +#endif
57377 +
57378 static inline int is_kernel_text(unsigned long addr)
57379 {
57380 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
57381 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
57382
57383 static inline int is_kernel(unsigned long addr)
57384 {
57385 +
57386 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57387 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
57388 + return 1;
57389 +
57390 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
57391 +#else
57392 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
57393 +#endif
57394 +
57395 return 1;
57396 return in_gate_area_no_mm(addr);
57397 }
57398
57399 static int is_ksym_addr(unsigned long addr)
57400 {
57401 +
57402 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57403 + if (is_module_text(addr))
57404 + return 0;
57405 +#endif
57406 +
57407 if (all_var)
57408 return is_kernel(addr);
57409
57410 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
57411
57412 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
57413 {
57414 - iter->name[0] = '\0';
57415 iter->nameoff = get_symbol_offset(new_pos);
57416 iter->pos = new_pos;
57417 }
57418 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
57419 {
57420 struct kallsym_iter *iter = m->private;
57421
57422 +#ifdef CONFIG_GRKERNSEC_HIDESYM
57423 + if (current_uid())
57424 + return 0;
57425 +#endif
57426 +
57427 /* Some debugging symbols have no name. Ignore them. */
57428 if (!iter->name[0])
57429 return 0;
57430 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
57431 struct kallsym_iter *iter;
57432 int ret;
57433
57434 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
57435 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
57436 if (!iter)
57437 return -ENOMEM;
57438 reset_iter(iter, 0);
57439 diff -urNp linux-2.6.39.4/kernel/kmod.c linux-2.6.39.4/kernel/kmod.c
57440 --- linux-2.6.39.4/kernel/kmod.c 2011-05-19 00:06:34.000000000 -0400
57441 +++ linux-2.6.39.4/kernel/kmod.c 2011-08-05 19:44:37.000000000 -0400
57442 @@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
57443 * If module auto-loading support is disabled then this function
57444 * becomes a no-operation.
57445 */
57446 -int __request_module(bool wait, const char *fmt, ...)
57447 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
57448 {
57449 - va_list args;
57450 char module_name[MODULE_NAME_LEN];
57451 unsigned int max_modprobes;
57452 int ret;
57453 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
57454 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
57455 static char *envp[] = { "HOME=/",
57456 "TERM=linux",
57457 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
57458 @@ -80,9 +79,7 @@ int __request_module(bool wait, const ch
57459 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
57460 static int kmod_loop_msg;
57461
57462 - va_start(args, fmt);
57463 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
57464 - va_end(args);
57465 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
57466 if (ret >= MODULE_NAME_LEN)
57467 return -ENAMETOOLONG;
57468
57469 @@ -90,6 +87,20 @@ int __request_module(bool wait, const ch
57470 if (ret)
57471 return ret;
57472
57473 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57474 + if (!current_uid()) {
57475 + /* hack to workaround consolekit/udisks stupidity */
57476 + read_lock(&tasklist_lock);
57477 + if (!strcmp(current->comm, "mount") &&
57478 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
57479 + read_unlock(&tasklist_lock);
57480 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
57481 + return -EPERM;
57482 + }
57483 + read_unlock(&tasklist_lock);
57484 + }
57485 +#endif
57486 +
57487 /* If modprobe needs a service that is in a module, we get a recursive
57488 * loop. Limit the number of running kmod threads to max_threads/2 or
57489 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
57490 @@ -123,6 +134,47 @@ int __request_module(bool wait, const ch
57491 atomic_dec(&kmod_concurrent);
57492 return ret;
57493 }
57494 +
57495 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
57496 +{
57497 + va_list args;
57498 + int ret;
57499 +
57500 + va_start(args, fmt);
57501 + ret = ____request_module(wait, module_param, fmt, args);
57502 + va_end(args);
57503 +
57504 + return ret;
57505 +}
57506 +
57507 +int __request_module(bool wait, const char *fmt, ...)
57508 +{
57509 + va_list args;
57510 + int ret;
57511 +
57512 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57513 + if (current_uid()) {
57514 + char module_param[MODULE_NAME_LEN];
57515 +
57516 + memset(module_param, 0, sizeof(module_param));
57517 +
57518 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
57519 +
57520 + va_start(args, fmt);
57521 + ret = ____request_module(wait, module_param, fmt, args);
57522 + va_end(args);
57523 +
57524 + return ret;
57525 + }
57526 +#endif
57527 +
57528 + va_start(args, fmt);
57529 + ret = ____request_module(wait, NULL, fmt, args);
57530 + va_end(args);
57531 +
57532 + return ret;
57533 +}
57534 +
57535 EXPORT_SYMBOL(__request_module);
57536 #endif /* CONFIG_MODULES */
57537
57538 diff -urNp linux-2.6.39.4/kernel/kprobes.c linux-2.6.39.4/kernel/kprobes.c
57539 --- linux-2.6.39.4/kernel/kprobes.c 2011-05-19 00:06:34.000000000 -0400
57540 +++ linux-2.6.39.4/kernel/kprobes.c 2011-08-05 19:44:37.000000000 -0400
57541 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
57542 * kernel image and loaded module images reside. This is required
57543 * so x86_64 can correctly handle the %rip-relative fixups.
57544 */
57545 - kip->insns = module_alloc(PAGE_SIZE);
57546 + kip->insns = module_alloc_exec(PAGE_SIZE);
57547 if (!kip->insns) {
57548 kfree(kip);
57549 return NULL;
57550 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
57551 */
57552 if (!list_is_singular(&kip->list)) {
57553 list_del(&kip->list);
57554 - module_free(NULL, kip->insns);
57555 + module_free_exec(NULL, kip->insns);
57556 kfree(kip);
57557 }
57558 return 1;
57559 @@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
57560 {
57561 int i, err = 0;
57562 unsigned long offset = 0, size = 0;
57563 - char *modname, namebuf[128];
57564 + char *modname, namebuf[KSYM_NAME_LEN];
57565 const char *symbol_name;
57566 void *addr;
57567 struct kprobe_blackpoint *kb;
57568 @@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
57569 const char *sym = NULL;
57570 unsigned int i = *(loff_t *) v;
57571 unsigned long offset = 0;
57572 - char *modname, namebuf[128];
57573 + char *modname, namebuf[KSYM_NAME_LEN];
57574
57575 head = &kprobe_table[i];
57576 preempt_disable();
57577 diff -urNp linux-2.6.39.4/kernel/lockdep.c linux-2.6.39.4/kernel/lockdep.c
57578 --- linux-2.6.39.4/kernel/lockdep.c 2011-06-25 12:55:23.000000000 -0400
57579 +++ linux-2.6.39.4/kernel/lockdep.c 2011-08-05 19:44:37.000000000 -0400
57580 @@ -571,6 +571,10 @@ static int static_obj(void *obj)
57581 end = (unsigned long) &_end,
57582 addr = (unsigned long) obj;
57583
57584 +#ifdef CONFIG_PAX_KERNEXEC
57585 + start = ktla_ktva(start);
57586 +#endif
57587 +
57588 /*
57589 * static variable?
57590 */
57591 @@ -706,6 +710,7 @@ register_lock_class(struct lockdep_map *
57592 if (!static_obj(lock->key)) {
57593 debug_locks_off();
57594 printk("INFO: trying to register non-static key.\n");
57595 + printk("lock:%pS key:%pS.\n", lock, lock->key);
57596 printk("the code is fine but needs lockdep annotation.\n");
57597 printk("turning off the locking correctness validator.\n");
57598 dump_stack();
57599 @@ -2752,7 +2757,7 @@ static int __lock_acquire(struct lockdep
57600 if (!class)
57601 return 0;
57602 }
57603 - atomic_inc((atomic_t *)&class->ops);
57604 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
57605 if (very_verbose(class)) {
57606 printk("\nacquire class [%p] %s", class->key, class->name);
57607 if (class->name_version > 1)
57608 diff -urNp linux-2.6.39.4/kernel/lockdep_proc.c linux-2.6.39.4/kernel/lockdep_proc.c
57609 --- linux-2.6.39.4/kernel/lockdep_proc.c 2011-05-19 00:06:34.000000000 -0400
57610 +++ linux-2.6.39.4/kernel/lockdep_proc.c 2011-08-05 19:44:37.000000000 -0400
57611 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
57612
57613 static void print_name(struct seq_file *m, struct lock_class *class)
57614 {
57615 - char str[128];
57616 + char str[KSYM_NAME_LEN];
57617 const char *name = class->name;
57618
57619 if (!name) {
57620 diff -urNp linux-2.6.39.4/kernel/module.c linux-2.6.39.4/kernel/module.c
57621 --- linux-2.6.39.4/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
57622 +++ linux-2.6.39.4/kernel/module.c 2011-08-05 19:44:37.000000000 -0400
57623 @@ -57,6 +57,7 @@
57624 #include <linux/kmemleak.h>
57625 #include <linux/jump_label.h>
57626 #include <linux/pfn.h>
57627 +#include <linux/grsecurity.h>
57628
57629 #define CREATE_TRACE_POINTS
57630 #include <trace/events/module.h>
57631 @@ -118,7 +119,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
57632
57633 /* Bounds of module allocation, for speeding __module_address.
57634 * Protected by module_mutex. */
57635 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
57636 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
57637 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
57638
57639 int register_module_notifier(struct notifier_block * nb)
57640 {
57641 @@ -282,7 +284,7 @@ bool each_symbol(bool (*fn)(const struct
57642 return true;
57643
57644 list_for_each_entry_rcu(mod, &modules, list) {
57645 - struct symsearch arr[] = {
57646 + struct symsearch modarr[] = {
57647 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
57648 NOT_GPL_ONLY, false },
57649 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
57650 @@ -304,7 +306,7 @@ bool each_symbol(bool (*fn)(const struct
57651 #endif
57652 };
57653
57654 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
57655 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
57656 return true;
57657 }
57658 return false;
57659 @@ -415,7 +417,7 @@ static inline void __percpu *mod_percpu(
57660 static int percpu_modalloc(struct module *mod,
57661 unsigned long size, unsigned long align)
57662 {
57663 - if (align > PAGE_SIZE) {
57664 + if (align-1 >= PAGE_SIZE) {
57665 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
57666 mod->name, align, PAGE_SIZE);
57667 align = PAGE_SIZE;
57668 @@ -1143,7 +1145,7 @@ resolve_symbol_wait(struct module *mod,
57669 */
57670 #ifdef CONFIG_SYSFS
57671
57672 -#ifdef CONFIG_KALLSYMS
57673 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57674 static inline bool sect_empty(const Elf_Shdr *sect)
57675 {
57676 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
57677 @@ -1612,17 +1614,17 @@ void unset_section_ro_nx(struct module *
57678 {
57679 unsigned long total_pages;
57680
57681 - if (mod->module_core == module_region) {
57682 + if (mod->module_core_rx == module_region) {
57683 /* Set core as NX+RW */
57684 - total_pages = MOD_NUMBER_OF_PAGES(mod->module_core, mod->core_size);
57685 - set_memory_nx((unsigned long)mod->module_core, total_pages);
57686 - set_memory_rw((unsigned long)mod->module_core, total_pages);
57687 + total_pages = MOD_NUMBER_OF_PAGES(mod->module_core_rx, mod->core_size_rx);
57688 + set_memory_nx((unsigned long)mod->module_core_rx, total_pages);
57689 + set_memory_rw((unsigned long)mod->module_core_rx, total_pages);
57690
57691 - } else if (mod->module_init == module_region) {
57692 + } else if (mod->module_init_rx == module_region) {
57693 /* Set init as NX+RW */
57694 - total_pages = MOD_NUMBER_OF_PAGES(mod->module_init, mod->init_size);
57695 - set_memory_nx((unsigned long)mod->module_init, total_pages);
57696 - set_memory_rw((unsigned long)mod->module_init, total_pages);
57697 + total_pages = MOD_NUMBER_OF_PAGES(mod->module_init_rx, mod->init_size_rx);
57698 + set_memory_nx((unsigned long)mod->module_init_rx, total_pages);
57699 + set_memory_rw((unsigned long)mod->module_init_rx, total_pages);
57700 }
57701 }
57702
57703 @@ -1633,14 +1635,14 @@ void set_all_modules_text_rw()
57704
57705 mutex_lock(&module_mutex);
57706 list_for_each_entry_rcu(mod, &modules, list) {
57707 - if ((mod->module_core) && (mod->core_text_size)) {
57708 - set_page_attributes(mod->module_core,
57709 - mod->module_core + mod->core_text_size,
57710 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
57711 + set_page_attributes(mod->module_core_rx,
57712 + mod->module_core_rx + mod->core_size_rx,
57713 set_memory_rw);
57714 }
57715 - if ((mod->module_init) && (mod->init_text_size)) {
57716 - set_page_attributes(mod->module_init,
57717 - mod->module_init + mod->init_text_size,
57718 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
57719 + set_page_attributes(mod->module_init_rx,
57720 + mod->module_init_rx + mod->init_size_rx,
57721 set_memory_rw);
57722 }
57723 }
57724 @@ -1654,14 +1656,14 @@ void set_all_modules_text_ro()
57725
57726 mutex_lock(&module_mutex);
57727 list_for_each_entry_rcu(mod, &modules, list) {
57728 - if ((mod->module_core) && (mod->core_text_size)) {
57729 - set_page_attributes(mod->module_core,
57730 - mod->module_core + mod->core_text_size,
57731 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
57732 + set_page_attributes(mod->module_core_rx,
57733 + mod->module_core_rx + mod->core_size_rx,
57734 set_memory_ro);
57735 }
57736 - if ((mod->module_init) && (mod->init_text_size)) {
57737 - set_page_attributes(mod->module_init,
57738 - mod->module_init + mod->init_text_size,
57739 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
57740 + set_page_attributes(mod->module_init_rx,
57741 + mod->module_init_rx + mod->init_size_rx,
57742 set_memory_ro);
57743 }
57744 }
57745 @@ -1696,17 +1698,20 @@ static void free_module(struct module *m
57746 destroy_params(mod->kp, mod->num_kp);
57747
57748 /* This may be NULL, but that's OK */
57749 - unset_section_ro_nx(mod, mod->module_init);
57750 - module_free(mod, mod->module_init);
57751 + unset_section_ro_nx(mod, mod->module_init_rx);
57752 + module_free(mod, mod->module_init_rw);
57753 + module_free_exec(mod, mod->module_init_rx);
57754 kfree(mod->args);
57755 percpu_modfree(mod);
57756
57757 /* Free lock-classes: */
57758 - lockdep_free_key_range(mod->module_core, mod->core_size);
57759 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
57760 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
57761
57762 /* Finally, free the core (containing the module structure) */
57763 - unset_section_ro_nx(mod, mod->module_core);
57764 - module_free(mod, mod->module_core);
57765 + unset_section_ro_nx(mod, mod->module_core_rx);
57766 + module_free_exec(mod, mod->module_core_rx);
57767 + module_free(mod, mod->module_core_rw);
57768
57769 #ifdef CONFIG_MPU
57770 update_protections(current->mm);
57771 @@ -1775,10 +1780,31 @@ static int simplify_symbols(struct modul
57772 unsigned int i;
57773 int ret = 0;
57774 const struct kernel_symbol *ksym;
57775 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57776 + int is_fs_load = 0;
57777 + int register_filesystem_found = 0;
57778 + char *p;
57779 +
57780 + p = strstr(mod->args, "grsec_modharden_fs");
57781 + if (p) {
57782 + char *endptr = p + strlen("grsec_modharden_fs");
57783 + /* copy \0 as well */
57784 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
57785 + is_fs_load = 1;
57786 + }
57787 +#endif
57788
57789 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
57790 const char *name = info->strtab + sym[i].st_name;
57791
57792 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57793 + /* it's a real shame this will never get ripped and copied
57794 + upstream! ;(
57795 + */
57796 + if (is_fs_load && !strcmp(name, "register_filesystem"))
57797 + register_filesystem_found = 1;
57798 +#endif
57799 +
57800 switch (sym[i].st_shndx) {
57801 case SHN_COMMON:
57802 /* We compiled with -fno-common. These are not
57803 @@ -1799,7 +1825,9 @@ static int simplify_symbols(struct modul
57804 ksym = resolve_symbol_wait(mod, info, name);
57805 /* Ok if resolved. */
57806 if (ksym && !IS_ERR(ksym)) {
57807 + pax_open_kernel();
57808 sym[i].st_value = ksym->value;
57809 + pax_close_kernel();
57810 break;
57811 }
57812
57813 @@ -1818,11 +1846,20 @@ static int simplify_symbols(struct modul
57814 secbase = (unsigned long)mod_percpu(mod);
57815 else
57816 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
57817 + pax_open_kernel();
57818 sym[i].st_value += secbase;
57819 + pax_close_kernel();
57820 break;
57821 }
57822 }
57823
57824 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57825 + if (is_fs_load && !register_filesystem_found) {
57826 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
57827 + ret = -EPERM;
57828 + }
57829 +#endif
57830 +
57831 return ret;
57832 }
57833
57834 @@ -1906,22 +1943,12 @@ static void layout_sections(struct modul
57835 || s->sh_entsize != ~0UL
57836 || strstarts(sname, ".init"))
57837 continue;
57838 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
57839 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57840 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
57841 + else
57842 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
57843 DEBUGP("\t%s\n", name);
57844 }
57845 - switch (m) {
57846 - case 0: /* executable */
57847 - mod->core_size = debug_align(mod->core_size);
57848 - mod->core_text_size = mod->core_size;
57849 - break;
57850 - case 1: /* RO: text and ro-data */
57851 - mod->core_size = debug_align(mod->core_size);
57852 - mod->core_ro_size = mod->core_size;
57853 - break;
57854 - case 3: /* whole core */
57855 - mod->core_size = debug_align(mod->core_size);
57856 - break;
57857 - }
57858 }
57859
57860 DEBUGP("Init section allocation order:\n");
57861 @@ -1935,23 +1962,13 @@ static void layout_sections(struct modul
57862 || s->sh_entsize != ~0UL
57863 || !strstarts(sname, ".init"))
57864 continue;
57865 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
57866 - | INIT_OFFSET_MASK);
57867 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57868 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
57869 + else
57870 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
57871 + s->sh_entsize |= INIT_OFFSET_MASK;
57872 DEBUGP("\t%s\n", sname);
57873 }
57874 - switch (m) {
57875 - case 0: /* executable */
57876 - mod->init_size = debug_align(mod->init_size);
57877 - mod->init_text_size = mod->init_size;
57878 - break;
57879 - case 1: /* RO: text and ro-data */
57880 - mod->init_size = debug_align(mod->init_size);
57881 - mod->init_ro_size = mod->init_size;
57882 - break;
57883 - case 3: /* whole init */
57884 - mod->init_size = debug_align(mod->init_size);
57885 - break;
57886 - }
57887 }
57888 }
57889
57890 @@ -2119,7 +2136,7 @@ static void layout_symtab(struct module
57891
57892 /* Put symbol section at end of init part of module. */
57893 symsect->sh_flags |= SHF_ALLOC;
57894 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
57895 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
57896 info->index.sym) | INIT_OFFSET_MASK;
57897 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
57898
57899 @@ -2136,19 +2153,19 @@ static void layout_symtab(struct module
57900 }
57901
57902 /* Append room for core symbols at end of core part. */
57903 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
57904 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
57905 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
57906 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
57907
57908 /* Put string table section at end of init part of module. */
57909 strsect->sh_flags |= SHF_ALLOC;
57910 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
57911 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
57912 info->index.str) | INIT_OFFSET_MASK;
57913 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
57914
57915 /* Append room for core symbols' strings at end of core part. */
57916 - info->stroffs = mod->core_size;
57917 + info->stroffs = mod->core_size_rx;
57918 __set_bit(0, info->strmap);
57919 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
57920 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
57921 }
57922
57923 static void add_kallsyms(struct module *mod, const struct load_info *info)
57924 @@ -2164,11 +2181,13 @@ static void add_kallsyms(struct module *
57925 /* Make sure we get permanent strtab: don't use info->strtab. */
57926 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
57927
57928 + pax_open_kernel();
57929 +
57930 /* Set types up while we still have access to sections. */
57931 for (i = 0; i < mod->num_symtab; i++)
57932 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
57933
57934 - mod->core_symtab = dst = mod->module_core + info->symoffs;
57935 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
57936 src = mod->symtab;
57937 *dst = *src;
57938 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
57939 @@ -2181,10 +2200,12 @@ static void add_kallsyms(struct module *
57940 }
57941 mod->core_num_syms = ndst;
57942
57943 - mod->core_strtab = s = mod->module_core + info->stroffs;
57944 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
57945 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
57946 if (test_bit(i, info->strmap))
57947 *++s = mod->strtab[i];
57948 +
57949 + pax_close_kernel();
57950 }
57951 #else
57952 static inline void layout_symtab(struct module *mod, struct load_info *info)
57953 @@ -2213,17 +2234,33 @@ static void dynamic_debug_remove(struct
57954 ddebug_remove_module(debug->modname);
57955 }
57956
57957 -static void *module_alloc_update_bounds(unsigned long size)
57958 +static void *module_alloc_update_bounds_rw(unsigned long size)
57959 {
57960 void *ret = module_alloc(size);
57961
57962 if (ret) {
57963 mutex_lock(&module_mutex);
57964 /* Update module bounds. */
57965 - if ((unsigned long)ret < module_addr_min)
57966 - module_addr_min = (unsigned long)ret;
57967 - if ((unsigned long)ret + size > module_addr_max)
57968 - module_addr_max = (unsigned long)ret + size;
57969 + if ((unsigned long)ret < module_addr_min_rw)
57970 + module_addr_min_rw = (unsigned long)ret;
57971 + if ((unsigned long)ret + size > module_addr_max_rw)
57972 + module_addr_max_rw = (unsigned long)ret + size;
57973 + mutex_unlock(&module_mutex);
57974 + }
57975 + return ret;
57976 +}
57977 +
57978 +static void *module_alloc_update_bounds_rx(unsigned long size)
57979 +{
57980 + void *ret = module_alloc_exec(size);
57981 +
57982 + if (ret) {
57983 + mutex_lock(&module_mutex);
57984 + /* Update module bounds. */
57985 + if ((unsigned long)ret < module_addr_min_rx)
57986 + module_addr_min_rx = (unsigned long)ret;
57987 + if ((unsigned long)ret + size > module_addr_max_rx)
57988 + module_addr_max_rx = (unsigned long)ret + size;
57989 mutex_unlock(&module_mutex);
57990 }
57991 return ret;
57992 @@ -2516,7 +2553,7 @@ static int move_module(struct module *mo
57993 void *ptr;
57994
57995 /* Do the allocs. */
57996 - ptr = module_alloc_update_bounds(mod->core_size);
57997 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
57998 /*
57999 * The pointer to this block is stored in the module structure
58000 * which is inside the block. Just mark it as not being a
58001 @@ -2526,23 +2563,50 @@ static int move_module(struct module *mo
58002 if (!ptr)
58003 return -ENOMEM;
58004
58005 - memset(ptr, 0, mod->core_size);
58006 - mod->module_core = ptr;
58007 + memset(ptr, 0, mod->core_size_rw);
58008 + mod->module_core_rw = ptr;
58009
58010 - ptr = module_alloc_update_bounds(mod->init_size);
58011 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
58012 /*
58013 * The pointer to this block is stored in the module structure
58014 * which is inside the block. This block doesn't need to be
58015 * scanned as it contains data and code that will be freed
58016 * after the module is initialized.
58017 */
58018 - kmemleak_ignore(ptr);
58019 - if (!ptr && mod->init_size) {
58020 - module_free(mod, mod->module_core);
58021 + kmemleak_not_leak(ptr);
58022 + if (!ptr && mod->init_size_rw) {
58023 + module_free(mod, mod->module_core_rw);
58024 return -ENOMEM;
58025 }
58026 - memset(ptr, 0, mod->init_size);
58027 - mod->module_init = ptr;
58028 + memset(ptr, 0, mod->init_size_rw);
58029 + mod->module_init_rw = ptr;
58030 +
58031 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
58032 + kmemleak_not_leak(ptr);
58033 + if (!ptr) {
58034 + module_free(mod, mod->module_init_rw);
58035 + module_free(mod, mod->module_core_rw);
58036 + return -ENOMEM;
58037 + }
58038 +
58039 + pax_open_kernel();
58040 + memset(ptr, 0, mod->core_size_rx);
58041 + pax_close_kernel();
58042 + mod->module_core_rx = ptr;
58043 +
58044 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
58045 + kmemleak_not_leak(ptr);
58046 + if (!ptr && mod->init_size_rx) {
58047 + module_free_exec(mod, mod->module_core_rx);
58048 + module_free(mod, mod->module_init_rw);
58049 + module_free(mod, mod->module_core_rw);
58050 + return -ENOMEM;
58051 + }
58052 +
58053 + pax_open_kernel();
58054 + memset(ptr, 0, mod->init_size_rx);
58055 + pax_close_kernel();
58056 + mod->module_init_rx = ptr;
58057
58058 /* Transfer each section which specifies SHF_ALLOC */
58059 DEBUGP("final section addresses:\n");
58060 @@ -2553,16 +2617,45 @@ static int move_module(struct module *mo
58061 if (!(shdr->sh_flags & SHF_ALLOC))
58062 continue;
58063
58064 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
58065 - dest = mod->module_init
58066 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
58067 - else
58068 - dest = mod->module_core + shdr->sh_entsize;
58069 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
58070 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
58071 + dest = mod->module_init_rw
58072 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
58073 + else
58074 + dest = mod->module_init_rx
58075 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
58076 + } else {
58077 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
58078 + dest = mod->module_core_rw + shdr->sh_entsize;
58079 + else
58080 + dest = mod->module_core_rx + shdr->sh_entsize;
58081 + }
58082 +
58083 + if (shdr->sh_type != SHT_NOBITS) {
58084 +
58085 +#ifdef CONFIG_PAX_KERNEXEC
58086 +#ifdef CONFIG_X86_64
58087 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
58088 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
58089 +#endif
58090 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
58091 + pax_open_kernel();
58092 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
58093 + pax_close_kernel();
58094 + } else
58095 +#endif
58096
58097 - if (shdr->sh_type != SHT_NOBITS)
58098 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
58099 + }
58100 /* Update sh_addr to point to copy in image. */
58101 - shdr->sh_addr = (unsigned long)dest;
58102 +
58103 +#ifdef CONFIG_PAX_KERNEXEC
58104 + if (shdr->sh_flags & SHF_EXECINSTR)
58105 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
58106 + else
58107 +#endif
58108 +
58109 + shdr->sh_addr = (unsigned long)dest;
58110 DEBUGP("\t0x%lx %s\n",
58111 shdr->sh_addr, info->secstrings + shdr->sh_name);
58112 }
58113 @@ -2613,12 +2706,12 @@ static void flush_module_icache(const st
58114 * Do it before processing of module parameters, so the module
58115 * can provide parameter accessor functions of its own.
58116 */
58117 - if (mod->module_init)
58118 - flush_icache_range((unsigned long)mod->module_init,
58119 - (unsigned long)mod->module_init
58120 - + mod->init_size);
58121 - flush_icache_range((unsigned long)mod->module_core,
58122 - (unsigned long)mod->module_core + mod->core_size);
58123 + if (mod->module_init_rx)
58124 + flush_icache_range((unsigned long)mod->module_init_rx,
58125 + (unsigned long)mod->module_init_rx
58126 + + mod->init_size_rx);
58127 + flush_icache_range((unsigned long)mod->module_core_rx,
58128 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
58129
58130 set_fs(old_fs);
58131 }
58132 @@ -2690,8 +2783,10 @@ static void module_deallocate(struct mod
58133 {
58134 kfree(info->strmap);
58135 percpu_modfree(mod);
58136 - module_free(mod, mod->module_init);
58137 - module_free(mod, mod->module_core);
58138 + module_free_exec(mod, mod->module_init_rx);
58139 + module_free_exec(mod, mod->module_core_rx);
58140 + module_free(mod, mod->module_init_rw);
58141 + module_free(mod, mod->module_core_rw);
58142 }
58143
58144 static int post_relocation(struct module *mod, const struct load_info *info)
58145 @@ -2748,9 +2843,38 @@ static struct module *load_module(void _
58146 if (err)
58147 goto free_unload;
58148
58149 + /* Now copy in args */
58150 + mod->args = strndup_user(uargs, ~0UL >> 1);
58151 + if (IS_ERR(mod->args)) {
58152 + err = PTR_ERR(mod->args);
58153 + goto free_unload;
58154 + }
58155 +
58156 /* Set up MODINFO_ATTR fields */
58157 setup_modinfo(mod, &info);
58158
58159 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
58160 + {
58161 + char *p, *p2;
58162 +
58163 + if (strstr(mod->args, "grsec_modharden_netdev")) {
58164 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
58165 + err = -EPERM;
58166 + goto free_modinfo;
58167 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
58168 + p += strlen("grsec_modharden_normal");
58169 + p2 = strstr(p, "_");
58170 + if (p2) {
58171 + *p2 = '\0';
58172 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
58173 + *p2 = '_';
58174 + }
58175 + err = -EPERM;
58176 + goto free_modinfo;
58177 + }
58178 + }
58179 +#endif
58180 +
58181 /* Fix up syms, so that st_value is a pointer to location. */
58182 err = simplify_symbols(mod, &info);
58183 if (err < 0)
58184 @@ -2766,13 +2890,6 @@ static struct module *load_module(void _
58185
58186 flush_module_icache(mod);
58187
58188 - /* Now copy in args */
58189 - mod->args = strndup_user(uargs, ~0UL >> 1);
58190 - if (IS_ERR(mod->args)) {
58191 - err = PTR_ERR(mod->args);
58192 - goto free_arch_cleanup;
58193 - }
58194 -
58195 /* Mark state as coming so strong_try_module_get() ignores us. */
58196 mod->state = MODULE_STATE_COMING;
58197
58198 @@ -2832,11 +2949,10 @@ static struct module *load_module(void _
58199 unlock:
58200 mutex_unlock(&module_mutex);
58201 synchronize_sched();
58202 - kfree(mod->args);
58203 - free_arch_cleanup:
58204 module_arch_cleanup(mod);
58205 free_modinfo:
58206 free_modinfo(mod);
58207 + kfree(mod->args);
58208 free_unload:
58209 module_unload_free(mod);
58210 free_module:
58211 @@ -2877,16 +2993,16 @@ SYSCALL_DEFINE3(init_module, void __user
58212 MODULE_STATE_COMING, mod);
58213
58214 /* Set RO and NX regions for core */
58215 - set_section_ro_nx(mod->module_core,
58216 - mod->core_text_size,
58217 - mod->core_ro_size,
58218 - mod->core_size);
58219 + set_section_ro_nx(mod->module_core_rx,
58220 + mod->core_size_rx,
58221 + mod->core_size_rx,
58222 + mod->core_size_rx);
58223
58224 /* Set RO and NX regions for init */
58225 - set_section_ro_nx(mod->module_init,
58226 - mod->init_text_size,
58227 - mod->init_ro_size,
58228 - mod->init_size);
58229 + set_section_ro_nx(mod->module_init_rx,
58230 + mod->init_size_rx,
58231 + mod->init_size_rx,
58232 + mod->init_size_rx);
58233
58234 do_mod_ctors(mod);
58235 /* Start the module */
58236 @@ -2931,11 +3047,13 @@ SYSCALL_DEFINE3(init_module, void __user
58237 mod->symtab = mod->core_symtab;
58238 mod->strtab = mod->core_strtab;
58239 #endif
58240 - unset_section_ro_nx(mod, mod->module_init);
58241 - module_free(mod, mod->module_init);
58242 - mod->module_init = NULL;
58243 - mod->init_size = 0;
58244 - mod->init_text_size = 0;
58245 + unset_section_ro_nx(mod, mod->module_init_rx);
58246 + module_free(mod, mod->module_init_rw);
58247 + module_free_exec(mod, mod->module_init_rx);
58248 + mod->module_init_rw = NULL;
58249 + mod->module_init_rx = NULL;
58250 + mod->init_size_rw = 0;
58251 + mod->init_size_rx = 0;
58252 mutex_unlock(&module_mutex);
58253
58254 return 0;
58255 @@ -2966,10 +3084,16 @@ static const char *get_ksymbol(struct mo
58256 unsigned long nextval;
58257
58258 /* At worse, next value is at end of module */
58259 - if (within_module_init(addr, mod))
58260 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
58261 + if (within_module_init_rx(addr, mod))
58262 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
58263 + else if (within_module_init_rw(addr, mod))
58264 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
58265 + else if (within_module_core_rx(addr, mod))
58266 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
58267 + else if (within_module_core_rw(addr, mod))
58268 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
58269 else
58270 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
58271 + return NULL;
58272
58273 /* Scan for closest preceding symbol, and next symbol. (ELF
58274 starts real symbols at 1). */
58275 @@ -3215,7 +3339,7 @@ static int m_show(struct seq_file *m, vo
58276 char buf[8];
58277
58278 seq_printf(m, "%s %u",
58279 - mod->name, mod->init_size + mod->core_size);
58280 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
58281 print_unload_info(m, mod);
58282
58283 /* Informative for users. */
58284 @@ -3224,7 +3348,7 @@ static int m_show(struct seq_file *m, vo
58285 mod->state == MODULE_STATE_COMING ? "Loading":
58286 "Live");
58287 /* Used by oprofile and other similar tools. */
58288 - seq_printf(m, " 0x%pK", mod->module_core);
58289 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
58290
58291 /* Taints info */
58292 if (mod->taints)
58293 @@ -3260,7 +3384,17 @@ static const struct file_operations proc
58294
58295 static int __init proc_modules_init(void)
58296 {
58297 +#ifndef CONFIG_GRKERNSEC_HIDESYM
58298 +#ifdef CONFIG_GRKERNSEC_PROC_USER
58299 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
58300 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58301 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
58302 +#else
58303 proc_create("modules", 0, NULL, &proc_modules_operations);
58304 +#endif
58305 +#else
58306 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
58307 +#endif
58308 return 0;
58309 }
58310 module_init(proc_modules_init);
58311 @@ -3319,12 +3453,12 @@ struct module *__module_address(unsigned
58312 {
58313 struct module *mod;
58314
58315 - if (addr < module_addr_min || addr > module_addr_max)
58316 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
58317 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
58318 return NULL;
58319
58320 list_for_each_entry_rcu(mod, &modules, list)
58321 - if (within_module_core(addr, mod)
58322 - || within_module_init(addr, mod))
58323 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
58324 return mod;
58325 return NULL;
58326 }
58327 @@ -3358,11 +3492,20 @@ bool is_module_text_address(unsigned lon
58328 */
58329 struct module *__module_text_address(unsigned long addr)
58330 {
58331 - struct module *mod = __module_address(addr);
58332 + struct module *mod;
58333 +
58334 +#ifdef CONFIG_X86_32
58335 + addr = ktla_ktva(addr);
58336 +#endif
58337 +
58338 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
58339 + return NULL;
58340 +
58341 + mod = __module_address(addr);
58342 +
58343 if (mod) {
58344 /* Make sure it's within the text section. */
58345 - if (!within(addr, mod->module_init, mod->init_text_size)
58346 - && !within(addr, mod->module_core, mod->core_text_size))
58347 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
58348 mod = NULL;
58349 }
58350 return mod;
58351 diff -urNp linux-2.6.39.4/kernel/mutex.c linux-2.6.39.4/kernel/mutex.c
58352 --- linux-2.6.39.4/kernel/mutex.c 2011-05-19 00:06:34.000000000 -0400
58353 +++ linux-2.6.39.4/kernel/mutex.c 2011-08-05 19:44:37.000000000 -0400
58354 @@ -160,7 +160,7 @@ __mutex_lock_common(struct mutex *lock,
58355 */
58356
58357 for (;;) {
58358 - struct thread_info *owner;
58359 + struct task_struct *owner;
58360
58361 /*
58362 * If we own the BKL, then don't spin. The owner of
58363 @@ -205,7 +205,7 @@ __mutex_lock_common(struct mutex *lock,
58364 spin_lock_mutex(&lock->wait_lock, flags);
58365
58366 debug_mutex_lock_common(lock, &waiter);
58367 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
58368 + debug_mutex_add_waiter(lock, &waiter, task);
58369
58370 /* add waiting tasks to the end of the waitqueue (FIFO): */
58371 list_add_tail(&waiter.list, &lock->wait_list);
58372 @@ -234,8 +234,7 @@ __mutex_lock_common(struct mutex *lock,
58373 * TASK_UNINTERRUPTIBLE case.)
58374 */
58375 if (unlikely(signal_pending_state(state, task))) {
58376 - mutex_remove_waiter(lock, &waiter,
58377 - task_thread_info(task));
58378 + mutex_remove_waiter(lock, &waiter, task);
58379 mutex_release(&lock->dep_map, 1, ip);
58380 spin_unlock_mutex(&lock->wait_lock, flags);
58381
58382 @@ -256,7 +255,7 @@ __mutex_lock_common(struct mutex *lock,
58383 done:
58384 lock_acquired(&lock->dep_map, ip);
58385 /* got the lock - rejoice! */
58386 - mutex_remove_waiter(lock, &waiter, current_thread_info());
58387 + mutex_remove_waiter(lock, &waiter, task);
58388 mutex_set_owner(lock);
58389
58390 /* set it to 0 if there are no waiters left: */
58391 diff -urNp linux-2.6.39.4/kernel/mutex-debug.c linux-2.6.39.4/kernel/mutex-debug.c
58392 --- linux-2.6.39.4/kernel/mutex-debug.c 2011-05-19 00:06:34.000000000 -0400
58393 +++ linux-2.6.39.4/kernel/mutex-debug.c 2011-08-05 19:44:37.000000000 -0400
58394 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
58395 }
58396
58397 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58398 - struct thread_info *ti)
58399 + struct task_struct *task)
58400 {
58401 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
58402
58403 /* Mark the current thread as blocked on the lock: */
58404 - ti->task->blocked_on = waiter;
58405 + task->blocked_on = waiter;
58406 }
58407
58408 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58409 - struct thread_info *ti)
58410 + struct task_struct *task)
58411 {
58412 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
58413 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
58414 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
58415 - ti->task->blocked_on = NULL;
58416 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
58417 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
58418 + task->blocked_on = NULL;
58419
58420 list_del_init(&waiter->list);
58421 waiter->task = NULL;
58422 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
58423 return;
58424
58425 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
58426 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
58427 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
58428 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
58429 mutex_clear_owner(lock);
58430 }
58431 diff -urNp linux-2.6.39.4/kernel/mutex-debug.h linux-2.6.39.4/kernel/mutex-debug.h
58432 --- linux-2.6.39.4/kernel/mutex-debug.h 2011-05-19 00:06:34.000000000 -0400
58433 +++ linux-2.6.39.4/kernel/mutex-debug.h 2011-08-05 19:44:37.000000000 -0400
58434 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
58435 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
58436 extern void debug_mutex_add_waiter(struct mutex *lock,
58437 struct mutex_waiter *waiter,
58438 - struct thread_info *ti);
58439 + struct task_struct *task);
58440 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58441 - struct thread_info *ti);
58442 + struct task_struct *task);
58443 extern void debug_mutex_unlock(struct mutex *lock);
58444 extern void debug_mutex_init(struct mutex *lock, const char *name,
58445 struct lock_class_key *key);
58446
58447 static inline void mutex_set_owner(struct mutex *lock)
58448 {
58449 - lock->owner = current_thread_info();
58450 + lock->owner = current;
58451 }
58452
58453 static inline void mutex_clear_owner(struct mutex *lock)
58454 diff -urNp linux-2.6.39.4/kernel/mutex.h linux-2.6.39.4/kernel/mutex.h
58455 --- linux-2.6.39.4/kernel/mutex.h 2011-05-19 00:06:34.000000000 -0400
58456 +++ linux-2.6.39.4/kernel/mutex.h 2011-08-05 19:44:37.000000000 -0400
58457 @@ -19,7 +19,7 @@
58458 #ifdef CONFIG_SMP
58459 static inline void mutex_set_owner(struct mutex *lock)
58460 {
58461 - lock->owner = current_thread_info();
58462 + lock->owner = current;
58463 }
58464
58465 static inline void mutex_clear_owner(struct mutex *lock)
58466 diff -urNp linux-2.6.39.4/kernel/padata.c linux-2.6.39.4/kernel/padata.c
58467 --- linux-2.6.39.4/kernel/padata.c 2011-05-19 00:06:34.000000000 -0400
58468 +++ linux-2.6.39.4/kernel/padata.c 2011-08-05 19:44:37.000000000 -0400
58469 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
58470 padata->pd = pd;
58471 padata->cb_cpu = cb_cpu;
58472
58473 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
58474 - atomic_set(&pd->seq_nr, -1);
58475 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
58476 + atomic_set_unchecked(&pd->seq_nr, -1);
58477
58478 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
58479 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
58480
58481 target_cpu = padata_cpu_hash(padata);
58482 queue = per_cpu_ptr(pd->pqueue, target_cpu);
58483 @@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
58484 padata_init_pqueues(pd);
58485 padata_init_squeues(pd);
58486 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
58487 - atomic_set(&pd->seq_nr, -1);
58488 + atomic_set_unchecked(&pd->seq_nr, -1);
58489 atomic_set(&pd->reorder_objects, 0);
58490 atomic_set(&pd->refcnt, 0);
58491 pd->pinst = pinst;
58492 diff -urNp linux-2.6.39.4/kernel/panic.c linux-2.6.39.4/kernel/panic.c
58493 --- linux-2.6.39.4/kernel/panic.c 2011-05-19 00:06:34.000000000 -0400
58494 +++ linux-2.6.39.4/kernel/panic.c 2011-08-05 19:44:37.000000000 -0400
58495 @@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
58496 const char *board;
58497
58498 printk(KERN_WARNING "------------[ cut here ]------------\n");
58499 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
58500 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
58501 board = dmi_get_system_info(DMI_PRODUCT_NAME);
58502 if (board)
58503 printk(KERN_WARNING "Hardware name: %s\n", board);
58504 @@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
58505 */
58506 void __stack_chk_fail(void)
58507 {
58508 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
58509 + dump_stack();
58510 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
58511 __builtin_return_address(0));
58512 }
58513 EXPORT_SYMBOL(__stack_chk_fail);
58514 diff -urNp linux-2.6.39.4/kernel/perf_event.c linux-2.6.39.4/kernel/perf_event.c
58515 --- linux-2.6.39.4/kernel/perf_event.c 2011-05-19 00:06:34.000000000 -0400
58516 +++ linux-2.6.39.4/kernel/perf_event.c 2011-08-05 20:34:06.000000000 -0400
58517 @@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
58518 return 0;
58519 }
58520
58521 -static atomic64_t perf_event_id;
58522 +static atomic64_unchecked_t perf_event_id;
58523
58524 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
58525 enum event_type_t event_type);
58526 @@ -2496,7 +2496,7 @@ static void __perf_event_read(void *info
58527
58528 static inline u64 perf_event_count(struct perf_event *event)
58529 {
58530 - return local64_read(&event->count) + atomic64_read(&event->child_count);
58531 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
58532 }
58533
58534 static u64 perf_event_read(struct perf_event *event)
58535 @@ -3031,9 +3031,9 @@ u64 perf_event_read_value(struct perf_ev
58536 mutex_lock(&event->child_mutex);
58537 total += perf_event_read(event);
58538 *enabled += event->total_time_enabled +
58539 - atomic64_read(&event->child_total_time_enabled);
58540 + atomic64_read_unchecked(&event->child_total_time_enabled);
58541 *running += event->total_time_running +
58542 - atomic64_read(&event->child_total_time_running);
58543 + atomic64_read_unchecked(&event->child_total_time_running);
58544
58545 list_for_each_entry(child, &event->child_list, child_list) {
58546 total += perf_event_read(child);
58547 @@ -3396,10 +3396,10 @@ void perf_event_update_userpage(struct p
58548 userpg->offset -= local64_read(&event->hw.prev_count);
58549
58550 userpg->time_enabled = event->total_time_enabled +
58551 - atomic64_read(&event->child_total_time_enabled);
58552 + atomic64_read_unchecked(&event->child_total_time_enabled);
58553
58554 userpg->time_running = event->total_time_running +
58555 - atomic64_read(&event->child_total_time_running);
58556 + atomic64_read_unchecked(&event->child_total_time_running);
58557
58558 barrier();
58559 ++userpg->lock;
58560 @@ -4196,11 +4196,11 @@ static void perf_output_read_one(struct
58561 values[n++] = perf_event_count(event);
58562 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
58563 values[n++] = enabled +
58564 - atomic64_read(&event->child_total_time_enabled);
58565 + atomic64_read_unchecked(&event->child_total_time_enabled);
58566 }
58567 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
58568 values[n++] = running +
58569 - atomic64_read(&event->child_total_time_running);
58570 + atomic64_read_unchecked(&event->child_total_time_running);
58571 }
58572 if (read_format & PERF_FORMAT_ID)
58573 values[n++] = primary_event_id(event);
58574 @@ -6201,7 +6201,7 @@ perf_event_alloc(struct perf_event_attr
58575 event->parent = parent_event;
58576
58577 event->ns = get_pid_ns(current->nsproxy->pid_ns);
58578 - event->id = atomic64_inc_return(&perf_event_id);
58579 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
58580
58581 event->state = PERF_EVENT_STATE_INACTIVE;
58582
58583 @@ -6724,10 +6724,10 @@ static void sync_child_event(struct perf
58584 /*
58585 * Add back the child's count to the parent's count:
58586 */
58587 - atomic64_add(child_val, &parent_event->child_count);
58588 - atomic64_add(child_event->total_time_enabled,
58589 + atomic64_add_unchecked(child_val, &parent_event->child_count);
58590 + atomic64_add_unchecked(child_event->total_time_enabled,
58591 &parent_event->child_total_time_enabled);
58592 - atomic64_add(child_event->total_time_running,
58593 + atomic64_add_unchecked(child_event->total_time_running,
58594 &parent_event->child_total_time_running);
58595
58596 /*
58597 diff -urNp linux-2.6.39.4/kernel/pid.c linux-2.6.39.4/kernel/pid.c
58598 --- linux-2.6.39.4/kernel/pid.c 2011-05-19 00:06:34.000000000 -0400
58599 +++ linux-2.6.39.4/kernel/pid.c 2011-08-05 19:44:37.000000000 -0400
58600 @@ -33,6 +33,7 @@
58601 #include <linux/rculist.h>
58602 #include <linux/bootmem.h>
58603 #include <linux/hash.h>
58604 +#include <linux/security.h>
58605 #include <linux/pid_namespace.h>
58606 #include <linux/init_task.h>
58607 #include <linux/syscalls.h>
58608 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
58609
58610 int pid_max = PID_MAX_DEFAULT;
58611
58612 -#define RESERVED_PIDS 300
58613 +#define RESERVED_PIDS 500
58614
58615 int pid_max_min = RESERVED_PIDS + 1;
58616 int pid_max_max = PID_MAX_LIMIT;
58617 @@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
58618 */
58619 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
58620 {
58621 + struct task_struct *task;
58622 +
58623 rcu_lockdep_assert(rcu_read_lock_held());
58624 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58625 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58626 +
58627 + if (gr_pid_is_chrooted(task))
58628 + return NULL;
58629 +
58630 + return task;
58631 }
58632
58633 struct task_struct *find_task_by_vpid(pid_t vnr)
58634 @@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
58635 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
58636 }
58637
58638 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
58639 +{
58640 + rcu_lockdep_assert(rcu_read_lock_held());
58641 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
58642 +}
58643 +
58644 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
58645 {
58646 struct pid *pid;
58647 diff -urNp linux-2.6.39.4/kernel/posix-cpu-timers.c linux-2.6.39.4/kernel/posix-cpu-timers.c
58648 --- linux-2.6.39.4/kernel/posix-cpu-timers.c 2011-05-19 00:06:34.000000000 -0400
58649 +++ linux-2.6.39.4/kernel/posix-cpu-timers.c 2011-08-06 09:34:48.000000000 -0400
58650 @@ -6,6 +6,7 @@
58651 #include <linux/posix-timers.h>
58652 #include <linux/errno.h>
58653 #include <linux/math64.h>
58654 +#include <linux/security.h>
58655 #include <asm/uaccess.h>
58656 #include <linux/kernel_stat.h>
58657 #include <trace/events/timer.h>
58658 @@ -1604,14 +1605,14 @@ struct k_clock clock_posix_cpu = {
58659
58660 static __init int init_posix_cpu_timers(void)
58661 {
58662 - struct k_clock process = {
58663 + static struct k_clock process = {
58664 .clock_getres = process_cpu_clock_getres,
58665 .clock_get = process_cpu_clock_get,
58666 .timer_create = process_cpu_timer_create,
58667 .nsleep = process_cpu_nsleep,
58668 .nsleep_restart = process_cpu_nsleep_restart,
58669 };
58670 - struct k_clock thread = {
58671 + static struct k_clock thread = {
58672 .clock_getres = thread_cpu_clock_getres,
58673 .clock_get = thread_cpu_clock_get,
58674 .timer_create = thread_cpu_timer_create,
58675 diff -urNp linux-2.6.39.4/kernel/posix-timers.c linux-2.6.39.4/kernel/posix-timers.c
58676 --- linux-2.6.39.4/kernel/posix-timers.c 2011-05-19 00:06:34.000000000 -0400
58677 +++ linux-2.6.39.4/kernel/posix-timers.c 2011-08-06 09:30:46.000000000 -0400
58678 @@ -43,6 +43,7 @@
58679 #include <linux/idr.h>
58680 #include <linux/posix-clock.h>
58681 #include <linux/posix-timers.h>
58682 +#include <linux/grsecurity.h>
58683 #include <linux/syscalls.h>
58684 #include <linux/wait.h>
58685 #include <linux/workqueue.h>
58686 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
58687 * which we beg off on and pass to do_sys_settimeofday().
58688 */
58689
58690 -static struct k_clock posix_clocks[MAX_CLOCKS];
58691 +static struct k_clock *posix_clocks[MAX_CLOCKS];
58692
58693 /*
58694 * These ones are defined below.
58695 @@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
58696 */
58697 static __init int init_posix_timers(void)
58698 {
58699 - struct k_clock clock_realtime = {
58700 + static struct k_clock clock_realtime = {
58701 .clock_getres = hrtimer_get_res,
58702 .clock_get = posix_clock_realtime_get,
58703 .clock_set = posix_clock_realtime_set,
58704 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void
58705 .timer_get = common_timer_get,
58706 .timer_del = common_timer_del,
58707 };
58708 - struct k_clock clock_monotonic = {
58709 + static struct k_clock clock_monotonic = {
58710 .clock_getres = hrtimer_get_res,
58711 .clock_get = posix_ktime_get_ts,
58712 .nsleep = common_nsleep,
58713 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void
58714 .timer_get = common_timer_get,
58715 .timer_del = common_timer_del,
58716 };
58717 - struct k_clock clock_monotonic_raw = {
58718 + static struct k_clock clock_monotonic_raw = {
58719 .clock_getres = hrtimer_get_res,
58720 .clock_get = posix_get_monotonic_raw,
58721 };
58722 - struct k_clock clock_realtime_coarse = {
58723 + static struct k_clock clock_realtime_coarse = {
58724 .clock_getres = posix_get_coarse_res,
58725 .clock_get = posix_get_realtime_coarse,
58726 };
58727 - struct k_clock clock_monotonic_coarse = {
58728 + static struct k_clock clock_monotonic_coarse = {
58729 .clock_getres = posix_get_coarse_res,
58730 .clock_get = posix_get_monotonic_coarse,
58731 };
58732 - struct k_clock clock_boottime = {
58733 + static struct k_clock clock_boottime = {
58734 .clock_getres = hrtimer_get_res,
58735 .clock_get = posix_get_boottime,
58736 .nsleep = common_nsleep,
58737 @@ -272,6 +273,8 @@ static __init int init_posix_timers(void
58738 .timer_del = common_timer_del,
58739 };
58740
58741 + pax_track_stack();
58742 +
58743 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
58744 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
58745 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
58746 @@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
58747 return;
58748 }
58749
58750 - posix_clocks[clock_id] = *new_clock;
58751 + posix_clocks[clock_id] = new_clock;
58752 }
58753 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
58754
58755 @@ -512,9 +515,9 @@ static struct k_clock *clockid_to_kclock
58756 return (id & CLOCKFD_MASK) == CLOCKFD ?
58757 &clock_posix_dynamic : &clock_posix_cpu;
58758
58759 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
58760 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
58761 return NULL;
58762 - return &posix_clocks[id];
58763 + return posix_clocks[id];
58764 }
58765
58766 static int common_timer_create(struct k_itimer *new_timer)
58767 @@ -956,6 +959,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
58768 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
58769 return -EFAULT;
58770
58771 + /* only the CLOCK_REALTIME clock can be set, all other clocks
58772 + have their clock_set fptr set to a nosettime dummy function
58773 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
58774 + call common_clock_set, which calls do_sys_settimeofday, which
58775 + we hook
58776 + */
58777 +
58778 return kc->clock_set(which_clock, &new_tp);
58779 }
58780
58781 diff -urNp linux-2.6.39.4/kernel/power/poweroff.c linux-2.6.39.4/kernel/power/poweroff.c
58782 --- linux-2.6.39.4/kernel/power/poweroff.c 2011-05-19 00:06:34.000000000 -0400
58783 +++ linux-2.6.39.4/kernel/power/poweroff.c 2011-08-05 19:44:37.000000000 -0400
58784 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
58785 .enable_mask = SYSRQ_ENABLE_BOOT,
58786 };
58787
58788 -static int pm_sysrq_init(void)
58789 +static int __init pm_sysrq_init(void)
58790 {
58791 register_sysrq_key('o', &sysrq_poweroff_op);
58792 return 0;
58793 diff -urNp linux-2.6.39.4/kernel/power/process.c linux-2.6.39.4/kernel/power/process.c
58794 --- linux-2.6.39.4/kernel/power/process.c 2011-05-19 00:06:34.000000000 -0400
58795 +++ linux-2.6.39.4/kernel/power/process.c 2011-08-05 19:44:37.000000000 -0400
58796 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
58797 u64 elapsed_csecs64;
58798 unsigned int elapsed_csecs;
58799 bool wakeup = false;
58800 + bool timedout = false;
58801
58802 do_gettimeofday(&start);
58803
58804 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
58805
58806 while (true) {
58807 todo = 0;
58808 + if (time_after(jiffies, end_time))
58809 + timedout = true;
58810 read_lock(&tasklist_lock);
58811 do_each_thread(g, p) {
58812 if (frozen(p) || !freezable(p))
58813 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
58814 * try_to_stop() after schedule() in ptrace/signal
58815 * stop sees TIF_FREEZE.
58816 */
58817 - if (!task_is_stopped_or_traced(p) &&
58818 - !freezer_should_skip(p))
58819 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
58820 todo++;
58821 + if (timedout) {
58822 + printk(KERN_ERR "Task refusing to freeze:\n");
58823 + sched_show_task(p);
58824 + }
58825 + }
58826 } while_each_thread(g, p);
58827 read_unlock(&tasklist_lock);
58828
58829 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
58830 todo += wq_busy;
58831 }
58832
58833 - if (!todo || time_after(jiffies, end_time))
58834 + if (!todo || timedout)
58835 break;
58836
58837 if (pm_wakeup_pending()) {
58838 diff -urNp linux-2.6.39.4/kernel/printk.c linux-2.6.39.4/kernel/printk.c
58839 --- linux-2.6.39.4/kernel/printk.c 2011-05-19 00:06:34.000000000 -0400
58840 +++ linux-2.6.39.4/kernel/printk.c 2011-08-05 19:44:37.000000000 -0400
58841 @@ -284,12 +284,17 @@ static int check_syslog_permissions(int
58842 if (from_file && type != SYSLOG_ACTION_OPEN)
58843 return 0;
58844
58845 +#ifdef CONFIG_GRKERNSEC_DMESG
58846 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
58847 + return -EPERM;
58848 +#endif
58849 +
58850 if (syslog_action_restricted(type)) {
58851 if (capable(CAP_SYSLOG))
58852 return 0;
58853 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
58854 if (capable(CAP_SYS_ADMIN)) {
58855 - WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
58856 + printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
58857 "but no CAP_SYSLOG (deprecated).\n");
58858 return 0;
58859 }
58860 diff -urNp linux-2.6.39.4/kernel/profile.c linux-2.6.39.4/kernel/profile.c
58861 --- linux-2.6.39.4/kernel/profile.c 2011-05-19 00:06:34.000000000 -0400
58862 +++ linux-2.6.39.4/kernel/profile.c 2011-08-05 19:44:37.000000000 -0400
58863 @@ -39,7 +39,7 @@ struct profile_hit {
58864 /* Oprofile timer tick hook */
58865 static int (*timer_hook)(struct pt_regs *) __read_mostly;
58866
58867 -static atomic_t *prof_buffer;
58868 +static atomic_unchecked_t *prof_buffer;
58869 static unsigned long prof_len, prof_shift;
58870
58871 int prof_on __read_mostly;
58872 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
58873 hits[i].pc = 0;
58874 continue;
58875 }
58876 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58877 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58878 hits[i].hits = hits[i].pc = 0;
58879 }
58880 }
58881 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
58882 * Add the current hit(s) and flush the write-queue out
58883 * to the global buffer:
58884 */
58885 - atomic_add(nr_hits, &prof_buffer[pc]);
58886 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
58887 for (i = 0; i < NR_PROFILE_HIT; ++i) {
58888 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58889 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58890 hits[i].pc = hits[i].hits = 0;
58891 }
58892 out:
58893 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
58894 if (prof_on != type || !prof_buffer)
58895 return;
58896 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
58897 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58898 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58899 }
58900 #endif /* !CONFIG_SMP */
58901 EXPORT_SYMBOL_GPL(profile_hits);
58902 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
58903 return -EFAULT;
58904 buf++; p++; count--; read++;
58905 }
58906 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
58907 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
58908 if (copy_to_user(buf, (void *)pnt, count))
58909 return -EFAULT;
58910 read += count;
58911 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
58912 }
58913 #endif
58914 profile_discard_flip_buffers();
58915 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
58916 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
58917 return count;
58918 }
58919
58920 diff -urNp linux-2.6.39.4/kernel/ptrace.c linux-2.6.39.4/kernel/ptrace.c
58921 --- linux-2.6.39.4/kernel/ptrace.c 2011-05-19 00:06:34.000000000 -0400
58922 +++ linux-2.6.39.4/kernel/ptrace.c 2011-08-05 19:44:37.000000000 -0400
58923 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
58924 return ret;
58925 }
58926
58927 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
58928 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
58929 + unsigned int log)
58930 {
58931 const struct cred *cred = current_cred(), *tcred;
58932
58933 @@ -143,7 +144,8 @@ int __ptrace_may_access(struct task_stru
58934 cred->gid == tcred->sgid &&
58935 cred->gid == tcred->gid))
58936 goto ok;
58937 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
58938 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
58939 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
58940 goto ok;
58941 rcu_read_unlock();
58942 return -EPERM;
58943 @@ -152,7 +154,9 @@ ok:
58944 smp_rmb();
58945 if (task->mm)
58946 dumpable = get_dumpable(task->mm);
58947 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
58948 + if (!dumpable &&
58949 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
58950 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
58951 return -EPERM;
58952
58953 return security_ptrace_access_check(task, mode);
58954 @@ -162,7 +166,16 @@ bool ptrace_may_access(struct task_struc
58955 {
58956 int err;
58957 task_lock(task);
58958 - err = __ptrace_may_access(task, mode);
58959 + err = __ptrace_may_access(task, mode, 0);
58960 + task_unlock(task);
58961 + return !err;
58962 +}
58963 +
58964 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
58965 +{
58966 + int err;
58967 + task_lock(task);
58968 + err = __ptrace_may_access(task, mode, 1);
58969 task_unlock(task);
58970 return !err;
58971 }
58972 @@ -189,7 +202,7 @@ static int ptrace_attach(struct task_str
58973 goto out;
58974
58975 task_lock(task);
58976 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
58977 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
58978 task_unlock(task);
58979 if (retval)
58980 goto unlock_creds;
58981 @@ -202,7 +215,7 @@ static int ptrace_attach(struct task_str
58982 goto unlock_tasklist;
58983
58984 task->ptrace = PT_PTRACED;
58985 - if (task_ns_capable(task, CAP_SYS_PTRACE))
58986 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
58987 task->ptrace |= PT_PTRACE_CAP;
58988
58989 __ptrace_link(task, current);
58990 @@ -362,6 +375,8 @@ int ptrace_readdata(struct task_struct *
58991 {
58992 int copied = 0;
58993
58994 + pax_track_stack();
58995 +
58996 while (len > 0) {
58997 char buf[128];
58998 int this_len, retval;
58999 @@ -373,7 +388,7 @@ int ptrace_readdata(struct task_struct *
59000 break;
59001 return -EIO;
59002 }
59003 - if (copy_to_user(dst, buf, retval))
59004 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
59005 return -EFAULT;
59006 copied += retval;
59007 src += retval;
59008 @@ -387,6 +402,8 @@ int ptrace_writedata(struct task_struct
59009 {
59010 int copied = 0;
59011
59012 + pax_track_stack();
59013 +
59014 while (len > 0) {
59015 char buf[128];
59016 int this_len, retval;
59017 @@ -569,9 +586,11 @@ int ptrace_request(struct task_struct *c
59018 {
59019 int ret = -EIO;
59020 siginfo_t siginfo;
59021 - void __user *datavp = (void __user *) data;
59022 + void __user *datavp = (__force void __user *) data;
59023 unsigned long __user *datalp = datavp;
59024
59025 + pax_track_stack();
59026 +
59027 switch (request) {
59028 case PTRACE_PEEKTEXT:
59029 case PTRACE_PEEKDATA:
59030 @@ -717,14 +736,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
59031 goto out;
59032 }
59033
59034 + if (gr_handle_ptrace(child, request)) {
59035 + ret = -EPERM;
59036 + goto out_put_task_struct;
59037 + }
59038 +
59039 if (request == PTRACE_ATTACH) {
59040 ret = ptrace_attach(child);
59041 /*
59042 * Some architectures need to do book-keeping after
59043 * a ptrace attach.
59044 */
59045 - if (!ret)
59046 + if (!ret) {
59047 arch_ptrace_attach(child);
59048 + gr_audit_ptrace(child);
59049 + }
59050 goto out_put_task_struct;
59051 }
59052
59053 @@ -749,7 +775,7 @@ int generic_ptrace_peekdata(struct task_
59054 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
59055 if (copied != sizeof(tmp))
59056 return -EIO;
59057 - return put_user(tmp, (unsigned long __user *)data);
59058 + return put_user(tmp, (__force unsigned long __user *)data);
59059 }
59060
59061 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
59062 @@ -772,6 +798,8 @@ int compat_ptrace_request(struct task_st
59063 siginfo_t siginfo;
59064 int ret;
59065
59066 + pax_track_stack();
59067 +
59068 switch (request) {
59069 case PTRACE_PEEKTEXT:
59070 case PTRACE_PEEKDATA:
59071 @@ -859,14 +887,21 @@ asmlinkage long compat_sys_ptrace(compat
59072 goto out;
59073 }
59074
59075 + if (gr_handle_ptrace(child, request)) {
59076 + ret = -EPERM;
59077 + goto out_put_task_struct;
59078 + }
59079 +
59080 if (request == PTRACE_ATTACH) {
59081 ret = ptrace_attach(child);
59082 /*
59083 * Some architectures need to do book-keeping after
59084 * a ptrace attach.
59085 */
59086 - if (!ret)
59087 + if (!ret) {
59088 arch_ptrace_attach(child);
59089 + gr_audit_ptrace(child);
59090 + }
59091 goto out_put_task_struct;
59092 }
59093
59094 diff -urNp linux-2.6.39.4/kernel/rcutorture.c linux-2.6.39.4/kernel/rcutorture.c
59095 --- linux-2.6.39.4/kernel/rcutorture.c 2011-05-19 00:06:34.000000000 -0400
59096 +++ linux-2.6.39.4/kernel/rcutorture.c 2011-08-05 19:44:37.000000000 -0400
59097 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
59098 { 0 };
59099 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
59100 { 0 };
59101 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
59102 -static atomic_t n_rcu_torture_alloc;
59103 -static atomic_t n_rcu_torture_alloc_fail;
59104 -static atomic_t n_rcu_torture_free;
59105 -static atomic_t n_rcu_torture_mberror;
59106 -static atomic_t n_rcu_torture_error;
59107 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
59108 +static atomic_unchecked_t n_rcu_torture_alloc;
59109 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
59110 +static atomic_unchecked_t n_rcu_torture_free;
59111 +static atomic_unchecked_t n_rcu_torture_mberror;
59112 +static atomic_unchecked_t n_rcu_torture_error;
59113 static long n_rcu_torture_boost_ktrerror;
59114 static long n_rcu_torture_boost_rterror;
59115 static long n_rcu_torture_boost_allocerror;
59116 @@ -225,11 +225,11 @@ rcu_torture_alloc(void)
59117
59118 spin_lock_bh(&rcu_torture_lock);
59119 if (list_empty(&rcu_torture_freelist)) {
59120 - atomic_inc(&n_rcu_torture_alloc_fail);
59121 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
59122 spin_unlock_bh(&rcu_torture_lock);
59123 return NULL;
59124 }
59125 - atomic_inc(&n_rcu_torture_alloc);
59126 + atomic_inc_unchecked(&n_rcu_torture_alloc);
59127 p = rcu_torture_freelist.next;
59128 list_del_init(p);
59129 spin_unlock_bh(&rcu_torture_lock);
59130 @@ -242,7 +242,7 @@ rcu_torture_alloc(void)
59131 static void
59132 rcu_torture_free(struct rcu_torture *p)
59133 {
59134 - atomic_inc(&n_rcu_torture_free);
59135 + atomic_inc_unchecked(&n_rcu_torture_free);
59136 spin_lock_bh(&rcu_torture_lock);
59137 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
59138 spin_unlock_bh(&rcu_torture_lock);
59139 @@ -362,7 +362,7 @@ rcu_torture_cb(struct rcu_head *p)
59140 i = rp->rtort_pipe_count;
59141 if (i > RCU_TORTURE_PIPE_LEN)
59142 i = RCU_TORTURE_PIPE_LEN;
59143 - atomic_inc(&rcu_torture_wcount[i]);
59144 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
59145 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
59146 rp->rtort_mbtest = 0;
59147 rcu_torture_free(rp);
59148 @@ -409,7 +409,7 @@ static void rcu_sync_torture_deferred_fr
59149 i = rp->rtort_pipe_count;
59150 if (i > RCU_TORTURE_PIPE_LEN)
59151 i = RCU_TORTURE_PIPE_LEN;
59152 - atomic_inc(&rcu_torture_wcount[i]);
59153 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
59154 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
59155 rp->rtort_mbtest = 0;
59156 list_del(&rp->rtort_free);
59157 @@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
59158 i = old_rp->rtort_pipe_count;
59159 if (i > RCU_TORTURE_PIPE_LEN)
59160 i = RCU_TORTURE_PIPE_LEN;
59161 - atomic_inc(&rcu_torture_wcount[i]);
59162 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
59163 old_rp->rtort_pipe_count++;
59164 cur_ops->deferred_free(old_rp);
59165 }
59166 @@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
59167 return;
59168 }
59169 if (p->rtort_mbtest == 0)
59170 - atomic_inc(&n_rcu_torture_mberror);
59171 + atomic_inc_unchecked(&n_rcu_torture_mberror);
59172 spin_lock(&rand_lock);
59173 cur_ops->read_delay(&rand);
59174 n_rcu_torture_timers++;
59175 @@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
59176 continue;
59177 }
59178 if (p->rtort_mbtest == 0)
59179 - atomic_inc(&n_rcu_torture_mberror);
59180 + atomic_inc_unchecked(&n_rcu_torture_mberror);
59181 cur_ops->read_delay(&rand);
59182 preempt_disable();
59183 pipe_count = p->rtort_pipe_count;
59184 @@ -1072,10 +1072,10 @@ rcu_torture_printk(char *page)
59185 rcu_torture_current,
59186 rcu_torture_current_version,
59187 list_empty(&rcu_torture_freelist),
59188 - atomic_read(&n_rcu_torture_alloc),
59189 - atomic_read(&n_rcu_torture_alloc_fail),
59190 - atomic_read(&n_rcu_torture_free),
59191 - atomic_read(&n_rcu_torture_mberror),
59192 + atomic_read_unchecked(&n_rcu_torture_alloc),
59193 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
59194 + atomic_read_unchecked(&n_rcu_torture_free),
59195 + atomic_read_unchecked(&n_rcu_torture_mberror),
59196 n_rcu_torture_boost_ktrerror,
59197 n_rcu_torture_boost_rterror,
59198 n_rcu_torture_boost_allocerror,
59199 @@ -1083,7 +1083,7 @@ rcu_torture_printk(char *page)
59200 n_rcu_torture_boost_failure,
59201 n_rcu_torture_boosts,
59202 n_rcu_torture_timers);
59203 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
59204 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
59205 n_rcu_torture_boost_ktrerror != 0 ||
59206 n_rcu_torture_boost_rterror != 0 ||
59207 n_rcu_torture_boost_allocerror != 0 ||
59208 @@ -1093,7 +1093,7 @@ rcu_torture_printk(char *page)
59209 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
59210 if (i > 1) {
59211 cnt += sprintf(&page[cnt], "!!! ");
59212 - atomic_inc(&n_rcu_torture_error);
59213 + atomic_inc_unchecked(&n_rcu_torture_error);
59214 WARN_ON_ONCE(1);
59215 }
59216 cnt += sprintf(&page[cnt], "Reader Pipe: ");
59217 @@ -1107,7 +1107,7 @@ rcu_torture_printk(char *page)
59218 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
59219 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
59220 cnt += sprintf(&page[cnt], " %d",
59221 - atomic_read(&rcu_torture_wcount[i]));
59222 + atomic_read_unchecked(&rcu_torture_wcount[i]));
59223 }
59224 cnt += sprintf(&page[cnt], "\n");
59225 if (cur_ops->stats)
59226 @@ -1415,7 +1415,7 @@ rcu_torture_cleanup(void)
59227
59228 if (cur_ops->cleanup)
59229 cur_ops->cleanup();
59230 - if (atomic_read(&n_rcu_torture_error))
59231 + if (atomic_read_unchecked(&n_rcu_torture_error))
59232 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
59233 else
59234 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
59235 @@ -1479,11 +1479,11 @@ rcu_torture_init(void)
59236
59237 rcu_torture_current = NULL;
59238 rcu_torture_current_version = 0;
59239 - atomic_set(&n_rcu_torture_alloc, 0);
59240 - atomic_set(&n_rcu_torture_alloc_fail, 0);
59241 - atomic_set(&n_rcu_torture_free, 0);
59242 - atomic_set(&n_rcu_torture_mberror, 0);
59243 - atomic_set(&n_rcu_torture_error, 0);
59244 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
59245 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
59246 + atomic_set_unchecked(&n_rcu_torture_free, 0);
59247 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
59248 + atomic_set_unchecked(&n_rcu_torture_error, 0);
59249 n_rcu_torture_boost_ktrerror = 0;
59250 n_rcu_torture_boost_rterror = 0;
59251 n_rcu_torture_boost_allocerror = 0;
59252 @@ -1491,7 +1491,7 @@ rcu_torture_init(void)
59253 n_rcu_torture_boost_failure = 0;
59254 n_rcu_torture_boosts = 0;
59255 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
59256 - atomic_set(&rcu_torture_wcount[i], 0);
59257 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
59258 for_each_possible_cpu(cpu) {
59259 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
59260 per_cpu(rcu_torture_count, cpu)[i] = 0;
59261 diff -urNp linux-2.6.39.4/kernel/rcutree.c linux-2.6.39.4/kernel/rcutree.c
59262 --- linux-2.6.39.4/kernel/rcutree.c 2011-05-19 00:06:34.000000000 -0400
59263 +++ linux-2.6.39.4/kernel/rcutree.c 2011-08-05 19:44:37.000000000 -0400
59264 @@ -1389,7 +1389,7 @@ __rcu_process_callbacks(struct rcu_state
59265 /*
59266 * Do softirq processing for the current CPU.
59267 */
59268 -static void rcu_process_callbacks(struct softirq_action *unused)
59269 +static void rcu_process_callbacks(void)
59270 {
59271 /*
59272 * Memory references from any prior RCU read-side critical sections
59273 diff -urNp linux-2.6.39.4/kernel/rcutree_plugin.h linux-2.6.39.4/kernel/rcutree_plugin.h
59274 --- linux-2.6.39.4/kernel/rcutree_plugin.h 2011-05-19 00:06:34.000000000 -0400
59275 +++ linux-2.6.39.4/kernel/rcutree_plugin.h 2011-08-05 19:44:37.000000000 -0400
59276 @@ -730,7 +730,7 @@ void synchronize_rcu_expedited(void)
59277
59278 /* Clean up and exit. */
59279 smp_mb(); /* ensure expedited GP seen before counter increment. */
59280 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
59281 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
59282 unlock_mb_ret:
59283 mutex_unlock(&sync_rcu_preempt_exp_mutex);
59284 mb_ret:
59285 @@ -1025,8 +1025,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
59286
59287 #else /* #ifndef CONFIG_SMP */
59288
59289 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
59290 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
59291 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
59292 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
59293
59294 static int synchronize_sched_expedited_cpu_stop(void *data)
59295 {
59296 @@ -1081,7 +1081,7 @@ void synchronize_sched_expedited(void)
59297 int firstsnap, s, snap, trycount = 0;
59298
59299 /* Note that atomic_inc_return() implies full memory barrier. */
59300 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
59301 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
59302 get_online_cpus();
59303
59304 /*
59305 @@ -1102,7 +1102,7 @@ void synchronize_sched_expedited(void)
59306 }
59307
59308 /* Check to see if someone else did our work for us. */
59309 - s = atomic_read(&sync_sched_expedited_done);
59310 + s = atomic_read_unchecked(&sync_sched_expedited_done);
59311 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
59312 smp_mb(); /* ensure test happens before caller kfree */
59313 return;
59314 @@ -1117,7 +1117,7 @@ void synchronize_sched_expedited(void)
59315 * grace period works for us.
59316 */
59317 get_online_cpus();
59318 - snap = atomic_read(&sync_sched_expedited_started) - 1;
59319 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
59320 smp_mb(); /* ensure read is before try_stop_cpus(). */
59321 }
59322
59323 @@ -1128,12 +1128,12 @@ void synchronize_sched_expedited(void)
59324 * than we did beat us to the punch.
59325 */
59326 do {
59327 - s = atomic_read(&sync_sched_expedited_done);
59328 + s = atomic_read_unchecked(&sync_sched_expedited_done);
59329 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
59330 smp_mb(); /* ensure test happens before caller kfree */
59331 break;
59332 }
59333 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
59334 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
59335
59336 put_online_cpus();
59337 }
59338 diff -urNp linux-2.6.39.4/kernel/relay.c linux-2.6.39.4/kernel/relay.c
59339 --- linux-2.6.39.4/kernel/relay.c 2011-05-19 00:06:34.000000000 -0400
59340 +++ linux-2.6.39.4/kernel/relay.c 2011-08-05 19:44:37.000000000 -0400
59341 @@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
59342 };
59343 ssize_t ret;
59344
59345 + pax_track_stack();
59346 +
59347 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
59348 return 0;
59349 if (splice_grow_spd(pipe, &spd))
59350 diff -urNp linux-2.6.39.4/kernel/resource.c linux-2.6.39.4/kernel/resource.c
59351 --- linux-2.6.39.4/kernel/resource.c 2011-05-19 00:06:34.000000000 -0400
59352 +++ linux-2.6.39.4/kernel/resource.c 2011-08-05 19:44:37.000000000 -0400
59353 @@ -133,8 +133,18 @@ static const struct file_operations proc
59354
59355 static int __init ioresources_init(void)
59356 {
59357 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
59358 +#ifdef CONFIG_GRKERNSEC_PROC_USER
59359 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
59360 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
59361 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59362 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
59363 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
59364 +#endif
59365 +#else
59366 proc_create("ioports", 0, NULL, &proc_ioports_operations);
59367 proc_create("iomem", 0, NULL, &proc_iomem_operations);
59368 +#endif
59369 return 0;
59370 }
59371 __initcall(ioresources_init);
59372 diff -urNp linux-2.6.39.4/kernel/rtmutex-tester.c linux-2.6.39.4/kernel/rtmutex-tester.c
59373 --- linux-2.6.39.4/kernel/rtmutex-tester.c 2011-05-19 00:06:34.000000000 -0400
59374 +++ linux-2.6.39.4/kernel/rtmutex-tester.c 2011-08-05 19:44:37.000000000 -0400
59375 @@ -20,7 +20,7 @@
59376 #define MAX_RT_TEST_MUTEXES 8
59377
59378 static spinlock_t rttest_lock;
59379 -static atomic_t rttest_event;
59380 +static atomic_unchecked_t rttest_event;
59381
59382 struct test_thread_data {
59383 int opcode;
59384 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
59385
59386 case RTTEST_LOCKCONT:
59387 td->mutexes[td->opdata] = 1;
59388 - td->event = atomic_add_return(1, &rttest_event);
59389 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59390 return 0;
59391
59392 case RTTEST_RESET:
59393 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
59394 return 0;
59395
59396 case RTTEST_RESETEVENT:
59397 - atomic_set(&rttest_event, 0);
59398 + atomic_set_unchecked(&rttest_event, 0);
59399 return 0;
59400
59401 default:
59402 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
59403 return ret;
59404
59405 td->mutexes[id] = 1;
59406 - td->event = atomic_add_return(1, &rttest_event);
59407 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59408 rt_mutex_lock(&mutexes[id]);
59409 - td->event = atomic_add_return(1, &rttest_event);
59410 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59411 td->mutexes[id] = 4;
59412 return 0;
59413
59414 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
59415 return ret;
59416
59417 td->mutexes[id] = 1;
59418 - td->event = atomic_add_return(1, &rttest_event);
59419 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59420 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
59421 - td->event = atomic_add_return(1, &rttest_event);
59422 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59423 td->mutexes[id] = ret ? 0 : 4;
59424 return ret ? -EINTR : 0;
59425
59426 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
59427 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
59428 return ret;
59429
59430 - td->event = atomic_add_return(1, &rttest_event);
59431 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59432 rt_mutex_unlock(&mutexes[id]);
59433 - td->event = atomic_add_return(1, &rttest_event);
59434 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59435 td->mutexes[id] = 0;
59436 return 0;
59437
59438 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
59439 break;
59440
59441 td->mutexes[dat] = 2;
59442 - td->event = atomic_add_return(1, &rttest_event);
59443 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59444 break;
59445
59446 default:
59447 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
59448 return;
59449
59450 td->mutexes[dat] = 3;
59451 - td->event = atomic_add_return(1, &rttest_event);
59452 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59453 break;
59454
59455 case RTTEST_LOCKNOWAIT:
59456 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
59457 return;
59458
59459 td->mutexes[dat] = 1;
59460 - td->event = atomic_add_return(1, &rttest_event);
59461 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59462 return;
59463
59464 default:
59465 diff -urNp linux-2.6.39.4/kernel/sched_autogroup.c linux-2.6.39.4/kernel/sched_autogroup.c
59466 --- linux-2.6.39.4/kernel/sched_autogroup.c 2011-05-19 00:06:34.000000000 -0400
59467 +++ linux-2.6.39.4/kernel/sched_autogroup.c 2011-08-05 19:44:37.000000000 -0400
59468 @@ -7,7 +7,7 @@
59469
59470 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
59471 static struct autogroup autogroup_default;
59472 -static atomic_t autogroup_seq_nr;
59473 +static atomic_unchecked_t autogroup_seq_nr;
59474
59475 static void __init autogroup_init(struct task_struct *init_task)
59476 {
59477 @@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
59478
59479 kref_init(&ag->kref);
59480 init_rwsem(&ag->lock);
59481 - ag->id = atomic_inc_return(&autogroup_seq_nr);
59482 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
59483 ag->tg = tg;
59484 #ifdef CONFIG_RT_GROUP_SCHED
59485 /*
59486 diff -urNp linux-2.6.39.4/kernel/sched.c linux-2.6.39.4/kernel/sched.c
59487 --- linux-2.6.39.4/kernel/sched.c 2011-05-19 00:06:34.000000000 -0400
59488 +++ linux-2.6.39.4/kernel/sched.c 2011-08-05 19:44:37.000000000 -0400
59489 @@ -4078,6 +4078,8 @@ asmlinkage void __sched schedule(void)
59490 struct rq *rq;
59491 int cpu;
59492
59493 + pax_track_stack();
59494 +
59495 need_resched:
59496 preempt_disable();
59497 cpu = smp_processor_id();
59498 @@ -4165,7 +4167,7 @@ EXPORT_SYMBOL(schedule);
59499 * Look out! "owner" is an entirely speculative pointer
59500 * access and not reliable.
59501 */
59502 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
59503 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
59504 {
59505 unsigned int cpu;
59506 struct rq *rq;
59507 @@ -4179,10 +4181,10 @@ int mutex_spin_on_owner(struct mutex *lo
59508 * DEBUG_PAGEALLOC could have unmapped it if
59509 * the mutex owner just released it and exited.
59510 */
59511 - if (probe_kernel_address(&owner->cpu, cpu))
59512 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
59513 return 0;
59514 #else
59515 - cpu = owner->cpu;
59516 + cpu = task_thread_info(owner)->cpu;
59517 #endif
59518
59519 /*
59520 @@ -4219,7 +4221,7 @@ int mutex_spin_on_owner(struct mutex *lo
59521 /*
59522 * Is that owner really running on that cpu?
59523 */
59524 - if (task_thread_info(rq->curr) != owner || need_resched())
59525 + if (rq->curr != owner || need_resched())
59526 return 0;
59527
59528 arch_mutex_cpu_relax();
59529 @@ -4778,6 +4780,8 @@ int can_nice(const struct task_struct *p
59530 /* convert nice value [19,-20] to rlimit style value [1,40] */
59531 int nice_rlim = 20 - nice;
59532
59533 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
59534 +
59535 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
59536 capable(CAP_SYS_NICE));
59537 }
59538 @@ -4811,7 +4815,8 @@ SYSCALL_DEFINE1(nice, int, increment)
59539 if (nice > 19)
59540 nice = 19;
59541
59542 - if (increment < 0 && !can_nice(current, nice))
59543 + if (increment < 0 && (!can_nice(current, nice) ||
59544 + gr_handle_chroot_nice()))
59545 return -EPERM;
59546
59547 retval = security_task_setnice(current, nice);
59548 @@ -4957,6 +4962,7 @@ recheck:
59549 unsigned long rlim_rtprio =
59550 task_rlimit(p, RLIMIT_RTPRIO);
59551
59552 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
59553 /* can't set/change the rt policy */
59554 if (policy != p->policy && !rlim_rtprio)
59555 return -EPERM;
59556 @@ -7164,7 +7170,7 @@ static void init_sched_groups_power(int
59557 long power;
59558 int weight;
59559
59560 - WARN_ON(!sd || !sd->groups);
59561 + BUG_ON(!sd || !sd->groups);
59562
59563 if (cpu != group_first_cpu(sd->groups))
59564 return;
59565 diff -urNp linux-2.6.39.4/kernel/sched_fair.c linux-2.6.39.4/kernel/sched_fair.c
59566 --- linux-2.6.39.4/kernel/sched_fair.c 2011-05-19 00:06:34.000000000 -0400
59567 +++ linux-2.6.39.4/kernel/sched_fair.c 2011-08-05 19:44:37.000000000 -0400
59568 @@ -3999,7 +3999,7 @@ static void nohz_idle_balance(int this_c
59569 * run_rebalance_domains is triggered when needed from the scheduler tick.
59570 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
59571 */
59572 -static void run_rebalance_domains(struct softirq_action *h)
59573 +static void run_rebalance_domains(void)
59574 {
59575 int this_cpu = smp_processor_id();
59576 struct rq *this_rq = cpu_rq(this_cpu);
59577 diff -urNp linux-2.6.39.4/kernel/signal.c linux-2.6.39.4/kernel/signal.c
59578 --- linux-2.6.39.4/kernel/signal.c 2011-05-19 00:06:34.000000000 -0400
59579 +++ linux-2.6.39.4/kernel/signal.c 2011-08-16 21:16:33.000000000 -0400
59580 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
59581
59582 int print_fatal_signals __read_mostly;
59583
59584 -static void __user *sig_handler(struct task_struct *t, int sig)
59585 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
59586 {
59587 return t->sighand->action[sig - 1].sa.sa_handler;
59588 }
59589
59590 -static int sig_handler_ignored(void __user *handler, int sig)
59591 +static int sig_handler_ignored(__sighandler_t handler, int sig)
59592 {
59593 /* Is it explicitly or implicitly ignored? */
59594 return handler == SIG_IGN ||
59595 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
59596 static int sig_task_ignored(struct task_struct *t, int sig,
59597 int from_ancestor_ns)
59598 {
59599 - void __user *handler;
59600 + __sighandler_t handler;
59601
59602 handler = sig_handler(t, sig);
59603
59604 @@ -243,6 +243,9 @@ __sigqueue_alloc(int sig, struct task_st
59605 atomic_inc(&user->sigpending);
59606 rcu_read_unlock();
59607
59608 + if (!override_rlimit)
59609 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
59610 +
59611 if (override_rlimit ||
59612 atomic_read(&user->sigpending) <=
59613 task_rlimit(t, RLIMIT_SIGPENDING)) {
59614 @@ -367,7 +370,7 @@ flush_signal_handlers(struct task_struct
59615
59616 int unhandled_signal(struct task_struct *tsk, int sig)
59617 {
59618 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
59619 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
59620 if (is_global_init(tsk))
59621 return 1;
59622 if (handler != SIG_IGN && handler != SIG_DFL)
59623 @@ -693,6 +696,13 @@ static int check_kill_permission(int sig
59624 }
59625 }
59626
59627 + /* allow glibc communication via tgkill to other threads in our
59628 + thread group */
59629 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
59630 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
59631 + && gr_handle_signal(t, sig))
59632 + return -EPERM;
59633 +
59634 return security_task_kill(t, info, sig, 0);
59635 }
59636
59637 @@ -1041,7 +1051,7 @@ __group_send_sig_info(int sig, struct si
59638 return send_signal(sig, info, p, 1);
59639 }
59640
59641 -static int
59642 +int
59643 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
59644 {
59645 return send_signal(sig, info, t, 0);
59646 @@ -1078,6 +1088,7 @@ force_sig_info(int sig, struct siginfo *
59647 unsigned long int flags;
59648 int ret, blocked, ignored;
59649 struct k_sigaction *action;
59650 + int is_unhandled = 0;
59651
59652 spin_lock_irqsave(&t->sighand->siglock, flags);
59653 action = &t->sighand->action[sig-1];
59654 @@ -1092,9 +1103,18 @@ force_sig_info(int sig, struct siginfo *
59655 }
59656 if (action->sa.sa_handler == SIG_DFL)
59657 t->signal->flags &= ~SIGNAL_UNKILLABLE;
59658 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
59659 + is_unhandled = 1;
59660 ret = specific_send_sig_info(sig, info, t);
59661 spin_unlock_irqrestore(&t->sighand->siglock, flags);
59662
59663 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
59664 + normal operation */
59665 + if (is_unhandled) {
59666 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
59667 + gr_handle_crash(t, sig);
59668 + }
59669 +
59670 return ret;
59671 }
59672
59673 @@ -1153,8 +1173,11 @@ int group_send_sig_info(int sig, struct
59674 ret = check_kill_permission(sig, info, p);
59675 rcu_read_unlock();
59676
59677 - if (!ret && sig)
59678 + if (!ret && sig) {
59679 ret = do_send_sig_info(sig, info, p, true);
59680 + if (!ret)
59681 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
59682 + }
59683
59684 return ret;
59685 }
59686 @@ -1718,6 +1741,8 @@ void ptrace_notify(int exit_code)
59687 {
59688 siginfo_t info;
59689
59690 + pax_track_stack();
59691 +
59692 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
59693
59694 memset(&info, 0, sizeof info);
59695 @@ -2393,7 +2418,15 @@ do_send_specific(pid_t tgid, pid_t pid,
59696 int error = -ESRCH;
59697
59698 rcu_read_lock();
59699 - p = find_task_by_vpid(pid);
59700 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59701 + /* allow glibc communication via tgkill to other threads in our
59702 + thread group */
59703 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
59704 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
59705 + p = find_task_by_vpid_unrestricted(pid);
59706 + else
59707 +#endif
59708 + p = find_task_by_vpid(pid);
59709 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
59710 error = check_kill_permission(sig, info, p);
59711 /*
59712 diff -urNp linux-2.6.39.4/kernel/smp.c linux-2.6.39.4/kernel/smp.c
59713 --- linux-2.6.39.4/kernel/smp.c 2011-05-19 00:06:34.000000000 -0400
59714 +++ linux-2.6.39.4/kernel/smp.c 2011-08-05 19:44:37.000000000 -0400
59715 @@ -583,22 +583,22 @@ int smp_call_function(smp_call_func_t fu
59716 }
59717 EXPORT_SYMBOL(smp_call_function);
59718
59719 -void ipi_call_lock(void)
59720 +void ipi_call_lock(void) __acquires(call_function.lock)
59721 {
59722 raw_spin_lock(&call_function.lock);
59723 }
59724
59725 -void ipi_call_unlock(void)
59726 +void ipi_call_unlock(void) __releases(call_function.lock)
59727 {
59728 raw_spin_unlock(&call_function.lock);
59729 }
59730
59731 -void ipi_call_lock_irq(void)
59732 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
59733 {
59734 raw_spin_lock_irq(&call_function.lock);
59735 }
59736
59737 -void ipi_call_unlock_irq(void)
59738 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
59739 {
59740 raw_spin_unlock_irq(&call_function.lock);
59741 }
59742 diff -urNp linux-2.6.39.4/kernel/softirq.c linux-2.6.39.4/kernel/softirq.c
59743 --- linux-2.6.39.4/kernel/softirq.c 2011-05-19 00:06:34.000000000 -0400
59744 +++ linux-2.6.39.4/kernel/softirq.c 2011-08-05 20:34:06.000000000 -0400
59745 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
59746
59747 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
59748
59749 -char *softirq_to_name[NR_SOFTIRQS] = {
59750 +const char * const softirq_to_name[NR_SOFTIRQS] = {
59751 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
59752 "TASKLET", "SCHED", "HRTIMER", "RCU"
59753 };
59754 @@ -235,7 +235,7 @@ restart:
59755 kstat_incr_softirqs_this_cpu(vec_nr);
59756
59757 trace_softirq_entry(vec_nr);
59758 - h->action(h);
59759 + h->action();
59760 trace_softirq_exit(vec_nr);
59761 if (unlikely(prev_count != preempt_count())) {
59762 printk(KERN_ERR "huh, entered softirq %u %s %p"
59763 @@ -377,9 +377,11 @@ void raise_softirq(unsigned int nr)
59764 local_irq_restore(flags);
59765 }
59766
59767 -void open_softirq(int nr, void (*action)(struct softirq_action *))
59768 +void open_softirq(int nr, void (*action)(void))
59769 {
59770 - softirq_vec[nr].action = action;
59771 + pax_open_kernel();
59772 + *(void **)&softirq_vec[nr].action = action;
59773 + pax_close_kernel();
59774 }
59775
59776 /*
59777 @@ -433,7 +435,7 @@ void __tasklet_hi_schedule_first(struct
59778
59779 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
59780
59781 -static void tasklet_action(struct softirq_action *a)
59782 +static void tasklet_action(void)
59783 {
59784 struct tasklet_struct *list;
59785
59786 @@ -468,7 +470,7 @@ static void tasklet_action(struct softir
59787 }
59788 }
59789
59790 -static void tasklet_hi_action(struct softirq_action *a)
59791 +static void tasklet_hi_action(void)
59792 {
59793 struct tasklet_struct *list;
59794
59795 diff -urNp linux-2.6.39.4/kernel/sys.c linux-2.6.39.4/kernel/sys.c
59796 --- linux-2.6.39.4/kernel/sys.c 2011-05-19 00:06:34.000000000 -0400
59797 +++ linux-2.6.39.4/kernel/sys.c 2011-08-05 19:44:37.000000000 -0400
59798 @@ -154,6 +154,12 @@ static int set_one_prio(struct task_stru
59799 error = -EACCES;
59800 goto out;
59801 }
59802 +
59803 + if (gr_handle_chroot_setpriority(p, niceval)) {
59804 + error = -EACCES;
59805 + goto out;
59806 + }
59807 +
59808 no_nice = security_task_setnice(p, niceval);
59809 if (no_nice) {
59810 error = no_nice;
59811 @@ -538,6 +544,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
59812 goto error;
59813 }
59814
59815 + if (gr_check_group_change(new->gid, new->egid, -1))
59816 + goto error;
59817 +
59818 if (rgid != (gid_t) -1 ||
59819 (egid != (gid_t) -1 && egid != old->gid))
59820 new->sgid = new->egid;
59821 @@ -567,6 +576,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
59822 old = current_cred();
59823
59824 retval = -EPERM;
59825 +
59826 + if (gr_check_group_change(gid, gid, gid))
59827 + goto error;
59828 +
59829 if (nsown_capable(CAP_SETGID))
59830 new->gid = new->egid = new->sgid = new->fsgid = gid;
59831 else if (gid == old->gid || gid == old->sgid)
59832 @@ -647,6 +660,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
59833 goto error;
59834 }
59835
59836 + if (gr_check_user_change(new->uid, new->euid, -1))
59837 + goto error;
59838 +
59839 if (new->uid != old->uid) {
59840 retval = set_user(new);
59841 if (retval < 0)
59842 @@ -691,6 +707,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
59843 old = current_cred();
59844
59845 retval = -EPERM;
59846 +
59847 + if (gr_check_crash_uid(uid))
59848 + goto error;
59849 + if (gr_check_user_change(uid, uid, uid))
59850 + goto error;
59851 +
59852 if (nsown_capable(CAP_SETUID)) {
59853 new->suid = new->uid = uid;
59854 if (uid != old->uid) {
59855 @@ -745,6 +767,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
59856 goto error;
59857 }
59858
59859 + if (gr_check_user_change(ruid, euid, -1))
59860 + goto error;
59861 +
59862 if (ruid != (uid_t) -1) {
59863 new->uid = ruid;
59864 if (ruid != old->uid) {
59865 @@ -809,6 +834,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
59866 goto error;
59867 }
59868
59869 + if (gr_check_group_change(rgid, egid, -1))
59870 + goto error;
59871 +
59872 if (rgid != (gid_t) -1)
59873 new->gid = rgid;
59874 if (egid != (gid_t) -1)
59875 @@ -855,6 +883,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59876 old = current_cred();
59877 old_fsuid = old->fsuid;
59878
59879 + if (gr_check_user_change(-1, -1, uid))
59880 + goto error;
59881 +
59882 if (uid == old->uid || uid == old->euid ||
59883 uid == old->suid || uid == old->fsuid ||
59884 nsown_capable(CAP_SETUID)) {
59885 @@ -865,6 +896,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59886 }
59887 }
59888
59889 +error:
59890 abort_creds(new);
59891 return old_fsuid;
59892
59893 @@ -891,12 +923,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
59894 if (gid == old->gid || gid == old->egid ||
59895 gid == old->sgid || gid == old->fsgid ||
59896 nsown_capable(CAP_SETGID)) {
59897 + if (gr_check_group_change(-1, -1, gid))
59898 + goto error;
59899 +
59900 if (gid != old_fsgid) {
59901 new->fsgid = gid;
59902 goto change_okay;
59903 }
59904 }
59905
59906 +error:
59907 abort_creds(new);
59908 return old_fsgid;
59909
59910 @@ -1643,7 +1679,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
59911 error = get_dumpable(me->mm);
59912 break;
59913 case PR_SET_DUMPABLE:
59914 - if (arg2 < 0 || arg2 > 1) {
59915 + if (arg2 > 1) {
59916 error = -EINVAL;
59917 break;
59918 }
59919 diff -urNp linux-2.6.39.4/kernel/sysctl.c linux-2.6.39.4/kernel/sysctl.c
59920 --- linux-2.6.39.4/kernel/sysctl.c 2011-05-19 00:06:34.000000000 -0400
59921 +++ linux-2.6.39.4/kernel/sysctl.c 2011-08-05 19:44:37.000000000 -0400
59922 @@ -84,6 +84,13 @@
59923
59924
59925 #if defined(CONFIG_SYSCTL)
59926 +#include <linux/grsecurity.h>
59927 +#include <linux/grinternal.h>
59928 +
59929 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
59930 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
59931 + const int op);
59932 +extern int gr_handle_chroot_sysctl(const int op);
59933
59934 /* External variables not in a header file. */
59935 extern int sysctl_overcommit_memory;
59936 @@ -196,6 +203,7 @@ static int sysrq_sysctl_handler(ctl_tabl
59937 }
59938
59939 #endif
59940 +extern struct ctl_table grsecurity_table[];
59941
59942 static struct ctl_table root_table[];
59943 static struct ctl_table_root sysctl_table_root;
59944 @@ -225,6 +233,20 @@ extern struct ctl_table epoll_table[];
59945 int sysctl_legacy_va_layout;
59946 #endif
59947
59948 +#ifdef CONFIG_PAX_SOFTMODE
59949 +static ctl_table pax_table[] = {
59950 + {
59951 + .procname = "softmode",
59952 + .data = &pax_softmode,
59953 + .maxlen = sizeof(unsigned int),
59954 + .mode = 0600,
59955 + .proc_handler = &proc_dointvec,
59956 + },
59957 +
59958 + { }
59959 +};
59960 +#endif
59961 +
59962 /* The default sysctl tables: */
59963
59964 static struct ctl_table root_table[] = {
59965 @@ -271,6 +293,22 @@ static int max_extfrag_threshold = 1000;
59966 #endif
59967
59968 static struct ctl_table kern_table[] = {
59969 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
59970 + {
59971 + .procname = "grsecurity",
59972 + .mode = 0500,
59973 + .child = grsecurity_table,
59974 + },
59975 +#endif
59976 +
59977 +#ifdef CONFIG_PAX_SOFTMODE
59978 + {
59979 + .procname = "pax",
59980 + .mode = 0500,
59981 + .child = pax_table,
59982 + },
59983 +#endif
59984 +
59985 {
59986 .procname = "sched_child_runs_first",
59987 .data = &sysctl_sched_child_runs_first,
59988 @@ -545,7 +583,7 @@ static struct ctl_table kern_table[] = {
59989 .data = &modprobe_path,
59990 .maxlen = KMOD_PATH_LEN,
59991 .mode = 0644,
59992 - .proc_handler = proc_dostring,
59993 + .proc_handler = proc_dostring_modpriv,
59994 },
59995 {
59996 .procname = "modules_disabled",
59997 @@ -707,16 +745,20 @@ static struct ctl_table kern_table[] = {
59998 .extra1 = &zero,
59999 .extra2 = &one,
60000 },
60001 +#endif
60002 {
60003 .procname = "kptr_restrict",
60004 .data = &kptr_restrict,
60005 .maxlen = sizeof(int),
60006 .mode = 0644,
60007 .proc_handler = proc_dmesg_restrict,
60008 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60009 + .extra1 = &two,
60010 +#else
60011 .extra1 = &zero,
60012 +#endif
60013 .extra2 = &two,
60014 },
60015 -#endif
60016 {
60017 .procname = "ngroups_max",
60018 .data = &ngroups_max,
60019 @@ -1189,6 +1231,13 @@ static struct ctl_table vm_table[] = {
60020 .proc_handler = proc_dointvec_minmax,
60021 .extra1 = &zero,
60022 },
60023 + {
60024 + .procname = "heap_stack_gap",
60025 + .data = &sysctl_heap_stack_gap,
60026 + .maxlen = sizeof(sysctl_heap_stack_gap),
60027 + .mode = 0644,
60028 + .proc_handler = proc_doulongvec_minmax,
60029 + },
60030 #else
60031 {
60032 .procname = "nr_trim_pages",
60033 @@ -1698,6 +1747,17 @@ static int test_perm(int mode, int op)
60034 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
60035 {
60036 int mode;
60037 + int error;
60038 +
60039 + if (table->parent != NULL && table->parent->procname != NULL &&
60040 + table->procname != NULL &&
60041 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
60042 + return -EACCES;
60043 + if (gr_handle_chroot_sysctl(op))
60044 + return -EACCES;
60045 + error = gr_handle_sysctl(table, op);
60046 + if (error)
60047 + return error;
60048
60049 if (root->permissions)
60050 mode = root->permissions(root, current->nsproxy, table);
60051 @@ -2102,6 +2162,16 @@ int proc_dostring(struct ctl_table *tabl
60052 buffer, lenp, ppos);
60053 }
60054
60055 +int proc_dostring_modpriv(struct ctl_table *table, int write,
60056 + void __user *buffer, size_t *lenp, loff_t *ppos)
60057 +{
60058 + if (write && !capable(CAP_SYS_MODULE))
60059 + return -EPERM;
60060 +
60061 + return _proc_do_string(table->data, table->maxlen, write,
60062 + buffer, lenp, ppos);
60063 +}
60064 +
60065 static size_t proc_skip_spaces(char **buf)
60066 {
60067 size_t ret;
60068 @@ -2207,6 +2277,8 @@ static int proc_put_long(void __user **b
60069 len = strlen(tmp);
60070 if (len > *size)
60071 len = *size;
60072 + if (len > sizeof(tmp))
60073 + len = sizeof(tmp);
60074 if (copy_to_user(*buf, tmp, len))
60075 return -EFAULT;
60076 *size -= len;
60077 @@ -2523,8 +2595,11 @@ static int __do_proc_doulongvec_minmax(v
60078 *i = val;
60079 } else {
60080 val = convdiv * (*i) / convmul;
60081 - if (!first)
60082 + if (!first) {
60083 err = proc_put_char(&buffer, &left, '\t');
60084 + if (err)
60085 + break;
60086 + }
60087 err = proc_put_long(&buffer, &left, val, false);
60088 if (err)
60089 break;
60090 @@ -2919,6 +2994,12 @@ int proc_dostring(struct ctl_table *tabl
60091 return -ENOSYS;
60092 }
60093
60094 +int proc_dostring_modpriv(struct ctl_table *table, int write,
60095 + void __user *buffer, size_t *lenp, loff_t *ppos)
60096 +{
60097 + return -ENOSYS;
60098 +}
60099 +
60100 int proc_dointvec(struct ctl_table *table, int write,
60101 void __user *buffer, size_t *lenp, loff_t *ppos)
60102 {
60103 @@ -2975,6 +3056,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
60104 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
60105 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
60106 EXPORT_SYMBOL(proc_dostring);
60107 +EXPORT_SYMBOL(proc_dostring_modpriv);
60108 EXPORT_SYMBOL(proc_doulongvec_minmax);
60109 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
60110 EXPORT_SYMBOL(register_sysctl_table);
60111 diff -urNp linux-2.6.39.4/kernel/sysctl_check.c linux-2.6.39.4/kernel/sysctl_check.c
60112 --- linux-2.6.39.4/kernel/sysctl_check.c 2011-05-19 00:06:34.000000000 -0400
60113 +++ linux-2.6.39.4/kernel/sysctl_check.c 2011-08-05 19:44:37.000000000 -0400
60114 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
60115 set_fail(&fail, table, "Directory with extra2");
60116 } else {
60117 if ((table->proc_handler == proc_dostring) ||
60118 + (table->proc_handler == proc_dostring_modpriv) ||
60119 (table->proc_handler == proc_dointvec) ||
60120 (table->proc_handler == proc_dointvec_minmax) ||
60121 (table->proc_handler == proc_dointvec_jiffies) ||
60122 diff -urNp linux-2.6.39.4/kernel/taskstats.c linux-2.6.39.4/kernel/taskstats.c
60123 --- linux-2.6.39.4/kernel/taskstats.c 2011-07-09 09:18:51.000000000 -0400
60124 +++ linux-2.6.39.4/kernel/taskstats.c 2011-08-05 19:44:37.000000000 -0400
60125 @@ -27,9 +27,12 @@
60126 #include <linux/cgroup.h>
60127 #include <linux/fs.h>
60128 #include <linux/file.h>
60129 +#include <linux/grsecurity.h>
60130 #include <net/genetlink.h>
60131 #include <asm/atomic.h>
60132
60133 +extern int gr_is_taskstats_denied(int pid);
60134 +
60135 /*
60136 * Maximum length of a cpumask that can be specified in
60137 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
60138 @@ -558,6 +561,9 @@ err:
60139
60140 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
60141 {
60142 + if (gr_is_taskstats_denied(current->pid))
60143 + return -EACCES;
60144 +
60145 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
60146 return cmd_attr_register_cpumask(info);
60147 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
60148 diff -urNp linux-2.6.39.4/kernel/time/tick-broadcast.c linux-2.6.39.4/kernel/time/tick-broadcast.c
60149 --- linux-2.6.39.4/kernel/time/tick-broadcast.c 2011-05-19 00:06:34.000000000 -0400
60150 +++ linux-2.6.39.4/kernel/time/tick-broadcast.c 2011-08-05 19:44:37.000000000 -0400
60151 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
60152 * then clear the broadcast bit.
60153 */
60154 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
60155 - int cpu = smp_processor_id();
60156 + cpu = smp_processor_id();
60157
60158 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
60159 tick_broadcast_clear_oneshot(cpu);
60160 diff -urNp linux-2.6.39.4/kernel/time/timekeeping.c linux-2.6.39.4/kernel/time/timekeeping.c
60161 --- linux-2.6.39.4/kernel/time/timekeeping.c 2011-05-19 00:06:34.000000000 -0400
60162 +++ linux-2.6.39.4/kernel/time/timekeeping.c 2011-08-05 19:44:37.000000000 -0400
60163 @@ -14,6 +14,7 @@
60164 #include <linux/init.h>
60165 #include <linux/mm.h>
60166 #include <linux/sched.h>
60167 +#include <linux/grsecurity.h>
60168 #include <linux/syscore_ops.h>
60169 #include <linux/clocksource.h>
60170 #include <linux/jiffies.h>
60171 @@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
60172 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
60173 return -EINVAL;
60174
60175 + gr_log_timechange();
60176 +
60177 write_seqlock_irqsave(&xtime_lock, flags);
60178
60179 timekeeping_forward_now();
60180 diff -urNp linux-2.6.39.4/kernel/time/timer_list.c linux-2.6.39.4/kernel/time/timer_list.c
60181 --- linux-2.6.39.4/kernel/time/timer_list.c 2011-05-19 00:06:34.000000000 -0400
60182 +++ linux-2.6.39.4/kernel/time/timer_list.c 2011-08-05 19:44:37.000000000 -0400
60183 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
60184
60185 static void print_name_offset(struct seq_file *m, void *sym)
60186 {
60187 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60188 + SEQ_printf(m, "<%p>", NULL);
60189 +#else
60190 char symname[KSYM_NAME_LEN];
60191
60192 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
60193 SEQ_printf(m, "<%pK>", sym);
60194 else
60195 SEQ_printf(m, "%s", symname);
60196 +#endif
60197 }
60198
60199 static void
60200 @@ -112,7 +116,11 @@ next_one:
60201 static void
60202 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
60203 {
60204 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60205 + SEQ_printf(m, " .base: %p\n", NULL);
60206 +#else
60207 SEQ_printf(m, " .base: %pK\n", base);
60208 +#endif
60209 SEQ_printf(m, " .index: %d\n",
60210 base->index);
60211 SEQ_printf(m, " .resolution: %Lu nsecs\n",
60212 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
60213 {
60214 struct proc_dir_entry *pe;
60215
60216 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
60217 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
60218 +#else
60219 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
60220 +#endif
60221 if (!pe)
60222 return -ENOMEM;
60223 return 0;
60224 diff -urNp linux-2.6.39.4/kernel/time/timer_stats.c linux-2.6.39.4/kernel/time/timer_stats.c
60225 --- linux-2.6.39.4/kernel/time/timer_stats.c 2011-05-19 00:06:34.000000000 -0400
60226 +++ linux-2.6.39.4/kernel/time/timer_stats.c 2011-08-05 19:44:37.000000000 -0400
60227 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
60228 static unsigned long nr_entries;
60229 static struct entry entries[MAX_ENTRIES];
60230
60231 -static atomic_t overflow_count;
60232 +static atomic_unchecked_t overflow_count;
60233
60234 /*
60235 * The entries are in a hash-table, for fast lookup:
60236 @@ -140,7 +140,7 @@ static void reset_entries(void)
60237 nr_entries = 0;
60238 memset(entries, 0, sizeof(entries));
60239 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
60240 - atomic_set(&overflow_count, 0);
60241 + atomic_set_unchecked(&overflow_count, 0);
60242 }
60243
60244 static struct entry *alloc_entry(void)
60245 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
60246 if (likely(entry))
60247 entry->count++;
60248 else
60249 - atomic_inc(&overflow_count);
60250 + atomic_inc_unchecked(&overflow_count);
60251
60252 out_unlock:
60253 raw_spin_unlock_irqrestore(lock, flags);
60254 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
60255
60256 static void print_name_offset(struct seq_file *m, unsigned long addr)
60257 {
60258 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60259 + seq_printf(m, "<%p>", NULL);
60260 +#else
60261 char symname[KSYM_NAME_LEN];
60262
60263 if (lookup_symbol_name(addr, symname) < 0)
60264 seq_printf(m, "<%p>", (void *)addr);
60265 else
60266 seq_printf(m, "%s", symname);
60267 +#endif
60268 }
60269
60270 static int tstats_show(struct seq_file *m, void *v)
60271 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
60272
60273 seq_puts(m, "Timer Stats Version: v0.2\n");
60274 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
60275 - if (atomic_read(&overflow_count))
60276 + if (atomic_read_unchecked(&overflow_count))
60277 seq_printf(m, "Overflow: %d entries\n",
60278 - atomic_read(&overflow_count));
60279 + atomic_read_unchecked(&overflow_count));
60280
60281 for (i = 0; i < nr_entries; i++) {
60282 entry = entries + i;
60283 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
60284 {
60285 struct proc_dir_entry *pe;
60286
60287 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
60288 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
60289 +#else
60290 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
60291 +#endif
60292 if (!pe)
60293 return -ENOMEM;
60294 return 0;
60295 diff -urNp linux-2.6.39.4/kernel/time.c linux-2.6.39.4/kernel/time.c
60296 --- linux-2.6.39.4/kernel/time.c 2011-05-19 00:06:34.000000000 -0400
60297 +++ linux-2.6.39.4/kernel/time.c 2011-08-05 19:44:37.000000000 -0400
60298 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
60299 return error;
60300
60301 if (tz) {
60302 + /* we log in do_settimeofday called below, so don't log twice
60303 + */
60304 + if (!tv)
60305 + gr_log_timechange();
60306 +
60307 /* SMP safe, global irq locking makes it work. */
60308 sys_tz = *tz;
60309 update_vsyscall_tz();
60310 diff -urNp linux-2.6.39.4/kernel/timer.c linux-2.6.39.4/kernel/timer.c
60311 --- linux-2.6.39.4/kernel/timer.c 2011-05-19 00:06:34.000000000 -0400
60312 +++ linux-2.6.39.4/kernel/timer.c 2011-08-05 19:44:37.000000000 -0400
60313 @@ -1305,7 +1305,7 @@ void update_process_times(int user_tick)
60314 /*
60315 * This function runs timers and the timer-tq in bottom half context.
60316 */
60317 -static void run_timer_softirq(struct softirq_action *h)
60318 +static void run_timer_softirq(void)
60319 {
60320 struct tvec_base *base = __this_cpu_read(tvec_bases);
60321
60322 diff -urNp linux-2.6.39.4/kernel/trace/blktrace.c linux-2.6.39.4/kernel/trace/blktrace.c
60323 --- linux-2.6.39.4/kernel/trace/blktrace.c 2011-05-19 00:06:34.000000000 -0400
60324 +++ linux-2.6.39.4/kernel/trace/blktrace.c 2011-08-05 19:44:37.000000000 -0400
60325 @@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
60326 struct blk_trace *bt = filp->private_data;
60327 char buf[16];
60328
60329 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
60330 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
60331
60332 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
60333 }
60334 @@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
60335 return 1;
60336
60337 bt = buf->chan->private_data;
60338 - atomic_inc(&bt->dropped);
60339 + atomic_inc_unchecked(&bt->dropped);
60340 return 0;
60341 }
60342
60343 @@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
60344
60345 bt->dir = dir;
60346 bt->dev = dev;
60347 - atomic_set(&bt->dropped, 0);
60348 + atomic_set_unchecked(&bt->dropped, 0);
60349
60350 ret = -EIO;
60351 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
60352 diff -urNp linux-2.6.39.4/kernel/trace/ftrace.c linux-2.6.39.4/kernel/trace/ftrace.c
60353 --- linux-2.6.39.4/kernel/trace/ftrace.c 2011-06-03 00:04:14.000000000 -0400
60354 +++ linux-2.6.39.4/kernel/trace/ftrace.c 2011-08-05 20:34:06.000000000 -0400
60355 @@ -1107,13 +1107,18 @@ ftrace_code_disable(struct module *mod,
60356
60357 ip = rec->ip;
60358
60359 + ret = ftrace_arch_code_modify_prepare();
60360 + FTRACE_WARN_ON(ret);
60361 + if (ret)
60362 + return 0;
60363 +
60364 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
60365 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
60366 if (ret) {
60367 ftrace_bug(ret, ip);
60368 rec->flags |= FTRACE_FL_FAILED;
60369 - return 0;
60370 }
60371 - return 1;
60372 + return ret ? 0 : 1;
60373 }
60374
60375 /*
60376 @@ -2011,7 +2016,7 @@ static void ftrace_free_entry_rcu(struct
60377
60378 int
60379 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
60380 - void *data)
60381 + void *data)
60382 {
60383 struct ftrace_func_probe *entry;
60384 struct ftrace_page *pg;
60385 diff -urNp linux-2.6.39.4/kernel/trace/trace.c linux-2.6.39.4/kernel/trace/trace.c
60386 --- linux-2.6.39.4/kernel/trace/trace.c 2011-05-19 00:06:34.000000000 -0400
60387 +++ linux-2.6.39.4/kernel/trace/trace.c 2011-08-05 19:44:37.000000000 -0400
60388 @@ -3330,6 +3330,8 @@ static ssize_t tracing_splice_read_pipe(
60389 size_t rem;
60390 unsigned int i;
60391
60392 + pax_track_stack();
60393 +
60394 if (splice_grow_spd(pipe, &spd))
60395 return -ENOMEM;
60396
60397 @@ -3813,6 +3815,8 @@ tracing_buffers_splice_read(struct file
60398 int entries, size, i;
60399 size_t ret;
60400
60401 + pax_track_stack();
60402 +
60403 if (splice_grow_spd(pipe, &spd))
60404 return -ENOMEM;
60405
60406 @@ -3981,10 +3985,9 @@ static const struct file_operations trac
60407 };
60408 #endif
60409
60410 -static struct dentry *d_tracer;
60411 -
60412 struct dentry *tracing_init_dentry(void)
60413 {
60414 + static struct dentry *d_tracer;
60415 static int once;
60416
60417 if (d_tracer)
60418 @@ -4004,10 +4007,9 @@ struct dentry *tracing_init_dentry(void)
60419 return d_tracer;
60420 }
60421
60422 -static struct dentry *d_percpu;
60423 -
60424 struct dentry *tracing_dentry_percpu(void)
60425 {
60426 + static struct dentry *d_percpu;
60427 static int once;
60428 struct dentry *d_tracer;
60429
60430 diff -urNp linux-2.6.39.4/kernel/trace/trace_events.c linux-2.6.39.4/kernel/trace/trace_events.c
60431 --- linux-2.6.39.4/kernel/trace/trace_events.c 2011-05-19 00:06:34.000000000 -0400
60432 +++ linux-2.6.39.4/kernel/trace/trace_events.c 2011-08-05 20:34:06.000000000 -0400
60433 @@ -1241,10 +1241,6 @@ static LIST_HEAD(ftrace_module_file_list
60434 struct ftrace_module_file_ops {
60435 struct list_head list;
60436 struct module *mod;
60437 - struct file_operations id;
60438 - struct file_operations enable;
60439 - struct file_operations format;
60440 - struct file_operations filter;
60441 };
60442
60443 static struct ftrace_module_file_ops *
60444 @@ -1265,17 +1261,12 @@ trace_create_file_ops(struct module *mod
60445
60446 file_ops->mod = mod;
60447
60448 - file_ops->id = ftrace_event_id_fops;
60449 - file_ops->id.owner = mod;
60450 -
60451 - file_ops->enable = ftrace_enable_fops;
60452 - file_ops->enable.owner = mod;
60453 -
60454 - file_ops->filter = ftrace_event_filter_fops;
60455 - file_ops->filter.owner = mod;
60456 -
60457 - file_ops->format = ftrace_event_format_fops;
60458 - file_ops->format.owner = mod;
60459 + pax_open_kernel();
60460 + *(void **)&mod->trace_id.owner = mod;
60461 + *(void **)&mod->trace_enable.owner = mod;
60462 + *(void **)&mod->trace_filter.owner = mod;
60463 + *(void **)&mod->trace_format.owner = mod;
60464 + pax_close_kernel();
60465
60466 list_add(&file_ops->list, &ftrace_module_file_list);
60467
60468 @@ -1299,8 +1290,8 @@ static void trace_module_add_events(stru
60469
60470 for_each_event(call, start, end) {
60471 __trace_add_event_call(*call, mod,
60472 - &file_ops->id, &file_ops->enable,
60473 - &file_ops->filter, &file_ops->format);
60474 + &mod->trace_id, &mod->trace_enable,
60475 + &mod->trace_filter, &mod->trace_format);
60476 }
60477 }
60478
60479 diff -urNp linux-2.6.39.4/kernel/trace/trace_mmiotrace.c linux-2.6.39.4/kernel/trace/trace_mmiotrace.c
60480 --- linux-2.6.39.4/kernel/trace/trace_mmiotrace.c 2011-05-19 00:06:34.000000000 -0400
60481 +++ linux-2.6.39.4/kernel/trace/trace_mmiotrace.c 2011-08-05 19:44:37.000000000 -0400
60482 @@ -24,7 +24,7 @@ struct header_iter {
60483 static struct trace_array *mmio_trace_array;
60484 static bool overrun_detected;
60485 static unsigned long prev_overruns;
60486 -static atomic_t dropped_count;
60487 +static atomic_unchecked_t dropped_count;
60488
60489 static void mmio_reset_data(struct trace_array *tr)
60490 {
60491 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
60492
60493 static unsigned long count_overruns(struct trace_iterator *iter)
60494 {
60495 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
60496 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
60497 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
60498
60499 if (over > prev_overruns)
60500 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
60501 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
60502 sizeof(*entry), 0, pc);
60503 if (!event) {
60504 - atomic_inc(&dropped_count);
60505 + atomic_inc_unchecked(&dropped_count);
60506 return;
60507 }
60508 entry = ring_buffer_event_data(event);
60509 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
60510 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
60511 sizeof(*entry), 0, pc);
60512 if (!event) {
60513 - atomic_inc(&dropped_count);
60514 + atomic_inc_unchecked(&dropped_count);
60515 return;
60516 }
60517 entry = ring_buffer_event_data(event);
60518 diff -urNp linux-2.6.39.4/kernel/trace/trace_output.c linux-2.6.39.4/kernel/trace/trace_output.c
60519 --- linux-2.6.39.4/kernel/trace/trace_output.c 2011-05-19 00:06:34.000000000 -0400
60520 +++ linux-2.6.39.4/kernel/trace/trace_output.c 2011-08-05 19:44:37.000000000 -0400
60521 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
60522
60523 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
60524 if (!IS_ERR(p)) {
60525 - p = mangle_path(s->buffer + s->len, p, "\n");
60526 + p = mangle_path(s->buffer + s->len, p, "\n\\");
60527 if (p) {
60528 s->len = p - s->buffer;
60529 return 1;
60530 diff -urNp linux-2.6.39.4/kernel/trace/trace_stack.c linux-2.6.39.4/kernel/trace/trace_stack.c
60531 --- linux-2.6.39.4/kernel/trace/trace_stack.c 2011-05-19 00:06:34.000000000 -0400
60532 +++ linux-2.6.39.4/kernel/trace/trace_stack.c 2011-08-05 19:44:37.000000000 -0400
60533 @@ -50,7 +50,7 @@ static inline void check_stack(void)
60534 return;
60535
60536 /* we do not handle interrupt stacks yet */
60537 - if (!object_is_on_stack(&this_size))
60538 + if (!object_starts_on_stack(&this_size))
60539 return;
60540
60541 local_irq_save(flags);
60542 diff -urNp linux-2.6.39.4/kernel/trace/trace_workqueue.c linux-2.6.39.4/kernel/trace/trace_workqueue.c
60543 --- linux-2.6.39.4/kernel/trace/trace_workqueue.c 2011-05-19 00:06:34.000000000 -0400
60544 +++ linux-2.6.39.4/kernel/trace/trace_workqueue.c 2011-08-05 19:44:37.000000000 -0400
60545 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
60546 int cpu;
60547 pid_t pid;
60548 /* Can be inserted from interrupt or user context, need to be atomic */
60549 - atomic_t inserted;
60550 + atomic_unchecked_t inserted;
60551 /*
60552 * Don't need to be atomic, works are serialized in a single workqueue thread
60553 * on a single CPU.
60554 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
60555 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
60556 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
60557 if (node->pid == wq_thread->pid) {
60558 - atomic_inc(&node->inserted);
60559 + atomic_inc_unchecked(&node->inserted);
60560 goto found;
60561 }
60562 }
60563 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
60564 tsk = get_pid_task(pid, PIDTYPE_PID);
60565 if (tsk) {
60566 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
60567 - atomic_read(&cws->inserted), cws->executed,
60568 + atomic_read_unchecked(&cws->inserted), cws->executed,
60569 tsk->comm);
60570 put_task_struct(tsk);
60571 }
60572 diff -urNp linux-2.6.39.4/lib/bug.c linux-2.6.39.4/lib/bug.c
60573 --- linux-2.6.39.4/lib/bug.c 2011-05-19 00:06:34.000000000 -0400
60574 +++ linux-2.6.39.4/lib/bug.c 2011-08-05 19:44:37.000000000 -0400
60575 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
60576 return BUG_TRAP_TYPE_NONE;
60577
60578 bug = find_bug(bugaddr);
60579 + if (!bug)
60580 + return BUG_TRAP_TYPE_NONE;
60581
60582 file = NULL;
60583 line = 0;
60584 diff -urNp linux-2.6.39.4/lib/debugobjects.c linux-2.6.39.4/lib/debugobjects.c
60585 --- linux-2.6.39.4/lib/debugobjects.c 2011-07-09 09:18:51.000000000 -0400
60586 +++ linux-2.6.39.4/lib/debugobjects.c 2011-08-05 19:44:37.000000000 -0400
60587 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
60588 if (limit > 4)
60589 return;
60590
60591 - is_on_stack = object_is_on_stack(addr);
60592 + is_on_stack = object_starts_on_stack(addr);
60593 if (is_on_stack == onstack)
60594 return;
60595
60596 diff -urNp linux-2.6.39.4/lib/dma-debug.c linux-2.6.39.4/lib/dma-debug.c
60597 --- linux-2.6.39.4/lib/dma-debug.c 2011-05-19 00:06:34.000000000 -0400
60598 +++ linux-2.6.39.4/lib/dma-debug.c 2011-08-05 19:44:37.000000000 -0400
60599 @@ -862,7 +862,7 @@ out:
60600
60601 static void check_for_stack(struct device *dev, void *addr)
60602 {
60603 - if (object_is_on_stack(addr))
60604 + if (object_starts_on_stack(addr))
60605 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
60606 "stack [addr=%p]\n", addr);
60607 }
60608 diff -urNp linux-2.6.39.4/lib/inflate.c linux-2.6.39.4/lib/inflate.c
60609 --- linux-2.6.39.4/lib/inflate.c 2011-05-19 00:06:34.000000000 -0400
60610 +++ linux-2.6.39.4/lib/inflate.c 2011-08-05 19:44:37.000000000 -0400
60611 @@ -269,7 +269,7 @@ static void free(void *where)
60612 malloc_ptr = free_mem_ptr;
60613 }
60614 #else
60615 -#define malloc(a) kmalloc(a, GFP_KERNEL)
60616 +#define malloc(a) kmalloc((a), GFP_KERNEL)
60617 #define free(a) kfree(a)
60618 #endif
60619
60620 diff -urNp linux-2.6.39.4/lib/Kconfig.debug linux-2.6.39.4/lib/Kconfig.debug
60621 --- linux-2.6.39.4/lib/Kconfig.debug 2011-05-19 00:06:34.000000000 -0400
60622 +++ linux-2.6.39.4/lib/Kconfig.debug 2011-08-05 19:44:37.000000000 -0400
60623 @@ -1078,6 +1078,7 @@ config LATENCYTOP
60624 depends on DEBUG_KERNEL
60625 depends on STACKTRACE_SUPPORT
60626 depends on PROC_FS
60627 + depends on !GRKERNSEC_HIDESYM
60628 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
60629 select KALLSYMS
60630 select KALLSYMS_ALL
60631 diff -urNp linux-2.6.39.4/lib/kref.c linux-2.6.39.4/lib/kref.c
60632 --- linux-2.6.39.4/lib/kref.c 2011-05-19 00:06:34.000000000 -0400
60633 +++ linux-2.6.39.4/lib/kref.c 2011-08-05 19:44:37.000000000 -0400
60634 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
60635 */
60636 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
60637 {
60638 - WARN_ON(release == NULL);
60639 + BUG_ON(release == NULL);
60640 WARN_ON(release == (void (*)(struct kref *))kfree);
60641
60642 if (atomic_dec_and_test(&kref->refcount)) {
60643 diff -urNp linux-2.6.39.4/lib/radix-tree.c linux-2.6.39.4/lib/radix-tree.c
60644 --- linux-2.6.39.4/lib/radix-tree.c 2011-05-19 00:06:34.000000000 -0400
60645 +++ linux-2.6.39.4/lib/radix-tree.c 2011-08-05 19:44:37.000000000 -0400
60646 @@ -80,7 +80,7 @@ struct radix_tree_preload {
60647 int nr;
60648 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
60649 };
60650 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
60651 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
60652
60653 static inline void *ptr_to_indirect(void *ptr)
60654 {
60655 diff -urNp linux-2.6.39.4/lib/vsprintf.c linux-2.6.39.4/lib/vsprintf.c
60656 --- linux-2.6.39.4/lib/vsprintf.c 2011-05-19 00:06:34.000000000 -0400
60657 +++ linux-2.6.39.4/lib/vsprintf.c 2011-08-05 19:44:37.000000000 -0400
60658 @@ -16,6 +16,9 @@
60659 * - scnprintf and vscnprintf
60660 */
60661
60662 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60663 +#define __INCLUDED_BY_HIDESYM 1
60664 +#endif
60665 #include <stdarg.h>
60666 #include <linux/module.h>
60667 #include <linux/types.h>
60668 @@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
60669 char sym[KSYM_SYMBOL_LEN];
60670 if (ext == 'B')
60671 sprint_backtrace(sym, value);
60672 - else if (ext != 'f' && ext != 's')
60673 + else if (ext != 'f' && ext != 's' && ext != 'a')
60674 sprint_symbol(sym, value);
60675 else
60676 kallsyms_lookup(value, NULL, NULL, NULL, sym);
60677 @@ -797,7 +800,11 @@ char *uuid_string(char *buf, char *end,
60678 return string(buf, end, uuid, spec);
60679 }
60680
60681 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60682 +int kptr_restrict __read_mostly = 2;
60683 +#else
60684 int kptr_restrict __read_mostly;
60685 +#endif
60686
60687 /*
60688 * Show a '%p' thing. A kernel extension is that the '%p' is followed
60689 @@ -811,6 +818,8 @@ int kptr_restrict __read_mostly;
60690 * - 'S' For symbolic direct pointers with offset
60691 * - 's' For symbolic direct pointers without offset
60692 * - 'B' For backtraced symbolic direct pointers with offset
60693 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
60694 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
60695 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
60696 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
60697 * - 'M' For a 6-byte MAC address, it prints the address in the
60698 @@ -855,12 +864,12 @@ char *pointer(const char *fmt, char *buf
60699 {
60700 if (!ptr && *fmt != 'K') {
60701 /*
60702 - * Print (null) with the same width as a pointer so it makes
60703 + * Print (nil) with the same width as a pointer so it makes
60704 * tabular output look nice.
60705 */
60706 if (spec.field_width == -1)
60707 spec.field_width = 2 * sizeof(void *);
60708 - return string(buf, end, "(null)", spec);
60709 + return string(buf, end, "(nil)", spec);
60710 }
60711
60712 switch (*fmt) {
60713 @@ -870,6 +879,13 @@ char *pointer(const char *fmt, char *buf
60714 /* Fallthrough */
60715 case 'S':
60716 case 's':
60717 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60718 + break;
60719 +#else
60720 + return symbol_string(buf, end, ptr, spec, *fmt);
60721 +#endif
60722 + case 'A':
60723 + case 'a':
60724 case 'B':
60725 return symbol_string(buf, end, ptr, spec, *fmt);
60726 case 'R':
60727 @@ -1632,11 +1648,11 @@ int bstr_printf(char *buf, size_t size,
60728 typeof(type) value; \
60729 if (sizeof(type) == 8) { \
60730 args = PTR_ALIGN(args, sizeof(u32)); \
60731 - *(u32 *)&value = *(u32 *)args; \
60732 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
60733 + *(u32 *)&value = *(const u32 *)args; \
60734 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
60735 } else { \
60736 args = PTR_ALIGN(args, sizeof(type)); \
60737 - value = *(typeof(type) *)args; \
60738 + value = *(const typeof(type) *)args; \
60739 } \
60740 args += sizeof(type); \
60741 value; \
60742 @@ -1699,7 +1715,7 @@ int bstr_printf(char *buf, size_t size,
60743 case FORMAT_TYPE_STR: {
60744 const char *str_arg = args;
60745 args += strlen(str_arg) + 1;
60746 - str = string(str, end, (char *)str_arg, spec);
60747 + str = string(str, end, str_arg, spec);
60748 break;
60749 }
60750
60751 diff -urNp linux-2.6.39.4/localversion-grsec linux-2.6.39.4/localversion-grsec
60752 --- linux-2.6.39.4/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
60753 +++ linux-2.6.39.4/localversion-grsec 2011-08-05 19:44:37.000000000 -0400
60754 @@ -0,0 +1 @@
60755 +-grsec
60756 diff -urNp linux-2.6.39.4/Makefile linux-2.6.39.4/Makefile
60757 --- linux-2.6.39.4/Makefile 2011-08-05 21:11:51.000000000 -0400
60758 +++ linux-2.6.39.4/Makefile 2011-08-07 14:17:20.000000000 -0400
60759 @@ -237,8 +237,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
60760
60761 HOSTCC = gcc
60762 HOSTCXX = g++
60763 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
60764 -HOSTCXXFLAGS = -O2
60765 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
60766 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
60767 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
60768
60769 # Decide whether to build built-in, modular, or both.
60770 # Normally, just do built-in.
60771 @@ -356,10 +357,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
60772 KBUILD_CPPFLAGS := -D__KERNEL__
60773
60774 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
60775 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
60776 -fno-strict-aliasing -fno-common \
60777 -Werror-implicit-function-declaration \
60778 -Wno-format-security \
60779 -fno-delete-null-pointer-checks
60780 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
60781 KBUILD_AFLAGS_KERNEL :=
60782 KBUILD_CFLAGS_KERNEL :=
60783 KBUILD_AFLAGS := -D__ASSEMBLY__
60784 @@ -397,8 +400,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
60785 # Rules shared between *config targets and build targets
60786
60787 # Basic helpers built in scripts/
60788 -PHONY += scripts_basic
60789 -scripts_basic:
60790 +PHONY += scripts_basic gcc-plugins
60791 +scripts_basic: gcc-plugins
60792 $(Q)$(MAKE) $(build)=scripts/basic
60793 $(Q)rm -f .tmp_quiet_recordmcount
60794
60795 @@ -548,6 +551,25 @@ else
60796 KBUILD_CFLAGS += -O2
60797 endif
60798
60799 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
60800 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so
60801 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
60802 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
60803 +endif
60804 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
60805 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
60806 +gcc-plugins:
60807 + $(Q)$(MAKE) $(build)=tools/gcc
60808 +else
60809 +gcc-plugins:
60810 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
60811 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
60812 +else
60813 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
60814 +endif
60815 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
60816 +endif
60817 +
60818 include $(srctree)/arch/$(SRCARCH)/Makefile
60819
60820 ifneq ($(CONFIG_FRAME_WARN),0)
60821 @@ -685,7 +707,7 @@ export mod_strip_cmd
60822
60823
60824 ifeq ($(KBUILD_EXTMOD),)
60825 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
60826 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
60827
60828 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
60829 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
60830 @@ -947,7 +969,7 @@ ifneq ($(KBUILD_SRC),)
60831 endif
60832
60833 # prepare2 creates a makefile if using a separate output directory
60834 -prepare2: prepare3 outputmakefile
60835 +prepare2: prepare3 outputmakefile gcc-plugins
60836
60837 prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
60838 include/config/auto.conf
60839 @@ -1375,7 +1397,7 @@ clean: $(clean-dirs)
60840 $(call cmd,rmdirs)
60841 $(call cmd,rmfiles)
60842 @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
60843 - \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
60844 + \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
60845 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
60846 -o -name '*.symtypes' -o -name 'modules.order' \
60847 -o -name modules.builtin -o -name '.tmp_*.o.*' \
60848 diff -urNp linux-2.6.39.4/mm/filemap.c linux-2.6.39.4/mm/filemap.c
60849 --- linux-2.6.39.4/mm/filemap.c 2011-05-19 00:06:34.000000000 -0400
60850 +++ linux-2.6.39.4/mm/filemap.c 2011-08-05 19:44:37.000000000 -0400
60851 @@ -1724,7 +1724,7 @@ int generic_file_mmap(struct file * file
60852 struct address_space *mapping = file->f_mapping;
60853
60854 if (!mapping->a_ops->readpage)
60855 - return -ENOEXEC;
60856 + return -ENODEV;
60857 file_accessed(file);
60858 vma->vm_ops = &generic_file_vm_ops;
60859 vma->vm_flags |= VM_CAN_NONLINEAR;
60860 @@ -2120,6 +2120,7 @@ inline int generic_write_checks(struct f
60861 *pos = i_size_read(inode);
60862
60863 if (limit != RLIM_INFINITY) {
60864 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
60865 if (*pos >= limit) {
60866 send_sig(SIGXFSZ, current, 0);
60867 return -EFBIG;
60868 diff -urNp linux-2.6.39.4/mm/fremap.c linux-2.6.39.4/mm/fremap.c
60869 --- linux-2.6.39.4/mm/fremap.c 2011-05-19 00:06:34.000000000 -0400
60870 +++ linux-2.6.39.4/mm/fremap.c 2011-08-05 19:44:37.000000000 -0400
60871 @@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
60872 retry:
60873 vma = find_vma(mm, start);
60874
60875 +#ifdef CONFIG_PAX_SEGMEXEC
60876 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
60877 + goto out;
60878 +#endif
60879 +
60880 /*
60881 * Make sure the vma is shared, that it supports prefaulting,
60882 * and that the remapped range is valid and fully within
60883 @@ -224,7 +229,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
60884 /*
60885 * drop PG_Mlocked flag for over-mapped range
60886 */
60887 - unsigned int saved_flags = vma->vm_flags;
60888 + unsigned long saved_flags = vma->vm_flags;
60889 munlock_vma_pages_range(vma, start, start + size);
60890 vma->vm_flags = saved_flags;
60891 }
60892 diff -urNp linux-2.6.39.4/mm/highmem.c linux-2.6.39.4/mm/highmem.c
60893 --- linux-2.6.39.4/mm/highmem.c 2011-05-19 00:06:34.000000000 -0400
60894 +++ linux-2.6.39.4/mm/highmem.c 2011-08-05 19:44:37.000000000 -0400
60895 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
60896 * So no dangers, even with speculative execution.
60897 */
60898 page = pte_page(pkmap_page_table[i]);
60899 + pax_open_kernel();
60900 pte_clear(&init_mm, (unsigned long)page_address(page),
60901 &pkmap_page_table[i]);
60902 -
60903 + pax_close_kernel();
60904 set_page_address(page, NULL);
60905 need_flush = 1;
60906 }
60907 @@ -186,9 +187,11 @@ start:
60908 }
60909 }
60910 vaddr = PKMAP_ADDR(last_pkmap_nr);
60911 +
60912 + pax_open_kernel();
60913 set_pte_at(&init_mm, vaddr,
60914 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
60915 -
60916 + pax_close_kernel();
60917 pkmap_count[last_pkmap_nr] = 1;
60918 set_page_address(page, (void *)vaddr);
60919
60920 diff -urNp linux-2.6.39.4/mm/huge_memory.c linux-2.6.39.4/mm/huge_memory.c
60921 --- linux-2.6.39.4/mm/huge_memory.c 2011-05-19 00:06:34.000000000 -0400
60922 +++ linux-2.6.39.4/mm/huge_memory.c 2011-08-05 19:44:37.000000000 -0400
60923 @@ -702,7 +702,7 @@ out:
60924 * run pte_offset_map on the pmd, if an huge pmd could
60925 * materialize from under us from a different thread.
60926 */
60927 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
60928 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
60929 return VM_FAULT_OOM;
60930 /* if an huge pmd materialized from under us just retry later */
60931 if (unlikely(pmd_trans_huge(*pmd)))
60932 diff -urNp linux-2.6.39.4/mm/hugetlb.c linux-2.6.39.4/mm/hugetlb.c
60933 --- linux-2.6.39.4/mm/hugetlb.c 2011-07-09 09:18:51.000000000 -0400
60934 +++ linux-2.6.39.4/mm/hugetlb.c 2011-08-05 19:44:37.000000000 -0400
60935 @@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
60936 return 1;
60937 }
60938
60939 +#ifdef CONFIG_PAX_SEGMEXEC
60940 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
60941 +{
60942 + struct mm_struct *mm = vma->vm_mm;
60943 + struct vm_area_struct *vma_m;
60944 + unsigned long address_m;
60945 + pte_t *ptep_m;
60946 +
60947 + vma_m = pax_find_mirror_vma(vma);
60948 + if (!vma_m)
60949 + return;
60950 +
60951 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
60952 + address_m = address + SEGMEXEC_TASK_SIZE;
60953 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
60954 + get_page(page_m);
60955 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
60956 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
60957 +}
60958 +#endif
60959 +
60960 /*
60961 * Hugetlb_cow() should be called with page lock of the original hugepage held.
60962 */
60963 @@ -2440,6 +2461,11 @@ retry_avoidcopy:
60964 make_huge_pte(vma, new_page, 1));
60965 page_remove_rmap(old_page);
60966 hugepage_add_new_anon_rmap(new_page, vma, address);
60967 +
60968 +#ifdef CONFIG_PAX_SEGMEXEC
60969 + pax_mirror_huge_pte(vma, address, new_page);
60970 +#endif
60971 +
60972 /* Make the old page be freed below */
60973 new_page = old_page;
60974 mmu_notifier_invalidate_range_end(mm,
60975 @@ -2591,6 +2617,10 @@ retry:
60976 && (vma->vm_flags & VM_SHARED)));
60977 set_huge_pte_at(mm, address, ptep, new_pte);
60978
60979 +#ifdef CONFIG_PAX_SEGMEXEC
60980 + pax_mirror_huge_pte(vma, address, page);
60981 +#endif
60982 +
60983 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
60984 /* Optimization, do the COW without a second fault */
60985 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
60986 @@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
60987 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
60988 struct hstate *h = hstate_vma(vma);
60989
60990 +#ifdef CONFIG_PAX_SEGMEXEC
60991 + struct vm_area_struct *vma_m;
60992 +#endif
60993 +
60994 ptep = huge_pte_offset(mm, address);
60995 if (ptep) {
60996 entry = huge_ptep_get(ptep);
60997 @@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
60998 VM_FAULT_SET_HINDEX(h - hstates);
60999 }
61000
61001 +#ifdef CONFIG_PAX_SEGMEXEC
61002 + vma_m = pax_find_mirror_vma(vma);
61003 + if (vma_m) {
61004 + unsigned long address_m;
61005 +
61006 + if (vma->vm_start > vma_m->vm_start) {
61007 + address_m = address;
61008 + address -= SEGMEXEC_TASK_SIZE;
61009 + vma = vma_m;
61010 + h = hstate_vma(vma);
61011 + } else
61012 + address_m = address + SEGMEXEC_TASK_SIZE;
61013 +
61014 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
61015 + return VM_FAULT_OOM;
61016 + address_m &= HPAGE_MASK;
61017 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
61018 + }
61019 +#endif
61020 +
61021 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
61022 if (!ptep)
61023 return VM_FAULT_OOM;
61024 diff -urNp linux-2.6.39.4/mm/internal.h linux-2.6.39.4/mm/internal.h
61025 --- linux-2.6.39.4/mm/internal.h 2011-05-19 00:06:34.000000000 -0400
61026 +++ linux-2.6.39.4/mm/internal.h 2011-08-05 19:44:37.000000000 -0400
61027 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
61028 * in mm/page_alloc.c
61029 */
61030 extern void __free_pages_bootmem(struct page *page, unsigned int order);
61031 +extern void free_compound_page(struct page *page);
61032 extern void prep_compound_page(struct page *page, unsigned long order);
61033 #ifdef CONFIG_MEMORY_FAILURE
61034 extern bool is_free_buddy_page(struct page *page);
61035 diff -urNp linux-2.6.39.4/mm/Kconfig linux-2.6.39.4/mm/Kconfig
61036 --- linux-2.6.39.4/mm/Kconfig 2011-05-19 00:06:34.000000000 -0400
61037 +++ linux-2.6.39.4/mm/Kconfig 2011-08-05 19:44:37.000000000 -0400
61038 @@ -240,7 +240,7 @@ config KSM
61039 config DEFAULT_MMAP_MIN_ADDR
61040 int "Low address space to protect from user allocation"
61041 depends on MMU
61042 - default 4096
61043 + default 65536
61044 help
61045 This is the portion of low virtual memory which should be protected
61046 from userspace allocation. Keeping a user from writing to low pages
61047 diff -urNp linux-2.6.39.4/mm/kmemleak.c linux-2.6.39.4/mm/kmemleak.c
61048 --- linux-2.6.39.4/mm/kmemleak.c 2011-06-03 00:04:14.000000000 -0400
61049 +++ linux-2.6.39.4/mm/kmemleak.c 2011-08-05 19:44:37.000000000 -0400
61050 @@ -357,7 +357,7 @@ static void print_unreferenced(struct se
61051
61052 for (i = 0; i < object->trace_len; i++) {
61053 void *ptr = (void *)object->trace[i];
61054 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
61055 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
61056 }
61057 }
61058
61059 diff -urNp linux-2.6.39.4/mm/maccess.c linux-2.6.39.4/mm/maccess.c
61060 --- linux-2.6.39.4/mm/maccess.c 2011-05-19 00:06:34.000000000 -0400
61061 +++ linux-2.6.39.4/mm/maccess.c 2011-08-05 19:44:37.000000000 -0400
61062 @@ -15,10 +15,10 @@
61063 * happens, handle that and return -EFAULT.
61064 */
61065
61066 -long __weak probe_kernel_read(void *dst, void *src, size_t size)
61067 +long __weak probe_kernel_read(void *dst, const void *src, size_t size)
61068 __attribute__((alias("__probe_kernel_read")));
61069
61070 -long __probe_kernel_read(void *dst, void *src, size_t size)
61071 +long __probe_kernel_read(void *dst, const void *src, size_t size)
61072 {
61073 long ret;
61074 mm_segment_t old_fs = get_fs();
61075 @@ -43,10 +43,10 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
61076 * Safely write to address @dst from the buffer at @src. If a kernel fault
61077 * happens, handle that and return -EFAULT.
61078 */
61079 -long __weak probe_kernel_write(void *dst, void *src, size_t size)
61080 +long __weak probe_kernel_write(void *dst, const void *src, size_t size)
61081 __attribute__((alias("__probe_kernel_write")));
61082
61083 -long __probe_kernel_write(void *dst, void *src, size_t size)
61084 +long __probe_kernel_write(void *dst, const void *src, size_t size)
61085 {
61086 long ret;
61087 mm_segment_t old_fs = get_fs();
61088 diff -urNp linux-2.6.39.4/mm/madvise.c linux-2.6.39.4/mm/madvise.c
61089 --- linux-2.6.39.4/mm/madvise.c 2011-05-19 00:06:34.000000000 -0400
61090 +++ linux-2.6.39.4/mm/madvise.c 2011-08-05 19:44:37.000000000 -0400
61091 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
61092 pgoff_t pgoff;
61093 unsigned long new_flags = vma->vm_flags;
61094
61095 +#ifdef CONFIG_PAX_SEGMEXEC
61096 + struct vm_area_struct *vma_m;
61097 +#endif
61098 +
61099 switch (behavior) {
61100 case MADV_NORMAL:
61101 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
61102 @@ -110,6 +114,13 @@ success:
61103 /*
61104 * vm_flags is protected by the mmap_sem held in write mode.
61105 */
61106 +
61107 +#ifdef CONFIG_PAX_SEGMEXEC
61108 + vma_m = pax_find_mirror_vma(vma);
61109 + if (vma_m)
61110 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
61111 +#endif
61112 +
61113 vma->vm_flags = new_flags;
61114
61115 out:
61116 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
61117 struct vm_area_struct ** prev,
61118 unsigned long start, unsigned long end)
61119 {
61120 +
61121 +#ifdef CONFIG_PAX_SEGMEXEC
61122 + struct vm_area_struct *vma_m;
61123 +#endif
61124 +
61125 *prev = vma;
61126 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
61127 return -EINVAL;
61128 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
61129 zap_page_range(vma, start, end - start, &details);
61130 } else
61131 zap_page_range(vma, start, end - start, NULL);
61132 +
61133 +#ifdef CONFIG_PAX_SEGMEXEC
61134 + vma_m = pax_find_mirror_vma(vma);
61135 + if (vma_m) {
61136 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
61137 + struct zap_details details = {
61138 + .nonlinear_vma = vma_m,
61139 + .last_index = ULONG_MAX,
61140 + };
61141 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
61142 + } else
61143 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
61144 + }
61145 +#endif
61146 +
61147 return 0;
61148 }
61149
61150 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
61151 if (end < start)
61152 goto out;
61153
61154 +#ifdef CONFIG_PAX_SEGMEXEC
61155 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
61156 + if (end > SEGMEXEC_TASK_SIZE)
61157 + goto out;
61158 + } else
61159 +#endif
61160 +
61161 + if (end > TASK_SIZE)
61162 + goto out;
61163 +
61164 error = 0;
61165 if (end == start)
61166 goto out;
61167 diff -urNp linux-2.6.39.4/mm/memory.c linux-2.6.39.4/mm/memory.c
61168 --- linux-2.6.39.4/mm/memory.c 2011-05-19 00:06:34.000000000 -0400
61169 +++ linux-2.6.39.4/mm/memory.c 2011-08-05 19:44:37.000000000 -0400
61170 @@ -259,8 +259,12 @@ static inline void free_pmd_range(struct
61171 return;
61172
61173 pmd = pmd_offset(pud, start);
61174 +
61175 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
61176 pud_clear(pud);
61177 pmd_free_tlb(tlb, pmd, start);
61178 +#endif
61179 +
61180 }
61181
61182 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
61183 @@ -291,9 +295,12 @@ static inline void free_pud_range(struct
61184 if (end - 1 > ceiling - 1)
61185 return;
61186
61187 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
61188 pud = pud_offset(pgd, start);
61189 pgd_clear(pgd);
61190 pud_free_tlb(tlb, pud, start);
61191 +#endif
61192 +
61193 }
61194
61195 /*
61196 @@ -1410,12 +1417,6 @@ no_page_table:
61197 return page;
61198 }
61199
61200 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
61201 -{
61202 - return stack_guard_page_start(vma, addr) ||
61203 - stack_guard_page_end(vma, addr+PAGE_SIZE);
61204 -}
61205 -
61206 /**
61207 * __get_user_pages() - pin user pages in memory
61208 * @tsk: task_struct of target task
61209 @@ -1488,10 +1489,10 @@ int __get_user_pages(struct task_struct
61210 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
61211 i = 0;
61212
61213 - do {
61214 + while (nr_pages) {
61215 struct vm_area_struct *vma;
61216
61217 - vma = find_extend_vma(mm, start);
61218 + vma = find_vma(mm, start);
61219 if (!vma && in_gate_area(mm, start)) {
61220 unsigned long pg = start & PAGE_MASK;
61221 pgd_t *pgd;
61222 @@ -1539,7 +1540,7 @@ int __get_user_pages(struct task_struct
61223 goto next_page;
61224 }
61225
61226 - if (!vma ||
61227 + if (!vma || start < vma->vm_start ||
61228 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
61229 !(vm_flags & vma->vm_flags))
61230 return i ? : -EFAULT;
61231 @@ -1566,11 +1567,6 @@ int __get_user_pages(struct task_struct
61232 int ret;
61233 unsigned int fault_flags = 0;
61234
61235 - /* For mlock, just skip the stack guard page. */
61236 - if (foll_flags & FOLL_MLOCK) {
61237 - if (stack_guard_page(vma, start))
61238 - goto next_page;
61239 - }
61240 if (foll_flags & FOLL_WRITE)
61241 fault_flags |= FAULT_FLAG_WRITE;
61242 if (nonblocking)
61243 @@ -1644,7 +1640,7 @@ next_page:
61244 start += PAGE_SIZE;
61245 nr_pages--;
61246 } while (nr_pages && start < vma->vm_end);
61247 - } while (nr_pages);
61248 + }
61249 return i;
61250 }
61251 EXPORT_SYMBOL(__get_user_pages);
61252 @@ -1795,6 +1791,10 @@ static int insert_page(struct vm_area_st
61253 page_add_file_rmap(page);
61254 set_pte_at(mm, addr, pte, mk_pte(page, prot));
61255
61256 +#ifdef CONFIG_PAX_SEGMEXEC
61257 + pax_mirror_file_pte(vma, addr, page, ptl);
61258 +#endif
61259 +
61260 retval = 0;
61261 pte_unmap_unlock(pte, ptl);
61262 return retval;
61263 @@ -1829,10 +1829,22 @@ out:
61264 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
61265 struct page *page)
61266 {
61267 +
61268 +#ifdef CONFIG_PAX_SEGMEXEC
61269 + struct vm_area_struct *vma_m;
61270 +#endif
61271 +
61272 if (addr < vma->vm_start || addr >= vma->vm_end)
61273 return -EFAULT;
61274 if (!page_count(page))
61275 return -EINVAL;
61276 +
61277 +#ifdef CONFIG_PAX_SEGMEXEC
61278 + vma_m = pax_find_mirror_vma(vma);
61279 + if (vma_m)
61280 + vma_m->vm_flags |= VM_INSERTPAGE;
61281 +#endif
61282 +
61283 vma->vm_flags |= VM_INSERTPAGE;
61284 return insert_page(vma, addr, page, vma->vm_page_prot);
61285 }
61286 @@ -1918,6 +1930,7 @@ int vm_insert_mixed(struct vm_area_struc
61287 unsigned long pfn)
61288 {
61289 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
61290 + BUG_ON(vma->vm_mirror);
61291
61292 if (addr < vma->vm_start || addr >= vma->vm_end)
61293 return -EFAULT;
61294 @@ -2233,6 +2246,186 @@ static inline void cow_user_page(struct
61295 copy_user_highpage(dst, src, va, vma);
61296 }
61297
61298 +#ifdef CONFIG_PAX_SEGMEXEC
61299 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
61300 +{
61301 + struct mm_struct *mm = vma->vm_mm;
61302 + spinlock_t *ptl;
61303 + pte_t *pte, entry;
61304 +
61305 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
61306 + entry = *pte;
61307 + if (!pte_present(entry)) {
61308 + if (!pte_none(entry)) {
61309 + BUG_ON(pte_file(entry));
61310 + free_swap_and_cache(pte_to_swp_entry(entry));
61311 + pte_clear_not_present_full(mm, address, pte, 0);
61312 + }
61313 + } else {
61314 + struct page *page;
61315 +
61316 + flush_cache_page(vma, address, pte_pfn(entry));
61317 + entry = ptep_clear_flush(vma, address, pte);
61318 + BUG_ON(pte_dirty(entry));
61319 + page = vm_normal_page(vma, address, entry);
61320 + if (page) {
61321 + update_hiwater_rss(mm);
61322 + if (PageAnon(page))
61323 + dec_mm_counter_fast(mm, MM_ANONPAGES);
61324 + else
61325 + dec_mm_counter_fast(mm, MM_FILEPAGES);
61326 + page_remove_rmap(page);
61327 + page_cache_release(page);
61328 + }
61329 + }
61330 + pte_unmap_unlock(pte, ptl);
61331 +}
61332 +
61333 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
61334 + *
61335 + * the ptl of the lower mapped page is held on entry and is not released on exit
61336 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
61337 + */
61338 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
61339 +{
61340 + struct mm_struct *mm = vma->vm_mm;
61341 + unsigned long address_m;
61342 + spinlock_t *ptl_m;
61343 + struct vm_area_struct *vma_m;
61344 + pmd_t *pmd_m;
61345 + pte_t *pte_m, entry_m;
61346 +
61347 + BUG_ON(!page_m || !PageAnon(page_m));
61348 +
61349 + vma_m = pax_find_mirror_vma(vma);
61350 + if (!vma_m)
61351 + return;
61352 +
61353 + BUG_ON(!PageLocked(page_m));
61354 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61355 + address_m = address + SEGMEXEC_TASK_SIZE;
61356 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61357 + pte_m = pte_offset_map(pmd_m, address_m);
61358 + ptl_m = pte_lockptr(mm, pmd_m);
61359 + if (ptl != ptl_m) {
61360 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61361 + if (!pte_none(*pte_m))
61362 + goto out;
61363 + }
61364 +
61365 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
61366 + page_cache_get(page_m);
61367 + page_add_anon_rmap(page_m, vma_m, address_m);
61368 + inc_mm_counter_fast(mm, MM_ANONPAGES);
61369 + set_pte_at(mm, address_m, pte_m, entry_m);
61370 + update_mmu_cache(vma_m, address_m, entry_m);
61371 +out:
61372 + if (ptl != ptl_m)
61373 + spin_unlock(ptl_m);
61374 + pte_unmap(pte_m);
61375 + unlock_page(page_m);
61376 +}
61377 +
61378 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
61379 +{
61380 + struct mm_struct *mm = vma->vm_mm;
61381 + unsigned long address_m;
61382 + spinlock_t *ptl_m;
61383 + struct vm_area_struct *vma_m;
61384 + pmd_t *pmd_m;
61385 + pte_t *pte_m, entry_m;
61386 +
61387 + BUG_ON(!page_m || PageAnon(page_m));
61388 +
61389 + vma_m = pax_find_mirror_vma(vma);
61390 + if (!vma_m)
61391 + return;
61392 +
61393 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61394 + address_m = address + SEGMEXEC_TASK_SIZE;
61395 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61396 + pte_m = pte_offset_map(pmd_m, address_m);
61397 + ptl_m = pte_lockptr(mm, pmd_m);
61398 + if (ptl != ptl_m) {
61399 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61400 + if (!pte_none(*pte_m))
61401 + goto out;
61402 + }
61403 +
61404 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
61405 + page_cache_get(page_m);
61406 + page_add_file_rmap(page_m);
61407 + inc_mm_counter_fast(mm, MM_FILEPAGES);
61408 + set_pte_at(mm, address_m, pte_m, entry_m);
61409 + update_mmu_cache(vma_m, address_m, entry_m);
61410 +out:
61411 + if (ptl != ptl_m)
61412 + spin_unlock(ptl_m);
61413 + pte_unmap(pte_m);
61414 +}
61415 +
61416 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
61417 +{
61418 + struct mm_struct *mm = vma->vm_mm;
61419 + unsigned long address_m;
61420 + spinlock_t *ptl_m;
61421 + struct vm_area_struct *vma_m;
61422 + pmd_t *pmd_m;
61423 + pte_t *pte_m, entry_m;
61424 +
61425 + vma_m = pax_find_mirror_vma(vma);
61426 + if (!vma_m)
61427 + return;
61428 +
61429 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61430 + address_m = address + SEGMEXEC_TASK_SIZE;
61431 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61432 + pte_m = pte_offset_map(pmd_m, address_m);
61433 + ptl_m = pte_lockptr(mm, pmd_m);
61434 + if (ptl != ptl_m) {
61435 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61436 + if (!pte_none(*pte_m))
61437 + goto out;
61438 + }
61439 +
61440 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
61441 + set_pte_at(mm, address_m, pte_m, entry_m);
61442 +out:
61443 + if (ptl != ptl_m)
61444 + spin_unlock(ptl_m);
61445 + pte_unmap(pte_m);
61446 +}
61447 +
61448 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
61449 +{
61450 + struct page *page_m;
61451 + pte_t entry;
61452 +
61453 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
61454 + goto out;
61455 +
61456 + entry = *pte;
61457 + page_m = vm_normal_page(vma, address, entry);
61458 + if (!page_m)
61459 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
61460 + else if (PageAnon(page_m)) {
61461 + if (pax_find_mirror_vma(vma)) {
61462 + pte_unmap_unlock(pte, ptl);
61463 + lock_page(page_m);
61464 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
61465 + if (pte_same(entry, *pte))
61466 + pax_mirror_anon_pte(vma, address, page_m, ptl);
61467 + else
61468 + unlock_page(page_m);
61469 + }
61470 + } else
61471 + pax_mirror_file_pte(vma, address, page_m, ptl);
61472 +
61473 +out:
61474 + pte_unmap_unlock(pte, ptl);
61475 +}
61476 +#endif
61477 +
61478 /*
61479 * This routine handles present pages, when users try to write
61480 * to a shared page. It is done by copying the page to a new address
61481 @@ -2444,6 +2637,12 @@ gotten:
61482 */
61483 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
61484 if (likely(pte_same(*page_table, orig_pte))) {
61485 +
61486 +#ifdef CONFIG_PAX_SEGMEXEC
61487 + if (pax_find_mirror_vma(vma))
61488 + BUG_ON(!trylock_page(new_page));
61489 +#endif
61490 +
61491 if (old_page) {
61492 if (!PageAnon(old_page)) {
61493 dec_mm_counter_fast(mm, MM_FILEPAGES);
61494 @@ -2495,6 +2694,10 @@ gotten:
61495 page_remove_rmap(old_page);
61496 }
61497
61498 +#ifdef CONFIG_PAX_SEGMEXEC
61499 + pax_mirror_anon_pte(vma, address, new_page, ptl);
61500 +#endif
61501 +
61502 /* Free the old page.. */
61503 new_page = old_page;
61504 ret |= VM_FAULT_WRITE;
61505 @@ -2905,6 +3108,11 @@ static int do_swap_page(struct mm_struct
61506 swap_free(entry);
61507 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
61508 try_to_free_swap(page);
61509 +
61510 +#ifdef CONFIG_PAX_SEGMEXEC
61511 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
61512 +#endif
61513 +
61514 unlock_page(page);
61515 if (swapcache) {
61516 /*
61517 @@ -2928,6 +3136,11 @@ static int do_swap_page(struct mm_struct
61518
61519 /* No need to invalidate - it was non-present before */
61520 update_mmu_cache(vma, address, page_table);
61521 +
61522 +#ifdef CONFIG_PAX_SEGMEXEC
61523 + pax_mirror_anon_pte(vma, address, page, ptl);
61524 +#endif
61525 +
61526 unlock:
61527 pte_unmap_unlock(page_table, ptl);
61528 out:
61529 @@ -2947,40 +3160,6 @@ out_release:
61530 }
61531
61532 /*
61533 - * This is like a special single-page "expand_{down|up}wards()",
61534 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
61535 - * doesn't hit another vma.
61536 - */
61537 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
61538 -{
61539 - address &= PAGE_MASK;
61540 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
61541 - struct vm_area_struct *prev = vma->vm_prev;
61542 -
61543 - /*
61544 - * Is there a mapping abutting this one below?
61545 - *
61546 - * That's only ok if it's the same stack mapping
61547 - * that has gotten split..
61548 - */
61549 - if (prev && prev->vm_end == address)
61550 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
61551 -
61552 - expand_stack(vma, address - PAGE_SIZE);
61553 - }
61554 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
61555 - struct vm_area_struct *next = vma->vm_next;
61556 -
61557 - /* As VM_GROWSDOWN but s/below/above/ */
61558 - if (next && next->vm_start == address + PAGE_SIZE)
61559 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
61560 -
61561 - expand_upwards(vma, address + PAGE_SIZE);
61562 - }
61563 - return 0;
61564 -}
61565 -
61566 -/*
61567 * We enter with non-exclusive mmap_sem (to exclude vma changes,
61568 * but allow concurrent faults), and pte mapped but not yet locked.
61569 * We return with mmap_sem still held, but pte unmapped and unlocked.
61570 @@ -2989,27 +3168,23 @@ static int do_anonymous_page(struct mm_s
61571 unsigned long address, pte_t *page_table, pmd_t *pmd,
61572 unsigned int flags)
61573 {
61574 - struct page *page;
61575 + struct page *page = NULL;
61576 spinlock_t *ptl;
61577 pte_t entry;
61578
61579 - pte_unmap(page_table);
61580 -
61581 - /* Check if we need to add a guard page to the stack */
61582 - if (check_stack_guard_page(vma, address) < 0)
61583 - return VM_FAULT_SIGBUS;
61584 -
61585 - /* Use the zero-page for reads */
61586 if (!(flags & FAULT_FLAG_WRITE)) {
61587 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
61588 vma->vm_page_prot));
61589 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
61590 + ptl = pte_lockptr(mm, pmd);
61591 + spin_lock(ptl);
61592 if (!pte_none(*page_table))
61593 goto unlock;
61594 goto setpte;
61595 }
61596
61597 /* Allocate our own private page. */
61598 + pte_unmap(page_table);
61599 +
61600 if (unlikely(anon_vma_prepare(vma)))
61601 goto oom;
61602 page = alloc_zeroed_user_highpage_movable(vma, address);
61603 @@ -3028,6 +3203,11 @@ static int do_anonymous_page(struct mm_s
61604 if (!pte_none(*page_table))
61605 goto release;
61606
61607 +#ifdef CONFIG_PAX_SEGMEXEC
61608 + if (pax_find_mirror_vma(vma))
61609 + BUG_ON(!trylock_page(page));
61610 +#endif
61611 +
61612 inc_mm_counter_fast(mm, MM_ANONPAGES);
61613 page_add_new_anon_rmap(page, vma, address);
61614 setpte:
61615 @@ -3035,6 +3215,12 @@ setpte:
61616
61617 /* No need to invalidate - it was non-present before */
61618 update_mmu_cache(vma, address, page_table);
61619 +
61620 +#ifdef CONFIG_PAX_SEGMEXEC
61621 + if (page)
61622 + pax_mirror_anon_pte(vma, address, page, ptl);
61623 +#endif
61624 +
61625 unlock:
61626 pte_unmap_unlock(page_table, ptl);
61627 return 0;
61628 @@ -3172,6 +3358,12 @@ static int __do_fault(struct mm_struct *
61629 */
61630 /* Only go through if we didn't race with anybody else... */
61631 if (likely(pte_same(*page_table, orig_pte))) {
61632 +
61633 +#ifdef CONFIG_PAX_SEGMEXEC
61634 + if (anon && pax_find_mirror_vma(vma))
61635 + BUG_ON(!trylock_page(page));
61636 +#endif
61637 +
61638 flush_icache_page(vma, page);
61639 entry = mk_pte(page, vma->vm_page_prot);
61640 if (flags & FAULT_FLAG_WRITE)
61641 @@ -3191,6 +3383,14 @@ static int __do_fault(struct mm_struct *
61642
61643 /* no need to invalidate: a not-present page won't be cached */
61644 update_mmu_cache(vma, address, page_table);
61645 +
61646 +#ifdef CONFIG_PAX_SEGMEXEC
61647 + if (anon)
61648 + pax_mirror_anon_pte(vma, address, page, ptl);
61649 + else
61650 + pax_mirror_file_pte(vma, address, page, ptl);
61651 +#endif
61652 +
61653 } else {
61654 if (charged)
61655 mem_cgroup_uncharge_page(page);
61656 @@ -3338,6 +3538,12 @@ int handle_pte_fault(struct mm_struct *m
61657 if (flags & FAULT_FLAG_WRITE)
61658 flush_tlb_fix_spurious_fault(vma, address);
61659 }
61660 +
61661 +#ifdef CONFIG_PAX_SEGMEXEC
61662 + pax_mirror_pte(vma, address, pte, pmd, ptl);
61663 + return 0;
61664 +#endif
61665 +
61666 unlock:
61667 pte_unmap_unlock(pte, ptl);
61668 return 0;
61669 @@ -3354,6 +3560,10 @@ int handle_mm_fault(struct mm_struct *mm
61670 pmd_t *pmd;
61671 pte_t *pte;
61672
61673 +#ifdef CONFIG_PAX_SEGMEXEC
61674 + struct vm_area_struct *vma_m;
61675 +#endif
61676 +
61677 __set_current_state(TASK_RUNNING);
61678
61679 count_vm_event(PGFAULT);
61680 @@ -3364,6 +3574,34 @@ int handle_mm_fault(struct mm_struct *mm
61681 if (unlikely(is_vm_hugetlb_page(vma)))
61682 return hugetlb_fault(mm, vma, address, flags);
61683
61684 +#ifdef CONFIG_PAX_SEGMEXEC
61685 + vma_m = pax_find_mirror_vma(vma);
61686 + if (vma_m) {
61687 + unsigned long address_m;
61688 + pgd_t *pgd_m;
61689 + pud_t *pud_m;
61690 + pmd_t *pmd_m;
61691 +
61692 + if (vma->vm_start > vma_m->vm_start) {
61693 + address_m = address;
61694 + address -= SEGMEXEC_TASK_SIZE;
61695 + vma = vma_m;
61696 + } else
61697 + address_m = address + SEGMEXEC_TASK_SIZE;
61698 +
61699 + pgd_m = pgd_offset(mm, address_m);
61700 + pud_m = pud_alloc(mm, pgd_m, address_m);
61701 + if (!pud_m)
61702 + return VM_FAULT_OOM;
61703 + pmd_m = pmd_alloc(mm, pud_m, address_m);
61704 + if (!pmd_m)
61705 + return VM_FAULT_OOM;
61706 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
61707 + return VM_FAULT_OOM;
61708 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
61709 + }
61710 +#endif
61711 +
61712 pgd = pgd_offset(mm, address);
61713 pud = pud_alloc(mm, pgd, address);
61714 if (!pud)
61715 @@ -3393,7 +3631,7 @@ int handle_mm_fault(struct mm_struct *mm
61716 * run pte_offset_map on the pmd, if an huge pmd could
61717 * materialize from under us from a different thread.
61718 */
61719 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
61720 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
61721 return VM_FAULT_OOM;
61722 /* if an huge pmd materialized from under us just retry later */
61723 if (unlikely(pmd_trans_huge(*pmd)))
61724 @@ -3497,7 +3735,7 @@ static int __init gate_vma_init(void)
61725 gate_vma.vm_start = FIXADDR_USER_START;
61726 gate_vma.vm_end = FIXADDR_USER_END;
61727 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
61728 - gate_vma.vm_page_prot = __P101;
61729 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
61730 /*
61731 * Make sure the vDSO gets into every core dump.
61732 * Dumping its contents makes post-mortem fully interpretable later
61733 diff -urNp linux-2.6.39.4/mm/memory-failure.c linux-2.6.39.4/mm/memory-failure.c
61734 --- linux-2.6.39.4/mm/memory-failure.c 2011-07-09 09:18:51.000000000 -0400
61735 +++ linux-2.6.39.4/mm/memory-failure.c 2011-08-05 19:44:37.000000000 -0400
61736 @@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
61737
61738 int sysctl_memory_failure_recovery __read_mostly = 1;
61739
61740 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61741 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61742
61743 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
61744
61745 @@ -1013,7 +1013,7 @@ int __memory_failure(unsigned long pfn,
61746 }
61747
61748 nr_pages = 1 << compound_trans_order(hpage);
61749 - atomic_long_add(nr_pages, &mce_bad_pages);
61750 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
61751
61752 /*
61753 * We need/can do nothing about count=0 pages.
61754 @@ -1043,7 +1043,7 @@ int __memory_failure(unsigned long pfn,
61755 if (!PageHWPoison(hpage)
61756 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
61757 || (p != hpage && TestSetPageHWPoison(hpage))) {
61758 - atomic_long_sub(nr_pages, &mce_bad_pages);
61759 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61760 return 0;
61761 }
61762 set_page_hwpoison_huge_page(hpage);
61763 @@ -1101,7 +1101,7 @@ int __memory_failure(unsigned long pfn,
61764 }
61765 if (hwpoison_filter(p)) {
61766 if (TestClearPageHWPoison(p))
61767 - atomic_long_sub(nr_pages, &mce_bad_pages);
61768 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61769 unlock_page(hpage);
61770 put_page(hpage);
61771 return 0;
61772 @@ -1227,7 +1227,7 @@ int unpoison_memory(unsigned long pfn)
61773 return 0;
61774 }
61775 if (TestClearPageHWPoison(p))
61776 - atomic_long_sub(nr_pages, &mce_bad_pages);
61777 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61778 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
61779 return 0;
61780 }
61781 @@ -1241,7 +1241,7 @@ int unpoison_memory(unsigned long pfn)
61782 */
61783 if (TestClearPageHWPoison(page)) {
61784 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
61785 - atomic_long_sub(nr_pages, &mce_bad_pages);
61786 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61787 freeit = 1;
61788 if (PageHuge(page))
61789 clear_page_hwpoison_huge_page(page);
61790 @@ -1354,7 +1354,7 @@ static int soft_offline_huge_page(struct
61791 }
61792 done:
61793 if (!PageHWPoison(hpage))
61794 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
61795 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
61796 set_page_hwpoison_huge_page(hpage);
61797 dequeue_hwpoisoned_huge_page(hpage);
61798 /* keep elevated page count for bad page */
61799 @@ -1484,7 +1484,7 @@ int soft_offline_page(struct page *page,
61800 return ret;
61801
61802 done:
61803 - atomic_long_add(1, &mce_bad_pages);
61804 + atomic_long_add_unchecked(1, &mce_bad_pages);
61805 SetPageHWPoison(page);
61806 /* keep elevated page count for bad page */
61807 return ret;
61808 diff -urNp linux-2.6.39.4/mm/mempolicy.c linux-2.6.39.4/mm/mempolicy.c
61809 --- linux-2.6.39.4/mm/mempolicy.c 2011-05-19 00:06:34.000000000 -0400
61810 +++ linux-2.6.39.4/mm/mempolicy.c 2011-08-05 19:44:37.000000000 -0400
61811 @@ -643,6 +643,10 @@ static int mbind_range(struct mm_struct
61812 unsigned long vmstart;
61813 unsigned long vmend;
61814
61815 +#ifdef CONFIG_PAX_SEGMEXEC
61816 + struct vm_area_struct *vma_m;
61817 +#endif
61818 +
61819 vma = find_vma_prev(mm, start, &prev);
61820 if (!vma || vma->vm_start > start)
61821 return -EFAULT;
61822 @@ -673,6 +677,16 @@ static int mbind_range(struct mm_struct
61823 err = policy_vma(vma, new_pol);
61824 if (err)
61825 goto out;
61826 +
61827 +#ifdef CONFIG_PAX_SEGMEXEC
61828 + vma_m = pax_find_mirror_vma(vma);
61829 + if (vma_m) {
61830 + err = policy_vma(vma_m, new_pol);
61831 + if (err)
61832 + goto out;
61833 + }
61834 +#endif
61835 +
61836 }
61837
61838 out:
61839 @@ -1106,6 +1120,17 @@ static long do_mbind(unsigned long start
61840
61841 if (end < start)
61842 return -EINVAL;
61843 +
61844 +#ifdef CONFIG_PAX_SEGMEXEC
61845 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
61846 + if (end > SEGMEXEC_TASK_SIZE)
61847 + return -EINVAL;
61848 + } else
61849 +#endif
61850 +
61851 + if (end > TASK_SIZE)
61852 + return -EINVAL;
61853 +
61854 if (end == start)
61855 return 0;
61856
61857 @@ -1324,6 +1349,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61858 if (!mm)
61859 goto out;
61860
61861 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61862 + if (mm != current->mm &&
61863 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61864 + err = -EPERM;
61865 + goto out;
61866 + }
61867 +#endif
61868 +
61869 /*
61870 * Check if this process has the right to modify the specified
61871 * process. The right exists if the process has administrative
61872 @@ -1333,8 +1366,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61873 rcu_read_lock();
61874 tcred = __task_cred(task);
61875 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61876 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
61877 - !capable(CAP_SYS_NICE)) {
61878 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61879 rcu_read_unlock();
61880 err = -EPERM;
61881 goto out;
61882 @@ -2634,7 +2666,7 @@ int show_numa_map(struct seq_file *m, vo
61883
61884 if (file) {
61885 seq_printf(m, " file=");
61886 - seq_path(m, &file->f_path, "\n\t= ");
61887 + seq_path(m, &file->f_path, "\n\t\\= ");
61888 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
61889 seq_printf(m, " heap");
61890 } else if (vma->vm_start <= mm->start_stack &&
61891 diff -urNp linux-2.6.39.4/mm/migrate.c linux-2.6.39.4/mm/migrate.c
61892 --- linux-2.6.39.4/mm/migrate.c 2011-07-09 09:18:51.000000000 -0400
61893 +++ linux-2.6.39.4/mm/migrate.c 2011-08-05 19:44:37.000000000 -0400
61894 @@ -1133,6 +1133,8 @@ static int do_pages_move(struct mm_struc
61895 unsigned long chunk_start;
61896 int err;
61897
61898 + pax_track_stack();
61899 +
61900 task_nodes = cpuset_mems_allowed(task);
61901
61902 err = -ENOMEM;
61903 @@ -1317,6 +1319,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61904 if (!mm)
61905 return -EINVAL;
61906
61907 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61908 + if (mm != current->mm &&
61909 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61910 + err = -EPERM;
61911 + goto out;
61912 + }
61913 +#endif
61914 +
61915 /*
61916 * Check if this process has the right to modify the specified
61917 * process. The right exists if the process has administrative
61918 @@ -1326,8 +1336,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61919 rcu_read_lock();
61920 tcred = __task_cred(task);
61921 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61922 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
61923 - !capable(CAP_SYS_NICE)) {
61924 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61925 rcu_read_unlock();
61926 err = -EPERM;
61927 goto out;
61928 diff -urNp linux-2.6.39.4/mm/mlock.c linux-2.6.39.4/mm/mlock.c
61929 --- linux-2.6.39.4/mm/mlock.c 2011-05-19 00:06:34.000000000 -0400
61930 +++ linux-2.6.39.4/mm/mlock.c 2011-08-05 19:44:37.000000000 -0400
61931 @@ -13,6 +13,7 @@
61932 #include <linux/pagemap.h>
61933 #include <linux/mempolicy.h>
61934 #include <linux/syscalls.h>
61935 +#include <linux/security.h>
61936 #include <linux/sched.h>
61937 #include <linux/module.h>
61938 #include <linux/rmap.h>
61939 @@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
61940 return -EINVAL;
61941 if (end == start)
61942 return 0;
61943 + if (end > TASK_SIZE)
61944 + return -EINVAL;
61945 +
61946 vma = find_vma_prev(current->mm, start, &prev);
61947 if (!vma || vma->vm_start > start)
61948 return -ENOMEM;
61949 @@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
61950 for (nstart = start ; ; ) {
61951 unsigned int newflags;
61952
61953 +#ifdef CONFIG_PAX_SEGMEXEC
61954 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61955 + break;
61956 +#endif
61957 +
61958 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
61959
61960 newflags = vma->vm_flags | VM_LOCKED;
61961 @@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
61962 lock_limit >>= PAGE_SHIFT;
61963
61964 /* check against resource limits */
61965 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
61966 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
61967 error = do_mlock(start, len, 1);
61968 up_write(&current->mm->mmap_sem);
61969 @@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
61970 static int do_mlockall(int flags)
61971 {
61972 struct vm_area_struct * vma, * prev = NULL;
61973 - unsigned int def_flags = 0;
61974
61975 if (flags & MCL_FUTURE)
61976 - def_flags = VM_LOCKED;
61977 - current->mm->def_flags = def_flags;
61978 + current->mm->def_flags |= VM_LOCKED;
61979 + else
61980 + current->mm->def_flags &= ~VM_LOCKED;
61981 if (flags == MCL_FUTURE)
61982 goto out;
61983
61984 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
61985 - unsigned int newflags;
61986 + unsigned long newflags;
61987 +
61988 +#ifdef CONFIG_PAX_SEGMEXEC
61989 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61990 + break;
61991 +#endif
61992
61993 + BUG_ON(vma->vm_end > TASK_SIZE);
61994 newflags = vma->vm_flags | VM_LOCKED;
61995 if (!(flags & MCL_CURRENT))
61996 newflags &= ~VM_LOCKED;
61997 @@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
61998 lock_limit >>= PAGE_SHIFT;
61999
62000 ret = -ENOMEM;
62001 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
62002 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
62003 capable(CAP_IPC_LOCK))
62004 ret = do_mlockall(flags);
62005 diff -urNp linux-2.6.39.4/mm/mmap.c linux-2.6.39.4/mm/mmap.c
62006 --- linux-2.6.39.4/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
62007 +++ linux-2.6.39.4/mm/mmap.c 2011-08-05 20:34:06.000000000 -0400
62008 @@ -46,6 +46,16 @@
62009 #define arch_rebalance_pgtables(addr, len) (addr)
62010 #endif
62011
62012 +static inline void verify_mm_writelocked(struct mm_struct *mm)
62013 +{
62014 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
62015 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
62016 + up_read(&mm->mmap_sem);
62017 + BUG();
62018 + }
62019 +#endif
62020 +}
62021 +
62022 static void unmap_region(struct mm_struct *mm,
62023 struct vm_area_struct *vma, struct vm_area_struct *prev,
62024 unsigned long start, unsigned long end);
62025 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
62026 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
62027 *
62028 */
62029 -pgprot_t protection_map[16] = {
62030 +pgprot_t protection_map[16] __read_only = {
62031 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
62032 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
62033 };
62034
62035 pgprot_t vm_get_page_prot(unsigned long vm_flags)
62036 {
62037 - return __pgprot(pgprot_val(protection_map[vm_flags &
62038 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
62039 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
62040 pgprot_val(arch_vm_get_page_prot(vm_flags)));
62041 +
62042 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62043 + if (!(__supported_pte_mask & _PAGE_NX) &&
62044 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
62045 + (vm_flags & (VM_READ | VM_WRITE)))
62046 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
62047 +#endif
62048 +
62049 + return prot;
62050 }
62051 EXPORT_SYMBOL(vm_get_page_prot);
62052
62053 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
62054 int sysctl_overcommit_ratio = 50; /* default is 50% */
62055 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
62056 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
62057 struct percpu_counter vm_committed_as;
62058
62059 /*
62060 @@ -232,6 +252,7 @@ static struct vm_area_struct *remove_vma
62061 struct vm_area_struct *next = vma->vm_next;
62062
62063 might_sleep();
62064 + BUG_ON(vma->vm_mirror);
62065 if (vma->vm_ops && vma->vm_ops->close)
62066 vma->vm_ops->close(vma);
62067 if (vma->vm_file) {
62068 @@ -276,6 +297,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
62069 * not page aligned -Ram Gupta
62070 */
62071 rlim = rlimit(RLIMIT_DATA);
62072 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
62073 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
62074 (mm->end_data - mm->start_data) > rlim)
62075 goto out;
62076 @@ -719,6 +741,12 @@ static int
62077 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
62078 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
62079 {
62080 +
62081 +#ifdef CONFIG_PAX_SEGMEXEC
62082 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
62083 + return 0;
62084 +#endif
62085 +
62086 if (is_mergeable_vma(vma, file, vm_flags) &&
62087 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
62088 if (vma->vm_pgoff == vm_pgoff)
62089 @@ -738,6 +766,12 @@ static int
62090 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
62091 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
62092 {
62093 +
62094 +#ifdef CONFIG_PAX_SEGMEXEC
62095 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
62096 + return 0;
62097 +#endif
62098 +
62099 if (is_mergeable_vma(vma, file, vm_flags) &&
62100 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
62101 pgoff_t vm_pglen;
62102 @@ -780,13 +814,20 @@ can_vma_merge_after(struct vm_area_struc
62103 struct vm_area_struct *vma_merge(struct mm_struct *mm,
62104 struct vm_area_struct *prev, unsigned long addr,
62105 unsigned long end, unsigned long vm_flags,
62106 - struct anon_vma *anon_vma, struct file *file,
62107 + struct anon_vma *anon_vma, struct file *file,
62108 pgoff_t pgoff, struct mempolicy *policy)
62109 {
62110 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
62111 struct vm_area_struct *area, *next;
62112 int err;
62113
62114 +#ifdef CONFIG_PAX_SEGMEXEC
62115 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
62116 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
62117 +
62118 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
62119 +#endif
62120 +
62121 /*
62122 * We later require that vma->vm_flags == vm_flags,
62123 * so this tests vma->vm_flags & VM_SPECIAL, too.
62124 @@ -802,6 +843,15 @@ struct vm_area_struct *vma_merge(struct
62125 if (next && next->vm_end == end) /* cases 6, 7, 8 */
62126 next = next->vm_next;
62127
62128 +#ifdef CONFIG_PAX_SEGMEXEC
62129 + if (prev)
62130 + prev_m = pax_find_mirror_vma(prev);
62131 + if (area)
62132 + area_m = pax_find_mirror_vma(area);
62133 + if (next)
62134 + next_m = pax_find_mirror_vma(next);
62135 +#endif
62136 +
62137 /*
62138 * Can it merge with the predecessor?
62139 */
62140 @@ -821,9 +871,24 @@ struct vm_area_struct *vma_merge(struct
62141 /* cases 1, 6 */
62142 err = vma_adjust(prev, prev->vm_start,
62143 next->vm_end, prev->vm_pgoff, NULL);
62144 - } else /* cases 2, 5, 7 */
62145 +
62146 +#ifdef CONFIG_PAX_SEGMEXEC
62147 + if (!err && prev_m)
62148 + err = vma_adjust(prev_m, prev_m->vm_start,
62149 + next_m->vm_end, prev_m->vm_pgoff, NULL);
62150 +#endif
62151 +
62152 + } else { /* cases 2, 5, 7 */
62153 err = vma_adjust(prev, prev->vm_start,
62154 end, prev->vm_pgoff, NULL);
62155 +
62156 +#ifdef CONFIG_PAX_SEGMEXEC
62157 + if (!err && prev_m)
62158 + err = vma_adjust(prev_m, prev_m->vm_start,
62159 + end_m, prev_m->vm_pgoff, NULL);
62160 +#endif
62161 +
62162 + }
62163 if (err)
62164 return NULL;
62165 khugepaged_enter_vma_merge(prev);
62166 @@ -837,12 +902,27 @@ struct vm_area_struct *vma_merge(struct
62167 mpol_equal(policy, vma_policy(next)) &&
62168 can_vma_merge_before(next, vm_flags,
62169 anon_vma, file, pgoff+pglen)) {
62170 - if (prev && addr < prev->vm_end) /* case 4 */
62171 + if (prev && addr < prev->vm_end) { /* case 4 */
62172 err = vma_adjust(prev, prev->vm_start,
62173 addr, prev->vm_pgoff, NULL);
62174 - else /* cases 3, 8 */
62175 +
62176 +#ifdef CONFIG_PAX_SEGMEXEC
62177 + if (!err && prev_m)
62178 + err = vma_adjust(prev_m, prev_m->vm_start,
62179 + addr_m, prev_m->vm_pgoff, NULL);
62180 +#endif
62181 +
62182 + } else { /* cases 3, 8 */
62183 err = vma_adjust(area, addr, next->vm_end,
62184 next->vm_pgoff - pglen, NULL);
62185 +
62186 +#ifdef CONFIG_PAX_SEGMEXEC
62187 + if (!err && area_m)
62188 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
62189 + next_m->vm_pgoff - pglen, NULL);
62190 +#endif
62191 +
62192 + }
62193 if (err)
62194 return NULL;
62195 khugepaged_enter_vma_merge(area);
62196 @@ -958,14 +1038,11 @@ none:
62197 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
62198 struct file *file, long pages)
62199 {
62200 - const unsigned long stack_flags
62201 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
62202 -
62203 if (file) {
62204 mm->shared_vm += pages;
62205 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
62206 mm->exec_vm += pages;
62207 - } else if (flags & stack_flags)
62208 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
62209 mm->stack_vm += pages;
62210 if (flags & (VM_RESERVED|VM_IO))
62211 mm->reserved_vm += pages;
62212 @@ -992,7 +1069,7 @@ unsigned long do_mmap_pgoff(struct file
62213 * (the exception is when the underlying filesystem is noexec
62214 * mounted, in which case we dont add PROT_EXEC.)
62215 */
62216 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
62217 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
62218 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
62219 prot |= PROT_EXEC;
62220
62221 @@ -1018,7 +1095,7 @@ unsigned long do_mmap_pgoff(struct file
62222 /* Obtain the address to map to. we verify (or select) it and ensure
62223 * that it represents a valid section of the address space.
62224 */
62225 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
62226 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
62227 if (addr & ~PAGE_MASK)
62228 return addr;
62229
62230 @@ -1029,6 +1106,36 @@ unsigned long do_mmap_pgoff(struct file
62231 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
62232 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
62233
62234 +#ifdef CONFIG_PAX_MPROTECT
62235 + if (mm->pax_flags & MF_PAX_MPROTECT) {
62236 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
62237 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
62238 + gr_log_rwxmmap(file);
62239 +
62240 +#ifdef CONFIG_PAX_EMUPLT
62241 + vm_flags &= ~VM_EXEC;
62242 +#else
62243 + return -EPERM;
62244 +#endif
62245 +
62246 + }
62247 +
62248 + if (!(vm_flags & VM_EXEC))
62249 + vm_flags &= ~VM_MAYEXEC;
62250 +#else
62251 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
62252 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
62253 +#endif
62254 + else
62255 + vm_flags &= ~VM_MAYWRITE;
62256 + }
62257 +#endif
62258 +
62259 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62260 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
62261 + vm_flags &= ~VM_PAGEEXEC;
62262 +#endif
62263 +
62264 if (flags & MAP_LOCKED)
62265 if (!can_do_mlock())
62266 return -EPERM;
62267 @@ -1040,6 +1147,7 @@ unsigned long do_mmap_pgoff(struct file
62268 locked += mm->locked_vm;
62269 lock_limit = rlimit(RLIMIT_MEMLOCK);
62270 lock_limit >>= PAGE_SHIFT;
62271 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
62272 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
62273 return -EAGAIN;
62274 }
62275 @@ -1110,6 +1218,9 @@ unsigned long do_mmap_pgoff(struct file
62276 if (error)
62277 return error;
62278
62279 + if (!gr_acl_handle_mmap(file, prot))
62280 + return -EACCES;
62281 +
62282 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
62283 }
62284 EXPORT_SYMBOL(do_mmap_pgoff);
62285 @@ -1187,10 +1298,10 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_ar
62286 */
62287 int vma_wants_writenotify(struct vm_area_struct *vma)
62288 {
62289 - unsigned int vm_flags = vma->vm_flags;
62290 + unsigned long vm_flags = vma->vm_flags;
62291
62292 /* If it was private or non-writable, the write bit is already clear */
62293 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
62294 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
62295 return 0;
62296
62297 /* The backer wishes to know when pages are first written to? */
62298 @@ -1239,14 +1350,24 @@ unsigned long mmap_region(struct file *f
62299 unsigned long charged = 0;
62300 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
62301
62302 +#ifdef CONFIG_PAX_SEGMEXEC
62303 + struct vm_area_struct *vma_m = NULL;
62304 +#endif
62305 +
62306 + /*
62307 + * mm->mmap_sem is required to protect against another thread
62308 + * changing the mappings in case we sleep.
62309 + */
62310 + verify_mm_writelocked(mm);
62311 +
62312 /* Clear old maps */
62313 error = -ENOMEM;
62314 -munmap_back:
62315 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62316 if (vma && vma->vm_start < addr + len) {
62317 if (do_munmap(mm, addr, len))
62318 return -ENOMEM;
62319 - goto munmap_back;
62320 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62321 + BUG_ON(vma && vma->vm_start < addr + len);
62322 }
62323
62324 /* Check against address space limit. */
62325 @@ -1295,6 +1416,16 @@ munmap_back:
62326 goto unacct_error;
62327 }
62328
62329 +#ifdef CONFIG_PAX_SEGMEXEC
62330 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
62331 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
62332 + if (!vma_m) {
62333 + error = -ENOMEM;
62334 + goto free_vma;
62335 + }
62336 + }
62337 +#endif
62338 +
62339 vma->vm_mm = mm;
62340 vma->vm_start = addr;
62341 vma->vm_end = addr + len;
62342 @@ -1318,6 +1449,19 @@ munmap_back:
62343 error = file->f_op->mmap(file, vma);
62344 if (error)
62345 goto unmap_and_free_vma;
62346 +
62347 +#ifdef CONFIG_PAX_SEGMEXEC
62348 + if (vma_m && (vm_flags & VM_EXECUTABLE))
62349 + added_exe_file_vma(mm);
62350 +#endif
62351 +
62352 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62353 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
62354 + vma->vm_flags |= VM_PAGEEXEC;
62355 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
62356 + }
62357 +#endif
62358 +
62359 if (vm_flags & VM_EXECUTABLE)
62360 added_exe_file_vma(mm);
62361
62362 @@ -1353,6 +1497,11 @@ munmap_back:
62363 vma_link(mm, vma, prev, rb_link, rb_parent);
62364 file = vma->vm_file;
62365
62366 +#ifdef CONFIG_PAX_SEGMEXEC
62367 + if (vma_m)
62368 + BUG_ON(pax_mirror_vma(vma_m, vma));
62369 +#endif
62370 +
62371 /* Once vma denies write, undo our temporary denial count */
62372 if (correct_wcount)
62373 atomic_inc(&inode->i_writecount);
62374 @@ -1361,6 +1510,7 @@ out:
62375
62376 mm->total_vm += len >> PAGE_SHIFT;
62377 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
62378 + track_exec_limit(mm, addr, addr + len, vm_flags);
62379 if (vm_flags & VM_LOCKED) {
62380 if (!mlock_vma_pages_range(vma, addr, addr + len))
62381 mm->locked_vm += (len >> PAGE_SHIFT);
62382 @@ -1378,6 +1528,12 @@ unmap_and_free_vma:
62383 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
62384 charged = 0;
62385 free_vma:
62386 +
62387 +#ifdef CONFIG_PAX_SEGMEXEC
62388 + if (vma_m)
62389 + kmem_cache_free(vm_area_cachep, vma_m);
62390 +#endif
62391 +
62392 kmem_cache_free(vm_area_cachep, vma);
62393 unacct_error:
62394 if (charged)
62395 @@ -1385,6 +1541,44 @@ unacct_error:
62396 return error;
62397 }
62398
62399 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
62400 +{
62401 + if (!vma) {
62402 +#ifdef CONFIG_STACK_GROWSUP
62403 + if (addr > sysctl_heap_stack_gap)
62404 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
62405 + else
62406 + vma = find_vma(current->mm, 0);
62407 + if (vma && (vma->vm_flags & VM_GROWSUP))
62408 + return false;
62409 +#endif
62410 + return true;
62411 + }
62412 +
62413 + if (addr + len > vma->vm_start)
62414 + return false;
62415 +
62416 + if (vma->vm_flags & VM_GROWSDOWN)
62417 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
62418 +#ifdef CONFIG_STACK_GROWSUP
62419 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
62420 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
62421 +#endif
62422 +
62423 + return true;
62424 +}
62425 +
62426 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
62427 +{
62428 + if (vma->vm_start < len)
62429 + return -ENOMEM;
62430 + if (!(vma->vm_flags & VM_GROWSDOWN))
62431 + return vma->vm_start - len;
62432 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
62433 + return vma->vm_start - len - sysctl_heap_stack_gap;
62434 + return -ENOMEM;
62435 +}
62436 +
62437 /* Get an address range which is currently unmapped.
62438 * For shmat() with addr=0.
62439 *
62440 @@ -1411,18 +1605,23 @@ arch_get_unmapped_area(struct file *filp
62441 if (flags & MAP_FIXED)
62442 return addr;
62443
62444 +#ifdef CONFIG_PAX_RANDMMAP
62445 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
62446 +#endif
62447 +
62448 if (addr) {
62449 addr = PAGE_ALIGN(addr);
62450 - vma = find_vma(mm, addr);
62451 - if (TASK_SIZE - len >= addr &&
62452 - (!vma || addr + len <= vma->vm_start))
62453 - return addr;
62454 + if (TASK_SIZE - len >= addr) {
62455 + vma = find_vma(mm, addr);
62456 + if (check_heap_stack_gap(vma, addr, len))
62457 + return addr;
62458 + }
62459 }
62460 if (len > mm->cached_hole_size) {
62461 - start_addr = addr = mm->free_area_cache;
62462 + start_addr = addr = mm->free_area_cache;
62463 } else {
62464 - start_addr = addr = TASK_UNMAPPED_BASE;
62465 - mm->cached_hole_size = 0;
62466 + start_addr = addr = mm->mmap_base;
62467 + mm->cached_hole_size = 0;
62468 }
62469
62470 full_search:
62471 @@ -1433,34 +1632,40 @@ full_search:
62472 * Start a new search - just in case we missed
62473 * some holes.
62474 */
62475 - if (start_addr != TASK_UNMAPPED_BASE) {
62476 - addr = TASK_UNMAPPED_BASE;
62477 - start_addr = addr;
62478 + if (start_addr != mm->mmap_base) {
62479 + start_addr = addr = mm->mmap_base;
62480 mm->cached_hole_size = 0;
62481 goto full_search;
62482 }
62483 return -ENOMEM;
62484 }
62485 - if (!vma || addr + len <= vma->vm_start) {
62486 - /*
62487 - * Remember the place where we stopped the search:
62488 - */
62489 - mm->free_area_cache = addr + len;
62490 - return addr;
62491 - }
62492 + if (check_heap_stack_gap(vma, addr, len))
62493 + break;
62494 if (addr + mm->cached_hole_size < vma->vm_start)
62495 mm->cached_hole_size = vma->vm_start - addr;
62496 addr = vma->vm_end;
62497 }
62498 +
62499 + /*
62500 + * Remember the place where we stopped the search:
62501 + */
62502 + mm->free_area_cache = addr + len;
62503 + return addr;
62504 }
62505 #endif
62506
62507 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
62508 {
62509 +
62510 +#ifdef CONFIG_PAX_SEGMEXEC
62511 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
62512 + return;
62513 +#endif
62514 +
62515 /*
62516 * Is this a new hole at the lowest possible address?
62517 */
62518 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
62519 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
62520 mm->free_area_cache = addr;
62521 mm->cached_hole_size = ~0UL;
62522 }
62523 @@ -1478,7 +1683,7 @@ arch_get_unmapped_area_topdown(struct fi
62524 {
62525 struct vm_area_struct *vma;
62526 struct mm_struct *mm = current->mm;
62527 - unsigned long addr = addr0;
62528 + unsigned long base = mm->mmap_base, addr = addr0;
62529
62530 /* requested length too big for entire address space */
62531 if (len > TASK_SIZE)
62532 @@ -1487,13 +1692,18 @@ arch_get_unmapped_area_topdown(struct fi
62533 if (flags & MAP_FIXED)
62534 return addr;
62535
62536 +#ifdef CONFIG_PAX_RANDMMAP
62537 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
62538 +#endif
62539 +
62540 /* requesting a specific address */
62541 if (addr) {
62542 addr = PAGE_ALIGN(addr);
62543 - vma = find_vma(mm, addr);
62544 - if (TASK_SIZE - len >= addr &&
62545 - (!vma || addr + len <= vma->vm_start))
62546 - return addr;
62547 + if (TASK_SIZE - len >= addr) {
62548 + vma = find_vma(mm, addr);
62549 + if (check_heap_stack_gap(vma, addr, len))
62550 + return addr;
62551 + }
62552 }
62553
62554 /* check if free_area_cache is useful for us */
62555 @@ -1508,7 +1718,7 @@ arch_get_unmapped_area_topdown(struct fi
62556 /* make sure it can fit in the remaining address space */
62557 if (addr > len) {
62558 vma = find_vma(mm, addr-len);
62559 - if (!vma || addr <= vma->vm_start)
62560 + if (check_heap_stack_gap(vma, addr - len, len))
62561 /* remember the address as a hint for next time */
62562 return (mm->free_area_cache = addr-len);
62563 }
62564 @@ -1525,7 +1735,7 @@ arch_get_unmapped_area_topdown(struct fi
62565 * return with success:
62566 */
62567 vma = find_vma(mm, addr);
62568 - if (!vma || addr+len <= vma->vm_start)
62569 + if (check_heap_stack_gap(vma, addr, len))
62570 /* remember the address as a hint for next time */
62571 return (mm->free_area_cache = addr);
62572
62573 @@ -1534,8 +1744,8 @@ arch_get_unmapped_area_topdown(struct fi
62574 mm->cached_hole_size = vma->vm_start - addr;
62575
62576 /* try just below the current vma->vm_start */
62577 - addr = vma->vm_start-len;
62578 - } while (len < vma->vm_start);
62579 + addr = skip_heap_stack_gap(vma, len);
62580 + } while (!IS_ERR_VALUE(addr));
62581
62582 bottomup:
62583 /*
62584 @@ -1544,13 +1754,21 @@ bottomup:
62585 * can happen with large stack limits and large mmap()
62586 * allocations.
62587 */
62588 + mm->mmap_base = TASK_UNMAPPED_BASE;
62589 +
62590 +#ifdef CONFIG_PAX_RANDMMAP
62591 + if (mm->pax_flags & MF_PAX_RANDMMAP)
62592 + mm->mmap_base += mm->delta_mmap;
62593 +#endif
62594 +
62595 + mm->free_area_cache = mm->mmap_base;
62596 mm->cached_hole_size = ~0UL;
62597 - mm->free_area_cache = TASK_UNMAPPED_BASE;
62598 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
62599 /*
62600 * Restore the topdown base:
62601 */
62602 - mm->free_area_cache = mm->mmap_base;
62603 + mm->mmap_base = base;
62604 + mm->free_area_cache = base;
62605 mm->cached_hole_size = ~0UL;
62606
62607 return addr;
62608 @@ -1559,6 +1777,12 @@ bottomup:
62609
62610 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
62611 {
62612 +
62613 +#ifdef CONFIG_PAX_SEGMEXEC
62614 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
62615 + return;
62616 +#endif
62617 +
62618 /*
62619 * Is this a new hole at the highest possible address?
62620 */
62621 @@ -1566,8 +1790,10 @@ void arch_unmap_area_topdown(struct mm_s
62622 mm->free_area_cache = addr;
62623
62624 /* dont allow allocations above current base */
62625 - if (mm->free_area_cache > mm->mmap_base)
62626 + if (mm->free_area_cache > mm->mmap_base) {
62627 mm->free_area_cache = mm->mmap_base;
62628 + mm->cached_hole_size = ~0UL;
62629 + }
62630 }
62631
62632 unsigned long
62633 @@ -1675,6 +1901,28 @@ out:
62634 return prev ? prev->vm_next : vma;
62635 }
62636
62637 +#ifdef CONFIG_PAX_SEGMEXEC
62638 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
62639 +{
62640 + struct vm_area_struct *vma_m;
62641 +
62642 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
62643 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
62644 + BUG_ON(vma->vm_mirror);
62645 + return NULL;
62646 + }
62647 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
62648 + vma_m = vma->vm_mirror;
62649 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
62650 + BUG_ON(vma->vm_file != vma_m->vm_file);
62651 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
62652 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
62653 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
62654 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
62655 + return vma_m;
62656 +}
62657 +#endif
62658 +
62659 /*
62660 * Verify that the stack growth is acceptable and
62661 * update accounting. This is shared with both the
62662 @@ -1691,6 +1939,7 @@ static int acct_stack_growth(struct vm_a
62663 return -ENOMEM;
62664
62665 /* Stack limit test */
62666 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
62667 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
62668 return -ENOMEM;
62669
62670 @@ -1701,6 +1950,7 @@ static int acct_stack_growth(struct vm_a
62671 locked = mm->locked_vm + grow;
62672 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
62673 limit >>= PAGE_SHIFT;
62674 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
62675 if (locked > limit && !capable(CAP_IPC_LOCK))
62676 return -ENOMEM;
62677 }
62678 @@ -1731,37 +1981,48 @@ static int acct_stack_growth(struct vm_a
62679 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
62680 * vma is the last one with address > vma->vm_end. Have to extend vma.
62681 */
62682 +#ifndef CONFIG_IA64
62683 +static
62684 +#endif
62685 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
62686 {
62687 int error;
62688 + bool locknext;
62689
62690 if (!(vma->vm_flags & VM_GROWSUP))
62691 return -EFAULT;
62692
62693 + /* Also guard against wrapping around to address 0. */
62694 + if (address < PAGE_ALIGN(address+1))
62695 + address = PAGE_ALIGN(address+1);
62696 + else
62697 + return -ENOMEM;
62698 +
62699 /*
62700 * We must make sure the anon_vma is allocated
62701 * so that the anon_vma locking is not a noop.
62702 */
62703 if (unlikely(anon_vma_prepare(vma)))
62704 return -ENOMEM;
62705 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
62706 + if (locknext && anon_vma_prepare(vma->vm_next))
62707 + return -ENOMEM;
62708 vma_lock_anon_vma(vma);
62709 + if (locknext)
62710 + vma_lock_anon_vma(vma->vm_next);
62711
62712 /*
62713 * vma->vm_start/vm_end cannot change under us because the caller
62714 * is required to hold the mmap_sem in read mode. We need the
62715 - * anon_vma lock to serialize against concurrent expand_stacks.
62716 - * Also guard against wrapping around to address 0.
62717 + * anon_vma locks to serialize against concurrent expand_stacks
62718 + * and expand_upwards.
62719 */
62720 - if (address < PAGE_ALIGN(address+4))
62721 - address = PAGE_ALIGN(address+4);
62722 - else {
62723 - vma_unlock_anon_vma(vma);
62724 - return -ENOMEM;
62725 - }
62726 error = 0;
62727
62728 /* Somebody else might have raced and expanded it already */
62729 - if (address > vma->vm_end) {
62730 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
62731 + error = -ENOMEM;
62732 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
62733 unsigned long size, grow;
62734
62735 size = address - vma->vm_start;
62736 @@ -1776,6 +2037,8 @@ int expand_upwards(struct vm_area_struct
62737 }
62738 }
62739 }
62740 + if (locknext)
62741 + vma_unlock_anon_vma(vma->vm_next);
62742 vma_unlock_anon_vma(vma);
62743 khugepaged_enter_vma_merge(vma);
62744 return error;
62745 @@ -1789,6 +2052,8 @@ static int expand_downwards(struct vm_ar
62746 unsigned long address)
62747 {
62748 int error;
62749 + bool lockprev = false;
62750 + struct vm_area_struct *prev;
62751
62752 /*
62753 * We must make sure the anon_vma is allocated
62754 @@ -1802,6 +2067,15 @@ static int expand_downwards(struct vm_ar
62755 if (error)
62756 return error;
62757
62758 + prev = vma->vm_prev;
62759 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
62760 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
62761 +#endif
62762 + if (lockprev && anon_vma_prepare(prev))
62763 + return -ENOMEM;
62764 + if (lockprev)
62765 + vma_lock_anon_vma(prev);
62766 +
62767 vma_lock_anon_vma(vma);
62768
62769 /*
62770 @@ -1811,9 +2085,17 @@ static int expand_downwards(struct vm_ar
62771 */
62772
62773 /* Somebody else might have raced and expanded it already */
62774 - if (address < vma->vm_start) {
62775 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
62776 + error = -ENOMEM;
62777 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
62778 unsigned long size, grow;
62779
62780 +#ifdef CONFIG_PAX_SEGMEXEC
62781 + struct vm_area_struct *vma_m;
62782 +
62783 + vma_m = pax_find_mirror_vma(vma);
62784 +#endif
62785 +
62786 size = vma->vm_end - address;
62787 grow = (vma->vm_start - address) >> PAGE_SHIFT;
62788
62789 @@ -1823,11 +2105,22 @@ static int expand_downwards(struct vm_ar
62790 if (!error) {
62791 vma->vm_start = address;
62792 vma->vm_pgoff -= grow;
62793 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
62794 +
62795 +#ifdef CONFIG_PAX_SEGMEXEC
62796 + if (vma_m) {
62797 + vma_m->vm_start -= grow << PAGE_SHIFT;
62798 + vma_m->vm_pgoff -= grow;
62799 + }
62800 +#endif
62801 +
62802 perf_event_mmap(vma);
62803 }
62804 }
62805 }
62806 vma_unlock_anon_vma(vma);
62807 + if (lockprev)
62808 + vma_unlock_anon_vma(prev);
62809 khugepaged_enter_vma_merge(vma);
62810 return error;
62811 }
62812 @@ -1902,6 +2195,13 @@ static void remove_vma_list(struct mm_st
62813 do {
62814 long nrpages = vma_pages(vma);
62815
62816 +#ifdef CONFIG_PAX_SEGMEXEC
62817 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
62818 + vma = remove_vma(vma);
62819 + continue;
62820 + }
62821 +#endif
62822 +
62823 mm->total_vm -= nrpages;
62824 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
62825 vma = remove_vma(vma);
62826 @@ -1947,6 +2247,16 @@ detach_vmas_to_be_unmapped(struct mm_str
62827 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
62828 vma->vm_prev = NULL;
62829 do {
62830 +
62831 +#ifdef CONFIG_PAX_SEGMEXEC
62832 + if (vma->vm_mirror) {
62833 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
62834 + vma->vm_mirror->vm_mirror = NULL;
62835 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
62836 + vma->vm_mirror = NULL;
62837 + }
62838 +#endif
62839 +
62840 rb_erase(&vma->vm_rb, &mm->mm_rb);
62841 mm->map_count--;
62842 tail_vma = vma;
62843 @@ -1975,14 +2285,33 @@ static int __split_vma(struct mm_struct
62844 struct vm_area_struct *new;
62845 int err = -ENOMEM;
62846
62847 +#ifdef CONFIG_PAX_SEGMEXEC
62848 + struct vm_area_struct *vma_m, *new_m = NULL;
62849 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
62850 +#endif
62851 +
62852 if (is_vm_hugetlb_page(vma) && (addr &
62853 ~(huge_page_mask(hstate_vma(vma)))))
62854 return -EINVAL;
62855
62856 +#ifdef CONFIG_PAX_SEGMEXEC
62857 + vma_m = pax_find_mirror_vma(vma);
62858 +#endif
62859 +
62860 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62861 if (!new)
62862 goto out_err;
62863
62864 +#ifdef CONFIG_PAX_SEGMEXEC
62865 + if (vma_m) {
62866 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62867 + if (!new_m) {
62868 + kmem_cache_free(vm_area_cachep, new);
62869 + goto out_err;
62870 + }
62871 + }
62872 +#endif
62873 +
62874 /* most fields are the same, copy all, and then fixup */
62875 *new = *vma;
62876
62877 @@ -1995,6 +2324,22 @@ static int __split_vma(struct mm_struct
62878 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
62879 }
62880
62881 +#ifdef CONFIG_PAX_SEGMEXEC
62882 + if (vma_m) {
62883 + *new_m = *vma_m;
62884 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
62885 + new_m->vm_mirror = new;
62886 + new->vm_mirror = new_m;
62887 +
62888 + if (new_below)
62889 + new_m->vm_end = addr_m;
62890 + else {
62891 + new_m->vm_start = addr_m;
62892 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
62893 + }
62894 + }
62895 +#endif
62896 +
62897 pol = mpol_dup(vma_policy(vma));
62898 if (IS_ERR(pol)) {
62899 err = PTR_ERR(pol);
62900 @@ -2020,6 +2365,42 @@ static int __split_vma(struct mm_struct
62901 else
62902 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
62903
62904 +#ifdef CONFIG_PAX_SEGMEXEC
62905 + if (!err && vma_m) {
62906 + if (anon_vma_clone(new_m, vma_m))
62907 + goto out_free_mpol;
62908 +
62909 + mpol_get(pol);
62910 + vma_set_policy(new_m, pol);
62911 +
62912 + if (new_m->vm_file) {
62913 + get_file(new_m->vm_file);
62914 + if (vma_m->vm_flags & VM_EXECUTABLE)
62915 + added_exe_file_vma(mm);
62916 + }
62917 +
62918 + if (new_m->vm_ops && new_m->vm_ops->open)
62919 + new_m->vm_ops->open(new_m);
62920 +
62921 + if (new_below)
62922 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
62923 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
62924 + else
62925 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
62926 +
62927 + if (err) {
62928 + if (new_m->vm_ops && new_m->vm_ops->close)
62929 + new_m->vm_ops->close(new_m);
62930 + if (new_m->vm_file) {
62931 + if (vma_m->vm_flags & VM_EXECUTABLE)
62932 + removed_exe_file_vma(mm);
62933 + fput(new_m->vm_file);
62934 + }
62935 + mpol_put(pol);
62936 + }
62937 + }
62938 +#endif
62939 +
62940 /* Success. */
62941 if (!err)
62942 return 0;
62943 @@ -2032,10 +2413,18 @@ static int __split_vma(struct mm_struct
62944 removed_exe_file_vma(mm);
62945 fput(new->vm_file);
62946 }
62947 - unlink_anon_vmas(new);
62948 out_free_mpol:
62949 mpol_put(pol);
62950 out_free_vma:
62951 +
62952 +#ifdef CONFIG_PAX_SEGMEXEC
62953 + if (new_m) {
62954 + unlink_anon_vmas(new_m);
62955 + kmem_cache_free(vm_area_cachep, new_m);
62956 + }
62957 +#endif
62958 +
62959 + unlink_anon_vmas(new);
62960 kmem_cache_free(vm_area_cachep, new);
62961 out_err:
62962 return err;
62963 @@ -2048,6 +2437,15 @@ static int __split_vma(struct mm_struct
62964 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
62965 unsigned long addr, int new_below)
62966 {
62967 +
62968 +#ifdef CONFIG_PAX_SEGMEXEC
62969 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
62970 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
62971 + if (mm->map_count >= sysctl_max_map_count-1)
62972 + return -ENOMEM;
62973 + } else
62974 +#endif
62975 +
62976 if (mm->map_count >= sysctl_max_map_count)
62977 return -ENOMEM;
62978
62979 @@ -2059,11 +2457,30 @@ int split_vma(struct mm_struct *mm, stru
62980 * work. This now handles partial unmappings.
62981 * Jeremy Fitzhardinge <jeremy@goop.org>
62982 */
62983 +#ifdef CONFIG_PAX_SEGMEXEC
62984 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62985 {
62986 + int ret = __do_munmap(mm, start, len);
62987 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
62988 + return ret;
62989 +
62990 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
62991 +}
62992 +
62993 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62994 +#else
62995 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62996 +#endif
62997 +{
62998 unsigned long end;
62999 struct vm_area_struct *vma, *prev, *last;
63000
63001 + /*
63002 + * mm->mmap_sem is required to protect against another thread
63003 + * changing the mappings in case we sleep.
63004 + */
63005 + verify_mm_writelocked(mm);
63006 +
63007 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
63008 return -EINVAL;
63009
63010 @@ -2137,6 +2554,8 @@ int do_munmap(struct mm_struct *mm, unsi
63011 /* Fix up all other VM information */
63012 remove_vma_list(mm, vma);
63013
63014 + track_exec_limit(mm, start, end, 0UL);
63015 +
63016 return 0;
63017 }
63018
63019 @@ -2149,22 +2568,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
63020
63021 profile_munmap(addr);
63022
63023 +#ifdef CONFIG_PAX_SEGMEXEC
63024 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
63025 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
63026 + return -EINVAL;
63027 +#endif
63028 +
63029 down_write(&mm->mmap_sem);
63030 ret = do_munmap(mm, addr, len);
63031 up_write(&mm->mmap_sem);
63032 return ret;
63033 }
63034
63035 -static inline void verify_mm_writelocked(struct mm_struct *mm)
63036 -{
63037 -#ifdef CONFIG_DEBUG_VM
63038 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
63039 - WARN_ON(1);
63040 - up_read(&mm->mmap_sem);
63041 - }
63042 -#endif
63043 -}
63044 -
63045 /*
63046 * this is really a simplified "do_mmap". it only handles
63047 * anonymous maps. eventually we may be able to do some
63048 @@ -2178,6 +2593,7 @@ unsigned long do_brk(unsigned long addr,
63049 struct rb_node ** rb_link, * rb_parent;
63050 pgoff_t pgoff = addr >> PAGE_SHIFT;
63051 int error;
63052 + unsigned long charged;
63053
63054 len = PAGE_ALIGN(len);
63055 if (!len)
63056 @@ -2189,16 +2605,30 @@ unsigned long do_brk(unsigned long addr,
63057
63058 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
63059
63060 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
63061 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
63062 + flags &= ~VM_EXEC;
63063 +
63064 +#ifdef CONFIG_PAX_MPROTECT
63065 + if (mm->pax_flags & MF_PAX_MPROTECT)
63066 + flags &= ~VM_MAYEXEC;
63067 +#endif
63068 +
63069 + }
63070 +#endif
63071 +
63072 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
63073 if (error & ~PAGE_MASK)
63074 return error;
63075
63076 + charged = len >> PAGE_SHIFT;
63077 +
63078 /*
63079 * mlock MCL_FUTURE?
63080 */
63081 if (mm->def_flags & VM_LOCKED) {
63082 unsigned long locked, lock_limit;
63083 - locked = len >> PAGE_SHIFT;
63084 + locked = charged;
63085 locked += mm->locked_vm;
63086 lock_limit = rlimit(RLIMIT_MEMLOCK);
63087 lock_limit >>= PAGE_SHIFT;
63088 @@ -2215,22 +2645,22 @@ unsigned long do_brk(unsigned long addr,
63089 /*
63090 * Clear old maps. this also does some error checking for us
63091 */
63092 - munmap_back:
63093 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
63094 if (vma && vma->vm_start < addr + len) {
63095 if (do_munmap(mm, addr, len))
63096 return -ENOMEM;
63097 - goto munmap_back;
63098 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
63099 + BUG_ON(vma && vma->vm_start < addr + len);
63100 }
63101
63102 /* Check against address space limits *after* clearing old maps... */
63103 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
63104 + if (!may_expand_vm(mm, charged))
63105 return -ENOMEM;
63106
63107 if (mm->map_count > sysctl_max_map_count)
63108 return -ENOMEM;
63109
63110 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
63111 + if (security_vm_enough_memory(charged))
63112 return -ENOMEM;
63113
63114 /* Can we just expand an old private anonymous mapping? */
63115 @@ -2244,7 +2674,7 @@ unsigned long do_brk(unsigned long addr,
63116 */
63117 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
63118 if (!vma) {
63119 - vm_unacct_memory(len >> PAGE_SHIFT);
63120 + vm_unacct_memory(charged);
63121 return -ENOMEM;
63122 }
63123
63124 @@ -2258,11 +2688,12 @@ unsigned long do_brk(unsigned long addr,
63125 vma_link(mm, vma, prev, rb_link, rb_parent);
63126 out:
63127 perf_event_mmap(vma);
63128 - mm->total_vm += len >> PAGE_SHIFT;
63129 + mm->total_vm += charged;
63130 if (flags & VM_LOCKED) {
63131 if (!mlock_vma_pages_range(vma, addr, addr + len))
63132 - mm->locked_vm += (len >> PAGE_SHIFT);
63133 + mm->locked_vm += charged;
63134 }
63135 + track_exec_limit(mm, addr, addr + len, flags);
63136 return addr;
63137 }
63138
63139 @@ -2309,8 +2740,10 @@ void exit_mmap(struct mm_struct *mm)
63140 * Walk the list again, actually closing and freeing it,
63141 * with preemption enabled, without holding any MM locks.
63142 */
63143 - while (vma)
63144 + while (vma) {
63145 + vma->vm_mirror = NULL;
63146 vma = remove_vma(vma);
63147 + }
63148
63149 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
63150 }
63151 @@ -2324,6 +2757,13 @@ int insert_vm_struct(struct mm_struct *
63152 struct vm_area_struct * __vma, * prev;
63153 struct rb_node ** rb_link, * rb_parent;
63154
63155 +#ifdef CONFIG_PAX_SEGMEXEC
63156 + struct vm_area_struct *vma_m = NULL;
63157 +#endif
63158 +
63159 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
63160 + return -EPERM;
63161 +
63162 /*
63163 * The vm_pgoff of a purely anonymous vma should be irrelevant
63164 * until its first write fault, when page's anon_vma and index
63165 @@ -2346,7 +2786,22 @@ int insert_vm_struct(struct mm_struct *
63166 if ((vma->vm_flags & VM_ACCOUNT) &&
63167 security_vm_enough_memory_mm(mm, vma_pages(vma)))
63168 return -ENOMEM;
63169 +
63170 +#ifdef CONFIG_PAX_SEGMEXEC
63171 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
63172 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
63173 + if (!vma_m)
63174 + return -ENOMEM;
63175 + }
63176 +#endif
63177 +
63178 vma_link(mm, vma, prev, rb_link, rb_parent);
63179 +
63180 +#ifdef CONFIG_PAX_SEGMEXEC
63181 + if (vma_m)
63182 + BUG_ON(pax_mirror_vma(vma_m, vma));
63183 +#endif
63184 +
63185 return 0;
63186 }
63187
63188 @@ -2364,6 +2819,8 @@ struct vm_area_struct *copy_vma(struct v
63189 struct rb_node **rb_link, *rb_parent;
63190 struct mempolicy *pol;
63191
63192 + BUG_ON(vma->vm_mirror);
63193 +
63194 /*
63195 * If anonymous vma has not yet been faulted, update new pgoff
63196 * to match new location, to increase its chance of merging.
63197 @@ -2414,6 +2871,39 @@ struct vm_area_struct *copy_vma(struct v
63198 return NULL;
63199 }
63200
63201 +#ifdef CONFIG_PAX_SEGMEXEC
63202 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
63203 +{
63204 + struct vm_area_struct *prev_m;
63205 + struct rb_node **rb_link_m, *rb_parent_m;
63206 + struct mempolicy *pol_m;
63207 +
63208 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
63209 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
63210 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
63211 + *vma_m = *vma;
63212 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
63213 + if (anon_vma_clone(vma_m, vma))
63214 + return -ENOMEM;
63215 + pol_m = vma_policy(vma_m);
63216 + mpol_get(pol_m);
63217 + vma_set_policy(vma_m, pol_m);
63218 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
63219 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
63220 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
63221 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
63222 + if (vma_m->vm_file)
63223 + get_file(vma_m->vm_file);
63224 + if (vma_m->vm_ops && vma_m->vm_ops->open)
63225 + vma_m->vm_ops->open(vma_m);
63226 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
63227 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
63228 + vma_m->vm_mirror = vma;
63229 + vma->vm_mirror = vma_m;
63230 + return 0;
63231 +}
63232 +#endif
63233 +
63234 /*
63235 * Return true if the calling process may expand its vm space by the passed
63236 * number of pages
63237 @@ -2424,7 +2914,7 @@ int may_expand_vm(struct mm_struct *mm,
63238 unsigned long lim;
63239
63240 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
63241 -
63242 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
63243 if (cur + npages > lim)
63244 return 0;
63245 return 1;
63246 @@ -2495,6 +2985,22 @@ int install_special_mapping(struct mm_st
63247 vma->vm_start = addr;
63248 vma->vm_end = addr + len;
63249
63250 +#ifdef CONFIG_PAX_MPROTECT
63251 + if (mm->pax_flags & MF_PAX_MPROTECT) {
63252 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
63253 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
63254 + return -EPERM;
63255 + if (!(vm_flags & VM_EXEC))
63256 + vm_flags &= ~VM_MAYEXEC;
63257 +#else
63258 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
63259 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
63260 +#endif
63261 + else
63262 + vm_flags &= ~VM_MAYWRITE;
63263 + }
63264 +#endif
63265 +
63266 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
63267 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
63268
63269 diff -urNp linux-2.6.39.4/mm/mprotect.c linux-2.6.39.4/mm/mprotect.c
63270 --- linux-2.6.39.4/mm/mprotect.c 2011-05-19 00:06:34.000000000 -0400
63271 +++ linux-2.6.39.4/mm/mprotect.c 2011-08-05 19:44:37.000000000 -0400
63272 @@ -23,10 +23,16 @@
63273 #include <linux/mmu_notifier.h>
63274 #include <linux/migrate.h>
63275 #include <linux/perf_event.h>
63276 +
63277 +#ifdef CONFIG_PAX_MPROTECT
63278 +#include <linux/elf.h>
63279 +#endif
63280 +
63281 #include <asm/uaccess.h>
63282 #include <asm/pgtable.h>
63283 #include <asm/cacheflush.h>
63284 #include <asm/tlbflush.h>
63285 +#include <asm/mmu_context.h>
63286
63287 #ifndef pgprot_modify
63288 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
63289 @@ -141,6 +147,48 @@ static void change_protection(struct vm_
63290 flush_tlb_range(vma, start, end);
63291 }
63292
63293 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63294 +/* called while holding the mmap semaphor for writing except stack expansion */
63295 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
63296 +{
63297 + unsigned long oldlimit, newlimit = 0UL;
63298 +
63299 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
63300 + return;
63301 +
63302 + spin_lock(&mm->page_table_lock);
63303 + oldlimit = mm->context.user_cs_limit;
63304 + if ((prot & VM_EXEC) && oldlimit < end)
63305 + /* USER_CS limit moved up */
63306 + newlimit = end;
63307 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
63308 + /* USER_CS limit moved down */
63309 + newlimit = start;
63310 +
63311 + if (newlimit) {
63312 + mm->context.user_cs_limit = newlimit;
63313 +
63314 +#ifdef CONFIG_SMP
63315 + wmb();
63316 + cpus_clear(mm->context.cpu_user_cs_mask);
63317 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
63318 +#endif
63319 +
63320 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
63321 + }
63322 + spin_unlock(&mm->page_table_lock);
63323 + if (newlimit == end) {
63324 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
63325 +
63326 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
63327 + if (is_vm_hugetlb_page(vma))
63328 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
63329 + else
63330 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
63331 + }
63332 +}
63333 +#endif
63334 +
63335 int
63336 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
63337 unsigned long start, unsigned long end, unsigned long newflags)
63338 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
63339 int error;
63340 int dirty_accountable = 0;
63341
63342 +#ifdef CONFIG_PAX_SEGMEXEC
63343 + struct vm_area_struct *vma_m = NULL;
63344 + unsigned long start_m, end_m;
63345 +
63346 + start_m = start + SEGMEXEC_TASK_SIZE;
63347 + end_m = end + SEGMEXEC_TASK_SIZE;
63348 +#endif
63349 +
63350 if (newflags == oldflags) {
63351 *pprev = vma;
63352 return 0;
63353 }
63354
63355 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
63356 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
63357 +
63358 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
63359 + return -ENOMEM;
63360 +
63361 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
63362 + return -ENOMEM;
63363 + }
63364 +
63365 /*
63366 * If we make a private mapping writable we increase our commit;
63367 * but (without finer accounting) cannot reduce our commit if we
63368 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
63369 }
63370 }
63371
63372 +#ifdef CONFIG_PAX_SEGMEXEC
63373 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
63374 + if (start != vma->vm_start) {
63375 + error = split_vma(mm, vma, start, 1);
63376 + if (error)
63377 + goto fail;
63378 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
63379 + *pprev = (*pprev)->vm_next;
63380 + }
63381 +
63382 + if (end != vma->vm_end) {
63383 + error = split_vma(mm, vma, end, 0);
63384 + if (error)
63385 + goto fail;
63386 + }
63387 +
63388 + if (pax_find_mirror_vma(vma)) {
63389 + error = __do_munmap(mm, start_m, end_m - start_m);
63390 + if (error)
63391 + goto fail;
63392 + } else {
63393 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
63394 + if (!vma_m) {
63395 + error = -ENOMEM;
63396 + goto fail;
63397 + }
63398 + vma->vm_flags = newflags;
63399 + error = pax_mirror_vma(vma_m, vma);
63400 + if (error) {
63401 + vma->vm_flags = oldflags;
63402 + goto fail;
63403 + }
63404 + }
63405 + }
63406 +#endif
63407 +
63408 /*
63409 * First try to merge with previous and/or next vma.
63410 */
63411 @@ -204,9 +306,21 @@ success:
63412 * vm_flags and vm_page_prot are protected by the mmap_sem
63413 * held in write mode.
63414 */
63415 +
63416 +#ifdef CONFIG_PAX_SEGMEXEC
63417 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
63418 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
63419 +#endif
63420 +
63421 vma->vm_flags = newflags;
63422 +
63423 +#ifdef CONFIG_PAX_MPROTECT
63424 + if (mm->binfmt && mm->binfmt->handle_mprotect)
63425 + mm->binfmt->handle_mprotect(vma, newflags);
63426 +#endif
63427 +
63428 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
63429 - vm_get_page_prot(newflags));
63430 + vm_get_page_prot(vma->vm_flags));
63431
63432 if (vma_wants_writenotify(vma)) {
63433 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
63434 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63435 end = start + len;
63436 if (end <= start)
63437 return -ENOMEM;
63438 +
63439 +#ifdef CONFIG_PAX_SEGMEXEC
63440 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
63441 + if (end > SEGMEXEC_TASK_SIZE)
63442 + return -EINVAL;
63443 + } else
63444 +#endif
63445 +
63446 + if (end > TASK_SIZE)
63447 + return -EINVAL;
63448 +
63449 if (!arch_validate_prot(prot))
63450 return -EINVAL;
63451
63452 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63453 /*
63454 * Does the application expect PROT_READ to imply PROT_EXEC:
63455 */
63456 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
63457 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
63458 prot |= PROT_EXEC;
63459
63460 vm_flags = calc_vm_prot_bits(prot);
63461 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63462 if (start > vma->vm_start)
63463 prev = vma;
63464
63465 +#ifdef CONFIG_PAX_MPROTECT
63466 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
63467 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
63468 +#endif
63469 +
63470 for (nstart = start ; ; ) {
63471 unsigned long newflags;
63472
63473 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63474
63475 /* newflags >> 4 shift VM_MAY% in place of VM_% */
63476 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
63477 + if (prot & (PROT_WRITE | PROT_EXEC))
63478 + gr_log_rwxmprotect(vma->vm_file);
63479 +
63480 + error = -EACCES;
63481 + goto out;
63482 + }
63483 +
63484 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
63485 error = -EACCES;
63486 goto out;
63487 }
63488 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63489 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
63490 if (error)
63491 goto out;
63492 +
63493 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
63494 +
63495 nstart = tmp;
63496
63497 if (nstart < prev->vm_end)
63498 diff -urNp linux-2.6.39.4/mm/mremap.c linux-2.6.39.4/mm/mremap.c
63499 --- linux-2.6.39.4/mm/mremap.c 2011-05-19 00:06:34.000000000 -0400
63500 +++ linux-2.6.39.4/mm/mremap.c 2011-08-05 19:44:37.000000000 -0400
63501 @@ -114,6 +114,12 @@ static void move_ptes(struct vm_area_str
63502 continue;
63503 pte = ptep_clear_flush(vma, old_addr, old_pte);
63504 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
63505 +
63506 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63507 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
63508 + pte = pte_exprotect(pte);
63509 +#endif
63510 +
63511 set_pte_at(mm, new_addr, new_pte, pte);
63512 }
63513
63514 @@ -273,6 +279,11 @@ static struct vm_area_struct *vma_to_res
63515 if (is_vm_hugetlb_page(vma))
63516 goto Einval;
63517
63518 +#ifdef CONFIG_PAX_SEGMEXEC
63519 + if (pax_find_mirror_vma(vma))
63520 + goto Einval;
63521 +#endif
63522 +
63523 /* We can't remap across vm area boundaries */
63524 if (old_len > vma->vm_end - addr)
63525 goto Efault;
63526 @@ -329,20 +340,25 @@ static unsigned long mremap_to(unsigned
63527 unsigned long ret = -EINVAL;
63528 unsigned long charged = 0;
63529 unsigned long map_flags;
63530 + unsigned long pax_task_size = TASK_SIZE;
63531
63532 if (new_addr & ~PAGE_MASK)
63533 goto out;
63534
63535 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
63536 +#ifdef CONFIG_PAX_SEGMEXEC
63537 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
63538 + pax_task_size = SEGMEXEC_TASK_SIZE;
63539 +#endif
63540 +
63541 + pax_task_size -= PAGE_SIZE;
63542 +
63543 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
63544 goto out;
63545
63546 /* Check if the location we're moving into overlaps the
63547 * old location at all, and fail if it does.
63548 */
63549 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
63550 - goto out;
63551 -
63552 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
63553 + if (addr + old_len > new_addr && new_addr + new_len > addr)
63554 goto out;
63555
63556 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
63557 @@ -414,6 +430,7 @@ unsigned long do_mremap(unsigned long ad
63558 struct vm_area_struct *vma;
63559 unsigned long ret = -EINVAL;
63560 unsigned long charged = 0;
63561 + unsigned long pax_task_size = TASK_SIZE;
63562
63563 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
63564 goto out;
63565 @@ -432,6 +449,17 @@ unsigned long do_mremap(unsigned long ad
63566 if (!new_len)
63567 goto out;
63568
63569 +#ifdef CONFIG_PAX_SEGMEXEC
63570 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
63571 + pax_task_size = SEGMEXEC_TASK_SIZE;
63572 +#endif
63573 +
63574 + pax_task_size -= PAGE_SIZE;
63575 +
63576 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
63577 + old_len > pax_task_size || addr > pax_task_size-old_len)
63578 + goto out;
63579 +
63580 if (flags & MREMAP_FIXED) {
63581 if (flags & MREMAP_MAYMOVE)
63582 ret = mremap_to(addr, old_len, new_addr, new_len);
63583 @@ -481,6 +509,7 @@ unsigned long do_mremap(unsigned long ad
63584 addr + new_len);
63585 }
63586 ret = addr;
63587 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
63588 goto out;
63589 }
63590 }
63591 @@ -507,7 +536,13 @@ unsigned long do_mremap(unsigned long ad
63592 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
63593 if (ret)
63594 goto out;
63595 +
63596 + map_flags = vma->vm_flags;
63597 ret = move_vma(vma, addr, old_len, new_len, new_addr);
63598 + if (!(ret & ~PAGE_MASK)) {
63599 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
63600 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
63601 + }
63602 }
63603 out:
63604 if (ret & ~PAGE_MASK)
63605 diff -urNp linux-2.6.39.4/mm/nobootmem.c linux-2.6.39.4/mm/nobootmem.c
63606 --- linux-2.6.39.4/mm/nobootmem.c 2011-05-19 00:06:34.000000000 -0400
63607 +++ linux-2.6.39.4/mm/nobootmem.c 2011-08-05 19:44:37.000000000 -0400
63608 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
63609 unsigned long __init free_all_memory_core_early(int nodeid)
63610 {
63611 int i;
63612 - u64 start, end;
63613 + u64 start, end, startrange, endrange;
63614 unsigned long count = 0;
63615 - struct range *range = NULL;
63616 + struct range *range = NULL, rangerange = { 0, 0 };
63617 int nr_range;
63618
63619 nr_range = get_free_all_memory_range(&range, nodeid);
63620 + startrange = __pa(range) >> PAGE_SHIFT;
63621 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
63622
63623 for (i = 0; i < nr_range; i++) {
63624 start = range[i].start;
63625 end = range[i].end;
63626 + if (start <= endrange && startrange < end) {
63627 + BUG_ON(rangerange.start | rangerange.end);
63628 + rangerange = range[i];
63629 + continue;
63630 + }
63631 count += end - start;
63632 __free_pages_memory(start, end);
63633 }
63634 + start = rangerange.start;
63635 + end = rangerange.end;
63636 + count += end - start;
63637 + __free_pages_memory(start, end);
63638
63639 return count;
63640 }
63641 diff -urNp linux-2.6.39.4/mm/nommu.c linux-2.6.39.4/mm/nommu.c
63642 --- linux-2.6.39.4/mm/nommu.c 2011-08-05 21:11:51.000000000 -0400
63643 +++ linux-2.6.39.4/mm/nommu.c 2011-08-05 21:12:20.000000000 -0400
63644 @@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
63645 int sysctl_overcommit_ratio = 50; /* default is 50% */
63646 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
63647 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
63648 -int heap_stack_gap = 0;
63649
63650 atomic_long_t mmap_pages_allocated;
63651
63652 @@ -833,15 +832,6 @@ struct vm_area_struct *find_vma(struct m
63653 EXPORT_SYMBOL(find_vma);
63654
63655 /*
63656 - * find a VMA
63657 - * - we don't extend stack VMAs under NOMMU conditions
63658 - */
63659 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
63660 -{
63661 - return find_vma(mm, addr);
63662 -}
63663 -
63664 -/*
63665 * expand a stack to a given address
63666 * - not supported under NOMMU conditions
63667 */
63668 @@ -1563,6 +1553,7 @@ int split_vma(struct mm_struct *mm, stru
63669
63670 /* most fields are the same, copy all, and then fixup */
63671 *new = *vma;
63672 + INIT_LIST_HEAD(&new->anon_vma_chain);
63673 *region = *vma->vm_region;
63674 new->vm_region = region;
63675
63676 diff -urNp linux-2.6.39.4/mm/page_alloc.c linux-2.6.39.4/mm/page_alloc.c
63677 --- linux-2.6.39.4/mm/page_alloc.c 2011-06-03 00:04:14.000000000 -0400
63678 +++ linux-2.6.39.4/mm/page_alloc.c 2011-08-05 19:44:37.000000000 -0400
63679 @@ -337,7 +337,7 @@ out:
63680 * This usage means that zero-order pages may not be compound.
63681 */
63682
63683 -static void free_compound_page(struct page *page)
63684 +void free_compound_page(struct page *page)
63685 {
63686 __free_pages_ok(page, compound_order(page));
63687 }
63688 @@ -650,6 +650,10 @@ static bool free_pages_prepare(struct pa
63689 int i;
63690 int bad = 0;
63691
63692 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
63693 + unsigned long index = 1UL << order;
63694 +#endif
63695 +
63696 trace_mm_page_free_direct(page, order);
63697 kmemcheck_free_shadow(page, order);
63698
63699 @@ -665,6 +669,12 @@ static bool free_pages_prepare(struct pa
63700 debug_check_no_obj_freed(page_address(page),
63701 PAGE_SIZE << order);
63702 }
63703 +
63704 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
63705 + for (; index; --index)
63706 + sanitize_highpage(page + index - 1);
63707 +#endif
63708 +
63709 arch_free_page(page, order);
63710 kernel_map_pages(page, 1 << order, 0);
63711
63712 @@ -780,8 +790,10 @@ static int prep_new_page(struct page *pa
63713 arch_alloc_page(page, order);
63714 kernel_map_pages(page, 1 << order, 1);
63715
63716 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
63717 if (gfp_flags & __GFP_ZERO)
63718 prep_zero_page(page, order, gfp_flags);
63719 +#endif
63720
63721 if (order && (gfp_flags & __GFP_COMP))
63722 prep_compound_page(page, order);
63723 @@ -2504,6 +2516,8 @@ void __show_free_areas(unsigned int filt
63724 int cpu;
63725 struct zone *zone;
63726
63727 + pax_track_stack();
63728 +
63729 for_each_populated_zone(zone) {
63730 if (skip_free_areas_zone(filter, zone))
63731 continue;
63732 diff -urNp linux-2.6.39.4/mm/percpu.c linux-2.6.39.4/mm/percpu.c
63733 --- linux-2.6.39.4/mm/percpu.c 2011-05-19 00:06:34.000000000 -0400
63734 +++ linux-2.6.39.4/mm/percpu.c 2011-08-05 19:44:37.000000000 -0400
63735 @@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
63736 static unsigned int pcpu_last_unit_cpu __read_mostly;
63737
63738 /* the address of the first chunk which starts with the kernel static area */
63739 -void *pcpu_base_addr __read_mostly;
63740 +void *pcpu_base_addr __read_only;
63741 EXPORT_SYMBOL_GPL(pcpu_base_addr);
63742
63743 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
63744 diff -urNp linux-2.6.39.4/mm/rmap.c linux-2.6.39.4/mm/rmap.c
63745 --- linux-2.6.39.4/mm/rmap.c 2011-05-19 00:06:34.000000000 -0400
63746 +++ linux-2.6.39.4/mm/rmap.c 2011-08-05 19:44:37.000000000 -0400
63747 @@ -131,6 +131,10 @@ int anon_vma_prepare(struct vm_area_stru
63748 struct anon_vma *anon_vma = vma->anon_vma;
63749 struct anon_vma_chain *avc;
63750
63751 +#ifdef CONFIG_PAX_SEGMEXEC
63752 + struct anon_vma_chain *avc_m = NULL;
63753 +#endif
63754 +
63755 might_sleep();
63756 if (unlikely(!anon_vma)) {
63757 struct mm_struct *mm = vma->vm_mm;
63758 @@ -140,6 +144,12 @@ int anon_vma_prepare(struct vm_area_stru
63759 if (!avc)
63760 goto out_enomem;
63761
63762 +#ifdef CONFIG_PAX_SEGMEXEC
63763 + avc_m = anon_vma_chain_alloc();
63764 + if (!avc_m)
63765 + goto out_enomem_free_avc;
63766 +#endif
63767 +
63768 anon_vma = find_mergeable_anon_vma(vma);
63769 allocated = NULL;
63770 if (!anon_vma) {
63771 @@ -153,6 +163,21 @@ int anon_vma_prepare(struct vm_area_stru
63772 /* page_table_lock to protect against threads */
63773 spin_lock(&mm->page_table_lock);
63774 if (likely(!vma->anon_vma)) {
63775 +
63776 +#ifdef CONFIG_PAX_SEGMEXEC
63777 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
63778 +
63779 + if (vma_m) {
63780 + BUG_ON(vma_m->anon_vma);
63781 + vma_m->anon_vma = anon_vma;
63782 + avc_m->anon_vma = anon_vma;
63783 + avc_m->vma = vma;
63784 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
63785 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
63786 + avc_m = NULL;
63787 + }
63788 +#endif
63789 +
63790 vma->anon_vma = anon_vma;
63791 avc->anon_vma = anon_vma;
63792 avc->vma = vma;
63793 @@ -166,12 +191,24 @@ int anon_vma_prepare(struct vm_area_stru
63794
63795 if (unlikely(allocated))
63796 put_anon_vma(allocated);
63797 +
63798 +#ifdef CONFIG_PAX_SEGMEXEC
63799 + if (unlikely(avc_m))
63800 + anon_vma_chain_free(avc_m);
63801 +#endif
63802 +
63803 if (unlikely(avc))
63804 anon_vma_chain_free(avc);
63805 }
63806 return 0;
63807
63808 out_enomem_free_avc:
63809 +
63810 +#ifdef CONFIG_PAX_SEGMEXEC
63811 + if (avc_m)
63812 + anon_vma_chain_free(avc_m);
63813 +#endif
63814 +
63815 anon_vma_chain_free(avc);
63816 out_enomem:
63817 return -ENOMEM;
63818 @@ -198,7 +235,7 @@ static void anon_vma_chain_link(struct v
63819 * Attach the anon_vmas from src to dst.
63820 * Returns 0 on success, -ENOMEM on failure.
63821 */
63822 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
63823 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
63824 {
63825 struct anon_vma_chain *avc, *pavc;
63826
63827 @@ -220,7 +257,7 @@ int anon_vma_clone(struct vm_area_struct
63828 * the corresponding VMA in the parent process is attached to.
63829 * Returns 0 on success, non-zero on failure.
63830 */
63831 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
63832 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
63833 {
63834 struct anon_vma_chain *avc;
63835 struct anon_vma *anon_vma;
63836 diff -urNp linux-2.6.39.4/mm/shmem.c linux-2.6.39.4/mm/shmem.c
63837 --- linux-2.6.39.4/mm/shmem.c 2011-06-03 00:04:14.000000000 -0400
63838 +++ linux-2.6.39.4/mm/shmem.c 2011-08-05 19:44:37.000000000 -0400
63839 @@ -31,7 +31,7 @@
63840 #include <linux/percpu_counter.h>
63841 #include <linux/swap.h>
63842
63843 -static struct vfsmount *shm_mnt;
63844 +struct vfsmount *shm_mnt;
63845
63846 #ifdef CONFIG_SHMEM
63847 /*
63848 @@ -1087,6 +1087,8 @@ static int shmem_writepage(struct page *
63849 goto unlock;
63850 }
63851 entry = shmem_swp_entry(info, index, NULL);
63852 + if (!entry)
63853 + goto unlock;
63854 if (entry->val) {
63855 /*
63856 * The more uptodate page coming down from a stacked
63857 @@ -1158,6 +1160,8 @@ static struct page *shmem_swapin(swp_ent
63858 struct vm_area_struct pvma;
63859 struct page *page;
63860
63861 + pax_track_stack();
63862 +
63863 spol = mpol_cond_copy(&mpol,
63864 mpol_shared_policy_lookup(&info->policy, idx));
63865
63866 @@ -2014,7 +2018,7 @@ static int shmem_symlink(struct inode *d
63867
63868 info = SHMEM_I(inode);
63869 inode->i_size = len-1;
63870 - if (len <= (char *)inode - (char *)info) {
63871 + if (len <= (char *)inode - (char *)info && len <= 64) {
63872 /* do it inline */
63873 memcpy(info, symname, len);
63874 inode->i_op = &shmem_symlink_inline_operations;
63875 @@ -2362,8 +2366,7 @@ int shmem_fill_super(struct super_block
63876 int err = -ENOMEM;
63877
63878 /* Round up to L1_CACHE_BYTES to resist false sharing */
63879 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
63880 - L1_CACHE_BYTES), GFP_KERNEL);
63881 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
63882 if (!sbinfo)
63883 return -ENOMEM;
63884
63885 diff -urNp linux-2.6.39.4/mm/slab.c linux-2.6.39.4/mm/slab.c
63886 --- linux-2.6.39.4/mm/slab.c 2011-05-19 00:06:34.000000000 -0400
63887 +++ linux-2.6.39.4/mm/slab.c 2011-08-05 19:44:37.000000000 -0400
63888 @@ -150,7 +150,7 @@
63889
63890 /* Legal flag mask for kmem_cache_create(). */
63891 #if DEBUG
63892 -# define CREATE_MASK (SLAB_RED_ZONE | \
63893 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
63894 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
63895 SLAB_CACHE_DMA | \
63896 SLAB_STORE_USER | \
63897 @@ -158,7 +158,7 @@
63898 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63899 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
63900 #else
63901 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
63902 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
63903 SLAB_CACHE_DMA | \
63904 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
63905 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63906 @@ -287,7 +287,7 @@ struct kmem_list3 {
63907 * Need this for bootstrapping a per node allocator.
63908 */
63909 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
63910 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
63911 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
63912 #define CACHE_CACHE 0
63913 #define SIZE_AC MAX_NUMNODES
63914 #define SIZE_L3 (2 * MAX_NUMNODES)
63915 @@ -388,10 +388,10 @@ static void kmem_list3_init(struct kmem_
63916 if ((x)->max_freeable < i) \
63917 (x)->max_freeable = i; \
63918 } while (0)
63919 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
63920 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
63921 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
63922 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
63923 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
63924 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
63925 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
63926 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
63927 #else
63928 #define STATS_INC_ACTIVE(x) do { } while (0)
63929 #define STATS_DEC_ACTIVE(x) do { } while (0)
63930 @@ -537,7 +537,7 @@ static inline void *index_to_obj(struct
63931 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
63932 */
63933 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
63934 - const struct slab *slab, void *obj)
63935 + const struct slab *slab, const void *obj)
63936 {
63937 u32 offset = (obj - slab->s_mem);
63938 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
63939 @@ -563,7 +563,7 @@ struct cache_names {
63940 static struct cache_names __initdata cache_names[] = {
63941 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
63942 #include <linux/kmalloc_sizes.h>
63943 - {NULL,}
63944 + {NULL}
63945 #undef CACHE
63946 };
63947
63948 @@ -1529,7 +1529,7 @@ void __init kmem_cache_init(void)
63949 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
63950 sizes[INDEX_AC].cs_size,
63951 ARCH_KMALLOC_MINALIGN,
63952 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63953 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63954 NULL);
63955
63956 if (INDEX_AC != INDEX_L3) {
63957 @@ -1537,7 +1537,7 @@ void __init kmem_cache_init(void)
63958 kmem_cache_create(names[INDEX_L3].name,
63959 sizes[INDEX_L3].cs_size,
63960 ARCH_KMALLOC_MINALIGN,
63961 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63962 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63963 NULL);
63964 }
63965
63966 @@ -1555,7 +1555,7 @@ void __init kmem_cache_init(void)
63967 sizes->cs_cachep = kmem_cache_create(names->name,
63968 sizes->cs_size,
63969 ARCH_KMALLOC_MINALIGN,
63970 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63971 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63972 NULL);
63973 }
63974 #ifdef CONFIG_ZONE_DMA
63975 @@ -4270,10 +4270,10 @@ static int s_show(struct seq_file *m, vo
63976 }
63977 /* cpu stats */
63978 {
63979 - unsigned long allochit = atomic_read(&cachep->allochit);
63980 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
63981 - unsigned long freehit = atomic_read(&cachep->freehit);
63982 - unsigned long freemiss = atomic_read(&cachep->freemiss);
63983 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
63984 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
63985 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
63986 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
63987
63988 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
63989 allochit, allocmiss, freehit, freemiss);
63990 @@ -4530,15 +4530,66 @@ static const struct file_operations proc
63991
63992 static int __init slab_proc_init(void)
63993 {
63994 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
63995 + mode_t gr_mode = S_IRUGO;
63996 +
63997 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
63998 + gr_mode = S_IRUSR;
63999 +#endif
64000 +
64001 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
64002 #ifdef CONFIG_DEBUG_SLAB_LEAK
64003 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
64004 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
64005 #endif
64006 return 0;
64007 }
64008 module_init(slab_proc_init);
64009 #endif
64010
64011 +void check_object_size(const void *ptr, unsigned long n, bool to)
64012 +{
64013 +
64014 +#ifdef CONFIG_PAX_USERCOPY
64015 + struct page *page;
64016 + struct kmem_cache *cachep = NULL;
64017 + struct slab *slabp;
64018 + unsigned int objnr;
64019 + unsigned long offset;
64020 +
64021 + if (!n)
64022 + return;
64023 +
64024 + if (ZERO_OR_NULL_PTR(ptr))
64025 + goto report;
64026 +
64027 + if (!virt_addr_valid(ptr))
64028 + return;
64029 +
64030 + page = virt_to_head_page(ptr);
64031 +
64032 + if (!PageSlab(page)) {
64033 + if (object_is_on_stack(ptr, n) == -1)
64034 + goto report;
64035 + return;
64036 + }
64037 +
64038 + cachep = page_get_cache(page);
64039 + if (!(cachep->flags & SLAB_USERCOPY))
64040 + goto report;
64041 +
64042 + slabp = page_get_slab(page);
64043 + objnr = obj_to_index(cachep, slabp, ptr);
64044 + BUG_ON(objnr >= cachep->num);
64045 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
64046 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
64047 + return;
64048 +
64049 +report:
64050 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
64051 +#endif
64052 +
64053 +}
64054 +EXPORT_SYMBOL(check_object_size);
64055 +
64056 /**
64057 * ksize - get the actual amount of memory allocated for a given object
64058 * @objp: Pointer to the object
64059 diff -urNp linux-2.6.39.4/mm/slob.c linux-2.6.39.4/mm/slob.c
64060 --- linux-2.6.39.4/mm/slob.c 2011-05-19 00:06:34.000000000 -0400
64061 +++ linux-2.6.39.4/mm/slob.c 2011-08-05 19:44:37.000000000 -0400
64062 @@ -29,7 +29,7 @@
64063 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
64064 * alloc_pages() directly, allocating compound pages so the page order
64065 * does not have to be separately tracked, and also stores the exact
64066 - * allocation size in page->private so that it can be used to accurately
64067 + * allocation size in slob_page->size so that it can be used to accurately
64068 * provide ksize(). These objects are detected in kfree() because slob_page()
64069 * is false for them.
64070 *
64071 @@ -58,6 +58,7 @@
64072 */
64073
64074 #include <linux/kernel.h>
64075 +#include <linux/sched.h>
64076 #include <linux/slab.h>
64077 #include <linux/mm.h>
64078 #include <linux/swap.h> /* struct reclaim_state */
64079 @@ -102,7 +103,8 @@ struct slob_page {
64080 unsigned long flags; /* mandatory */
64081 atomic_t _count; /* mandatory */
64082 slobidx_t units; /* free units left in page */
64083 - unsigned long pad[2];
64084 + unsigned long pad[1];
64085 + unsigned long size; /* size when >=PAGE_SIZE */
64086 slob_t *free; /* first free slob_t in page */
64087 struct list_head list; /* linked list of free pages */
64088 };
64089 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
64090 */
64091 static inline int is_slob_page(struct slob_page *sp)
64092 {
64093 - return PageSlab((struct page *)sp);
64094 + return PageSlab((struct page *)sp) && !sp->size;
64095 }
64096
64097 static inline void set_slob_page(struct slob_page *sp)
64098 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
64099
64100 static inline struct slob_page *slob_page(const void *addr)
64101 {
64102 - return (struct slob_page *)virt_to_page(addr);
64103 + return (struct slob_page *)virt_to_head_page(addr);
64104 }
64105
64106 /*
64107 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
64108 /*
64109 * Return the size of a slob block.
64110 */
64111 -static slobidx_t slob_units(slob_t *s)
64112 +static slobidx_t slob_units(const slob_t *s)
64113 {
64114 if (s->units > 0)
64115 return s->units;
64116 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
64117 /*
64118 * Return the next free slob block pointer after this one.
64119 */
64120 -static slob_t *slob_next(slob_t *s)
64121 +static slob_t *slob_next(const slob_t *s)
64122 {
64123 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
64124 slobidx_t next;
64125 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
64126 /*
64127 * Returns true if s is the last free block in its page.
64128 */
64129 -static int slob_last(slob_t *s)
64130 +static int slob_last(const slob_t *s)
64131 {
64132 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
64133 }
64134 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
64135 if (!page)
64136 return NULL;
64137
64138 + set_slob_page(page);
64139 return page_address(page);
64140 }
64141
64142 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
64143 if (!b)
64144 return NULL;
64145 sp = slob_page(b);
64146 - set_slob_page(sp);
64147
64148 spin_lock_irqsave(&slob_lock, flags);
64149 sp->units = SLOB_UNITS(PAGE_SIZE);
64150 sp->free = b;
64151 + sp->size = 0;
64152 INIT_LIST_HEAD(&sp->list);
64153 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
64154 set_slob_page_free(sp, slob_list);
64155 @@ -476,10 +479,9 @@ out:
64156 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
64157 */
64158
64159 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
64160 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
64161 {
64162 - unsigned int *m;
64163 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64164 + slob_t *m;
64165 void *ret;
64166
64167 lockdep_trace_alloc(gfp);
64168 @@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
64169
64170 if (!m)
64171 return NULL;
64172 - *m = size;
64173 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
64174 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
64175 + m[0].units = size;
64176 + m[1].units = align;
64177 ret = (void *)m + align;
64178
64179 trace_kmalloc_node(_RET_IP_, ret,
64180 @@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
64181 gfp |= __GFP_COMP;
64182 ret = slob_new_pages(gfp, order, node);
64183 if (ret) {
64184 - struct page *page;
64185 - page = virt_to_page(ret);
64186 - page->private = size;
64187 + struct slob_page *sp;
64188 + sp = slob_page(ret);
64189 + sp->size = size;
64190 }
64191
64192 trace_kmalloc_node(_RET_IP_, ret,
64193 size, PAGE_SIZE << order, gfp, node);
64194 }
64195
64196 - kmemleak_alloc(ret, size, 1, gfp);
64197 + return ret;
64198 +}
64199 +
64200 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
64201 +{
64202 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64203 + void *ret = __kmalloc_node_align(size, gfp, node, align);
64204 +
64205 + if (!ZERO_OR_NULL_PTR(ret))
64206 + kmemleak_alloc(ret, size, 1, gfp);
64207 return ret;
64208 }
64209 EXPORT_SYMBOL(__kmalloc_node);
64210 @@ -531,13 +545,88 @@ void kfree(const void *block)
64211 sp = slob_page(block);
64212 if (is_slob_page(sp)) {
64213 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64214 - unsigned int *m = (unsigned int *)(block - align);
64215 - slob_free(m, *m + align);
64216 - } else
64217 + slob_t *m = (slob_t *)(block - align);
64218 + slob_free(m, m[0].units + align);
64219 + } else {
64220 + clear_slob_page(sp);
64221 + free_slob_page(sp);
64222 + sp->size = 0;
64223 put_page(&sp->page);
64224 + }
64225 }
64226 EXPORT_SYMBOL(kfree);
64227
64228 +void check_object_size(const void *ptr, unsigned long n, bool to)
64229 +{
64230 +
64231 +#ifdef CONFIG_PAX_USERCOPY
64232 + struct slob_page *sp;
64233 + const slob_t *free;
64234 + const void *base;
64235 + unsigned long flags;
64236 +
64237 + if (!n)
64238 + return;
64239 +
64240 + if (ZERO_OR_NULL_PTR(ptr))
64241 + goto report;
64242 +
64243 + if (!virt_addr_valid(ptr))
64244 + return;
64245 +
64246 + sp = slob_page(ptr);
64247 + if (!PageSlab((struct page*)sp)) {
64248 + if (object_is_on_stack(ptr, n) == -1)
64249 + goto report;
64250 + return;
64251 + }
64252 +
64253 + if (sp->size) {
64254 + base = page_address(&sp->page);
64255 + if (base <= ptr && n <= sp->size - (ptr - base))
64256 + return;
64257 + goto report;
64258 + }
64259 +
64260 + /* some tricky double walking to find the chunk */
64261 + spin_lock_irqsave(&slob_lock, flags);
64262 + base = (void *)((unsigned long)ptr & PAGE_MASK);
64263 + free = sp->free;
64264 +
64265 + while (!slob_last(free) && (void *)free <= ptr) {
64266 + base = free + slob_units(free);
64267 + free = slob_next(free);
64268 + }
64269 +
64270 + while (base < (void *)free) {
64271 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
64272 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
64273 + int offset;
64274 +
64275 + if (ptr < base + align)
64276 + break;
64277 +
64278 + offset = ptr - base - align;
64279 + if (offset >= m) {
64280 + base += size;
64281 + continue;
64282 + }
64283 +
64284 + if (n > m - offset)
64285 + break;
64286 +
64287 + spin_unlock_irqrestore(&slob_lock, flags);
64288 + return;
64289 + }
64290 +
64291 + spin_unlock_irqrestore(&slob_lock, flags);
64292 +report:
64293 + pax_report_usercopy(ptr, n, to, NULL);
64294 +#endif
64295 +
64296 +}
64297 +EXPORT_SYMBOL(check_object_size);
64298 +
64299 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
64300 size_t ksize(const void *block)
64301 {
64302 @@ -550,10 +639,10 @@ size_t ksize(const void *block)
64303 sp = slob_page(block);
64304 if (is_slob_page(sp)) {
64305 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64306 - unsigned int *m = (unsigned int *)(block - align);
64307 - return SLOB_UNITS(*m) * SLOB_UNIT;
64308 + slob_t *m = (slob_t *)(block - align);
64309 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
64310 } else
64311 - return sp->page.private;
64312 + return sp->size;
64313 }
64314 EXPORT_SYMBOL(ksize);
64315
64316 @@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
64317 {
64318 struct kmem_cache *c;
64319
64320 +#ifdef CONFIG_PAX_USERCOPY
64321 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
64322 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
64323 +#else
64324 c = slob_alloc(sizeof(struct kmem_cache),
64325 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
64326 +#endif
64327
64328 if (c) {
64329 c->name = name;
64330 @@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
64331 {
64332 void *b;
64333
64334 +#ifdef CONFIG_PAX_USERCOPY
64335 + b = __kmalloc_node_align(c->size, flags, node, c->align);
64336 +#else
64337 if (c->size < PAGE_SIZE) {
64338 b = slob_alloc(c->size, flags, c->align, node);
64339 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
64340 SLOB_UNITS(c->size) * SLOB_UNIT,
64341 flags, node);
64342 } else {
64343 + struct slob_page *sp;
64344 +
64345 b = slob_new_pages(flags, get_order(c->size), node);
64346 + sp = slob_page(b);
64347 + sp->size = c->size;
64348 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
64349 PAGE_SIZE << get_order(c->size),
64350 flags, node);
64351 }
64352 +#endif
64353
64354 if (c->ctor)
64355 c->ctor(b);
64356 @@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
64357
64358 static void __kmem_cache_free(void *b, int size)
64359 {
64360 - if (size < PAGE_SIZE)
64361 + struct slob_page *sp = slob_page(b);
64362 +
64363 + if (is_slob_page(sp))
64364 slob_free(b, size);
64365 - else
64366 + else {
64367 + clear_slob_page(sp);
64368 + free_slob_page(sp);
64369 + sp->size = 0;
64370 slob_free_pages(b, get_order(size));
64371 + }
64372 }
64373
64374 static void kmem_rcu_free(struct rcu_head *head)
64375 @@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
64376
64377 void kmem_cache_free(struct kmem_cache *c, void *b)
64378 {
64379 + int size = c->size;
64380 +
64381 +#ifdef CONFIG_PAX_USERCOPY
64382 + if (size + c->align < PAGE_SIZE) {
64383 + size += c->align;
64384 + b -= c->align;
64385 + }
64386 +#endif
64387 +
64388 kmemleak_free_recursive(b, c->flags);
64389 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
64390 struct slob_rcu *slob_rcu;
64391 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
64392 - slob_rcu->size = c->size;
64393 + slob_rcu = b + (size - sizeof(struct slob_rcu));
64394 + slob_rcu->size = size;
64395 call_rcu(&slob_rcu->head, kmem_rcu_free);
64396 } else {
64397 - __kmem_cache_free(b, c->size);
64398 + __kmem_cache_free(b, size);
64399 }
64400
64401 +#ifdef CONFIG_PAX_USERCOPY
64402 + trace_kfree(_RET_IP_, b);
64403 +#else
64404 trace_kmem_cache_free(_RET_IP_, b);
64405 +#endif
64406 +
64407 }
64408 EXPORT_SYMBOL(kmem_cache_free);
64409
64410 diff -urNp linux-2.6.39.4/mm/slub.c linux-2.6.39.4/mm/slub.c
64411 --- linux-2.6.39.4/mm/slub.c 2011-06-03 00:04:14.000000000 -0400
64412 +++ linux-2.6.39.4/mm/slub.c 2011-08-05 19:44:37.000000000 -0400
64413 @@ -431,7 +431,7 @@ static void print_track(const char *s, s
64414 if (!t->addr)
64415 return;
64416
64417 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
64418 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
64419 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
64420 }
64421
64422 @@ -2183,6 +2183,8 @@ void kmem_cache_free(struct kmem_cache *
64423
64424 page = virt_to_head_page(x);
64425
64426 + BUG_ON(!PageSlab(page));
64427 +
64428 slab_free(s, page, x, _RET_IP_);
64429
64430 trace_kmem_cache_free(_RET_IP_, x);
64431 @@ -2216,7 +2218,7 @@ static int slub_min_objects;
64432 * Merge control. If this is set then no merging of slab caches will occur.
64433 * (Could be removed. This was introduced to pacify the merge skeptics.)
64434 */
64435 -static int slub_nomerge;
64436 +static int slub_nomerge = 1;
64437
64438 /*
64439 * Calculate the order of allocation given an slab object size.
64440 @@ -2644,7 +2646,7 @@ static int kmem_cache_open(struct kmem_c
64441 * list to avoid pounding the page allocator excessively.
64442 */
64443 set_min_partial(s, ilog2(s->size));
64444 - s->refcount = 1;
64445 + atomic_set(&s->refcount, 1);
64446 #ifdef CONFIG_NUMA
64447 s->remote_node_defrag_ratio = 1000;
64448 #endif
64449 @@ -2750,8 +2752,7 @@ static inline int kmem_cache_close(struc
64450 void kmem_cache_destroy(struct kmem_cache *s)
64451 {
64452 down_write(&slub_lock);
64453 - s->refcount--;
64454 - if (!s->refcount) {
64455 + if (atomic_dec_and_test(&s->refcount)) {
64456 list_del(&s->list);
64457 if (kmem_cache_close(s)) {
64458 printk(KERN_ERR "SLUB %s: %s called for cache that "
64459 @@ -2961,6 +2962,46 @@ void *__kmalloc_node(size_t size, gfp_t
64460 EXPORT_SYMBOL(__kmalloc_node);
64461 #endif
64462
64463 +void check_object_size(const void *ptr, unsigned long n, bool to)
64464 +{
64465 +
64466 +#ifdef CONFIG_PAX_USERCOPY
64467 + struct page *page;
64468 + struct kmem_cache *s = NULL;
64469 + unsigned long offset;
64470 +
64471 + if (!n)
64472 + return;
64473 +
64474 + if (ZERO_OR_NULL_PTR(ptr))
64475 + goto report;
64476 +
64477 + if (!virt_addr_valid(ptr))
64478 + return;
64479 +
64480 + page = virt_to_head_page(ptr);
64481 +
64482 + if (!PageSlab(page)) {
64483 + if (object_is_on_stack(ptr, n) == -1)
64484 + goto report;
64485 + return;
64486 + }
64487 +
64488 + s = page->slab;
64489 + if (!(s->flags & SLAB_USERCOPY))
64490 + goto report;
64491 +
64492 + offset = (ptr - page_address(page)) % s->size;
64493 + if (offset <= s->objsize && n <= s->objsize - offset)
64494 + return;
64495 +
64496 +report:
64497 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
64498 +#endif
64499 +
64500 +}
64501 +EXPORT_SYMBOL(check_object_size);
64502 +
64503 size_t ksize(const void *object)
64504 {
64505 struct page *page;
64506 @@ -3205,7 +3246,7 @@ static void __init kmem_cache_bootstrap_
64507 int node;
64508
64509 list_add(&s->list, &slab_caches);
64510 - s->refcount = -1;
64511 + atomic_set(&s->refcount, -1);
64512
64513 for_each_node_state(node, N_NORMAL_MEMORY) {
64514 struct kmem_cache_node *n = get_node(s, node);
64515 @@ -3322,17 +3363,17 @@ void __init kmem_cache_init(void)
64516
64517 /* Caches that are not of the two-to-the-power-of size */
64518 if (KMALLOC_MIN_SIZE <= 32) {
64519 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
64520 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
64521 caches++;
64522 }
64523
64524 if (KMALLOC_MIN_SIZE <= 64) {
64525 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
64526 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
64527 caches++;
64528 }
64529
64530 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
64531 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
64532 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
64533 caches++;
64534 }
64535
64536 @@ -3400,7 +3441,7 @@ static int slab_unmergeable(struct kmem_
64537 /*
64538 * We may have set a slab to be unmergeable during bootstrap.
64539 */
64540 - if (s->refcount < 0)
64541 + if (atomic_read(&s->refcount) < 0)
64542 return 1;
64543
64544 return 0;
64545 @@ -3459,7 +3500,7 @@ struct kmem_cache *kmem_cache_create(con
64546 down_write(&slub_lock);
64547 s = find_mergeable(size, align, flags, name, ctor);
64548 if (s) {
64549 - s->refcount++;
64550 + atomic_inc(&s->refcount);
64551 /*
64552 * Adjust the object sizes so that we clear
64553 * the complete object on kzalloc.
64554 @@ -3468,7 +3509,7 @@ struct kmem_cache *kmem_cache_create(con
64555 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
64556
64557 if (sysfs_slab_alias(s, name)) {
64558 - s->refcount--;
64559 + atomic_dec(&s->refcount);
64560 goto err;
64561 }
64562 up_write(&slub_lock);
64563 @@ -4201,7 +4242,7 @@ SLAB_ATTR_RO(ctor);
64564
64565 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
64566 {
64567 - return sprintf(buf, "%d\n", s->refcount - 1);
64568 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
64569 }
64570 SLAB_ATTR_RO(aliases);
64571
64572 @@ -4945,7 +4986,13 @@ static const struct file_operations proc
64573
64574 static int __init slab_proc_init(void)
64575 {
64576 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
64577 + mode_t gr_mode = S_IRUGO;
64578 +
64579 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
64580 + gr_mode = S_IRUSR;
64581 +#endif
64582 +
64583 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
64584 return 0;
64585 }
64586 module_init(slab_proc_init);
64587 diff -urNp linux-2.6.39.4/mm/swap.c linux-2.6.39.4/mm/swap.c
64588 --- linux-2.6.39.4/mm/swap.c 2011-05-19 00:06:34.000000000 -0400
64589 +++ linux-2.6.39.4/mm/swap.c 2011-08-05 19:44:37.000000000 -0400
64590 @@ -31,6 +31,7 @@
64591 #include <linux/backing-dev.h>
64592 #include <linux/memcontrol.h>
64593 #include <linux/gfp.h>
64594 +#include <linux/hugetlb.h>
64595
64596 #include "internal.h"
64597
64598 @@ -71,6 +72,8 @@ static void __put_compound_page(struct p
64599
64600 __page_cache_release(page);
64601 dtor = get_compound_page_dtor(page);
64602 + if (!PageHuge(page))
64603 + BUG_ON(dtor != free_compound_page);
64604 (*dtor)(page);
64605 }
64606
64607 diff -urNp linux-2.6.39.4/mm/swapfile.c linux-2.6.39.4/mm/swapfile.c
64608 --- linux-2.6.39.4/mm/swapfile.c 2011-05-19 00:06:34.000000000 -0400
64609 +++ linux-2.6.39.4/mm/swapfile.c 2011-08-05 19:44:37.000000000 -0400
64610 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
64611
64612 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
64613 /* Activity counter to indicate that a swapon or swapoff has occurred */
64614 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
64615 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
64616
64617 static inline unsigned char swap_count(unsigned char ent)
64618 {
64619 @@ -1669,7 +1669,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
64620 }
64621 filp_close(swap_file, NULL);
64622 err = 0;
64623 - atomic_inc(&proc_poll_event);
64624 + atomic_inc_unchecked(&proc_poll_event);
64625 wake_up_interruptible(&proc_poll_wait);
64626
64627 out_dput:
64628 @@ -1690,8 +1690,8 @@ static unsigned swaps_poll(struct file *
64629
64630 poll_wait(file, &proc_poll_wait, wait);
64631
64632 - if (s->event != atomic_read(&proc_poll_event)) {
64633 - s->event = atomic_read(&proc_poll_event);
64634 + if (s->event != atomic_read_unchecked(&proc_poll_event)) {
64635 + s->event = atomic_read_unchecked(&proc_poll_event);
64636 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
64637 }
64638
64639 @@ -1797,7 +1797,7 @@ static int swaps_open(struct inode *inod
64640 }
64641
64642 s->seq.private = s;
64643 - s->event = atomic_read(&proc_poll_event);
64644 + s->event = atomic_read_unchecked(&proc_poll_event);
64645 return ret;
64646 }
64647
64648 @@ -2131,7 +2131,7 @@ SYSCALL_DEFINE2(swapon, const char __use
64649 (p->flags & SWP_DISCARDABLE) ? "D" : "");
64650
64651 mutex_unlock(&swapon_mutex);
64652 - atomic_inc(&proc_poll_event);
64653 + atomic_inc_unchecked(&proc_poll_event);
64654 wake_up_interruptible(&proc_poll_wait);
64655
64656 if (S_ISREG(inode->i_mode))
64657 diff -urNp linux-2.6.39.4/mm/util.c linux-2.6.39.4/mm/util.c
64658 --- linux-2.6.39.4/mm/util.c 2011-05-19 00:06:34.000000000 -0400
64659 +++ linux-2.6.39.4/mm/util.c 2011-08-05 19:44:37.000000000 -0400
64660 @@ -112,6 +112,7 @@ EXPORT_SYMBOL(memdup_user);
64661 * allocated buffer. Use this if you don't want to free the buffer immediately
64662 * like, for example, with RCU.
64663 */
64664 +#undef __krealloc
64665 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
64666 {
64667 void *ret;
64668 @@ -145,6 +146,7 @@ EXPORT_SYMBOL(__krealloc);
64669 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
64670 * %NULL pointer, the object pointed to is freed.
64671 */
64672 +#undef krealloc
64673 void *krealloc(const void *p, size_t new_size, gfp_t flags)
64674 {
64675 void *ret;
64676 @@ -219,6 +221,12 @@ EXPORT_SYMBOL(strndup_user);
64677 void arch_pick_mmap_layout(struct mm_struct *mm)
64678 {
64679 mm->mmap_base = TASK_UNMAPPED_BASE;
64680 +
64681 +#ifdef CONFIG_PAX_RANDMMAP
64682 + if (mm->pax_flags & MF_PAX_RANDMMAP)
64683 + mm->mmap_base += mm->delta_mmap;
64684 +#endif
64685 +
64686 mm->get_unmapped_area = arch_get_unmapped_area;
64687 mm->unmap_area = arch_unmap_area;
64688 }
64689 diff -urNp linux-2.6.39.4/mm/vmalloc.c linux-2.6.39.4/mm/vmalloc.c
64690 --- linux-2.6.39.4/mm/vmalloc.c 2011-05-19 00:06:34.000000000 -0400
64691 +++ linux-2.6.39.4/mm/vmalloc.c 2011-08-05 19:44:37.000000000 -0400
64692 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
64693
64694 pte = pte_offset_kernel(pmd, addr);
64695 do {
64696 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64697 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64698 +
64699 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64700 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
64701 + BUG_ON(!pte_exec(*pte));
64702 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
64703 + continue;
64704 + }
64705 +#endif
64706 +
64707 + {
64708 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64709 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64710 + }
64711 } while (pte++, addr += PAGE_SIZE, addr != end);
64712 }
64713
64714 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
64715 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
64716 {
64717 pte_t *pte;
64718 + int ret = -ENOMEM;
64719
64720 /*
64721 * nr is a running index into the array which helps higher level
64722 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
64723 pte = pte_alloc_kernel(pmd, addr);
64724 if (!pte)
64725 return -ENOMEM;
64726 +
64727 + pax_open_kernel();
64728 do {
64729 struct page *page = pages[*nr];
64730
64731 - if (WARN_ON(!pte_none(*pte)))
64732 - return -EBUSY;
64733 - if (WARN_ON(!page))
64734 - return -ENOMEM;
64735 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64736 + if (pgprot_val(prot) & _PAGE_NX)
64737 +#endif
64738 +
64739 + if (WARN_ON(!pte_none(*pte))) {
64740 + ret = -EBUSY;
64741 + goto out;
64742 + }
64743 + if (WARN_ON(!page)) {
64744 + ret = -ENOMEM;
64745 + goto out;
64746 + }
64747 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
64748 (*nr)++;
64749 } while (pte++, addr += PAGE_SIZE, addr != end);
64750 - return 0;
64751 + ret = 0;
64752 +out:
64753 + pax_close_kernel();
64754 + return ret;
64755 }
64756
64757 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
64758 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
64759 * and fall back on vmalloc() if that fails. Others
64760 * just put it in the vmalloc space.
64761 */
64762 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
64763 +#ifdef CONFIG_MODULES
64764 +#ifdef MODULES_VADDR
64765 unsigned long addr = (unsigned long)x;
64766 if (addr >= MODULES_VADDR && addr < MODULES_END)
64767 return 1;
64768 #endif
64769 +
64770 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64771 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
64772 + return 1;
64773 +#endif
64774 +
64775 +#endif
64776 +
64777 return is_vmalloc_addr(x);
64778 }
64779
64780 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
64781
64782 if (!pgd_none(*pgd)) {
64783 pud_t *pud = pud_offset(pgd, addr);
64784 +#ifdef CONFIG_X86
64785 + if (!pud_large(*pud))
64786 +#endif
64787 if (!pud_none(*pud)) {
64788 pmd_t *pmd = pmd_offset(pud, addr);
64789 +#ifdef CONFIG_X86
64790 + if (!pmd_large(*pmd))
64791 +#endif
64792 if (!pmd_none(*pmd)) {
64793 pte_t *ptep, pte;
64794
64795 @@ -1296,6 +1336,16 @@ static struct vm_struct *__get_vm_area_n
64796 struct vm_struct *area;
64797
64798 BUG_ON(in_interrupt());
64799 +
64800 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64801 + if (flags & VM_KERNEXEC) {
64802 + if (start != VMALLOC_START || end != VMALLOC_END)
64803 + return NULL;
64804 + start = (unsigned long)MODULES_EXEC_VADDR;
64805 + end = (unsigned long)MODULES_EXEC_END;
64806 + }
64807 +#endif
64808 +
64809 if (flags & VM_IOREMAP) {
64810 int bit = fls(size);
64811
64812 @@ -1514,6 +1564,11 @@ void *vmap(struct page **pages, unsigned
64813 if (count > totalram_pages)
64814 return NULL;
64815
64816 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64817 + if (!(pgprot_val(prot) & _PAGE_NX))
64818 + flags |= VM_KERNEXEC;
64819 +#endif
64820 +
64821 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
64822 __builtin_return_address(0));
64823 if (!area)
64824 @@ -1610,6 +1665,13 @@ void *__vmalloc_node_range(unsigned long
64825 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
64826 return NULL;
64827
64828 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64829 + if (!(pgprot_val(prot) & _PAGE_NX))
64830 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
64831 + node, gfp_mask, caller);
64832 + else
64833 +#endif
64834 +
64835 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
64836 gfp_mask, caller);
64837
64838 @@ -1649,6 +1711,7 @@ static void *__vmalloc_node(unsigned lon
64839 gfp_mask, prot, node, caller);
64840 }
64841
64842 +#undef __vmalloc
64843 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
64844 {
64845 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
64846 @@ -1672,6 +1735,7 @@ static inline void *__vmalloc_node_flags
64847 * For tight control over page level allocator and protection flags
64848 * use __vmalloc() instead.
64849 */
64850 +#undef vmalloc
64851 void *vmalloc(unsigned long size)
64852 {
64853 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
64854 @@ -1688,6 +1752,7 @@ EXPORT_SYMBOL(vmalloc);
64855 * For tight control over page level allocator and protection flags
64856 * use __vmalloc() instead.
64857 */
64858 +#undef vzalloc
64859 void *vzalloc(unsigned long size)
64860 {
64861 return __vmalloc_node_flags(size, -1,
64862 @@ -1702,6 +1767,7 @@ EXPORT_SYMBOL(vzalloc);
64863 * The resulting memory area is zeroed so it can be mapped to userspace
64864 * without leaking data.
64865 */
64866 +#undef vmalloc_user
64867 void *vmalloc_user(unsigned long size)
64868 {
64869 struct vm_struct *area;
64870 @@ -1729,6 +1795,7 @@ EXPORT_SYMBOL(vmalloc_user);
64871 * For tight control over page level allocator and protection flags
64872 * use __vmalloc() instead.
64873 */
64874 +#undef vmalloc_node
64875 void *vmalloc_node(unsigned long size, int node)
64876 {
64877 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
64878 @@ -1748,6 +1815,7 @@ EXPORT_SYMBOL(vmalloc_node);
64879 * For tight control over page level allocator and protection flags
64880 * use __vmalloc_node() instead.
64881 */
64882 +#undef vzalloc_node
64883 void *vzalloc_node(unsigned long size, int node)
64884 {
64885 return __vmalloc_node_flags(size, node,
64886 @@ -1770,10 +1838,10 @@ EXPORT_SYMBOL(vzalloc_node);
64887 * For tight control over page level allocator and protection flags
64888 * use __vmalloc() instead.
64889 */
64890 -
64891 +#undef vmalloc_exec
64892 void *vmalloc_exec(unsigned long size)
64893 {
64894 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
64895 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
64896 -1, __builtin_return_address(0));
64897 }
64898
64899 @@ -1792,6 +1860,7 @@ void *vmalloc_exec(unsigned long size)
64900 * Allocate enough 32bit PA addressable pages to cover @size from the
64901 * page level allocator and map them into contiguous kernel virtual space.
64902 */
64903 +#undef vmalloc_32
64904 void *vmalloc_32(unsigned long size)
64905 {
64906 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
64907 @@ -1806,6 +1875,7 @@ EXPORT_SYMBOL(vmalloc_32);
64908 * The resulting memory area is 32bit addressable and zeroed so it can be
64909 * mapped to userspace without leaking data.
64910 */
64911 +#undef vmalloc_32_user
64912 void *vmalloc_32_user(unsigned long size)
64913 {
64914 struct vm_struct *area;
64915 @@ -2068,6 +2138,8 @@ int remap_vmalloc_range(struct vm_area_s
64916 unsigned long uaddr = vma->vm_start;
64917 unsigned long usize = vma->vm_end - vma->vm_start;
64918
64919 + BUG_ON(vma->vm_mirror);
64920 +
64921 if ((PAGE_SIZE-1) & (unsigned long)addr)
64922 return -EINVAL;
64923
64924 diff -urNp linux-2.6.39.4/mm/vmstat.c linux-2.6.39.4/mm/vmstat.c
64925 --- linux-2.6.39.4/mm/vmstat.c 2011-05-19 00:06:34.000000000 -0400
64926 +++ linux-2.6.39.4/mm/vmstat.c 2011-08-05 19:44:37.000000000 -0400
64927 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
64928 *
64929 * vm_stat contains the global counters
64930 */
64931 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64932 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64933 EXPORT_SYMBOL(vm_stat);
64934
64935 #ifdef CONFIG_SMP
64936 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
64937 v = p->vm_stat_diff[i];
64938 p->vm_stat_diff[i] = 0;
64939 local_irq_restore(flags);
64940 - atomic_long_add(v, &zone->vm_stat[i]);
64941 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
64942 global_diff[i] += v;
64943 #ifdef CONFIG_NUMA
64944 /* 3 seconds idle till flush */
64945 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
64946
64947 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
64948 if (global_diff[i])
64949 - atomic_long_add(global_diff[i], &vm_stat[i]);
64950 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
64951 }
64952
64953 #endif
64954 @@ -1205,10 +1205,20 @@ static int __init setup_vmstat(void)
64955 start_cpu_timer(cpu);
64956 #endif
64957 #ifdef CONFIG_PROC_FS
64958 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
64959 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
64960 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
64961 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
64962 + {
64963 + mode_t gr_mode = S_IRUGO;
64964 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
64965 + gr_mode = S_IRUSR;
64966 +#endif
64967 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
64968 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
64969 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
64970 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
64971 +#else
64972 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
64973 +#endif
64974 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
64975 + }
64976 #endif
64977 return 0;
64978 }
64979 diff -urNp linux-2.6.39.4/net/8021q/vlan.c linux-2.6.39.4/net/8021q/vlan.c
64980 --- linux-2.6.39.4/net/8021q/vlan.c 2011-05-19 00:06:34.000000000 -0400
64981 +++ linux-2.6.39.4/net/8021q/vlan.c 2011-08-05 19:44:37.000000000 -0400
64982 @@ -592,8 +592,7 @@ static int vlan_ioctl_handler(struct net
64983 err = -EPERM;
64984 if (!capable(CAP_NET_ADMIN))
64985 break;
64986 - if ((args.u.name_type >= 0) &&
64987 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
64988 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
64989 struct vlan_net *vn;
64990
64991 vn = net_generic(net, vlan_net_id);
64992 diff -urNp linux-2.6.39.4/net/atm/atm_misc.c linux-2.6.39.4/net/atm/atm_misc.c
64993 --- linux-2.6.39.4/net/atm/atm_misc.c 2011-05-19 00:06:34.000000000 -0400
64994 +++ linux-2.6.39.4/net/atm/atm_misc.c 2011-08-05 19:44:37.000000000 -0400
64995 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
64996 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
64997 return 1;
64998 atm_return(vcc, truesize);
64999 - atomic_inc(&vcc->stats->rx_drop);
65000 + atomic_inc_unchecked(&vcc->stats->rx_drop);
65001 return 0;
65002 }
65003 EXPORT_SYMBOL(atm_charge);
65004 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
65005 }
65006 }
65007 atm_return(vcc, guess);
65008 - atomic_inc(&vcc->stats->rx_drop);
65009 + atomic_inc_unchecked(&vcc->stats->rx_drop);
65010 return NULL;
65011 }
65012 EXPORT_SYMBOL(atm_alloc_charge);
65013 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
65014
65015 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
65016 {
65017 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
65018 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
65019 __SONET_ITEMS
65020 #undef __HANDLE_ITEM
65021 }
65022 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
65023
65024 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
65025 {
65026 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
65027 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
65028 __SONET_ITEMS
65029 #undef __HANDLE_ITEM
65030 }
65031 diff -urNp linux-2.6.39.4/net/atm/lec.h linux-2.6.39.4/net/atm/lec.h
65032 --- linux-2.6.39.4/net/atm/lec.h 2011-05-19 00:06:34.000000000 -0400
65033 +++ linux-2.6.39.4/net/atm/lec.h 2011-08-05 20:34:06.000000000 -0400
65034 @@ -48,7 +48,7 @@ struct lane2_ops {
65035 const u8 *tlvs, u32 sizeoftlvs);
65036 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
65037 const u8 *tlvs, u32 sizeoftlvs);
65038 -};
65039 +} __no_const;
65040
65041 /*
65042 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
65043 diff -urNp linux-2.6.39.4/net/atm/mpc.h linux-2.6.39.4/net/atm/mpc.h
65044 --- linux-2.6.39.4/net/atm/mpc.h 2011-05-19 00:06:34.000000000 -0400
65045 +++ linux-2.6.39.4/net/atm/mpc.h 2011-08-05 20:34:06.000000000 -0400
65046 @@ -33,7 +33,7 @@ struct mpoa_client {
65047 struct mpc_parameters parameters; /* parameters for this client */
65048
65049 const struct net_device_ops *old_ops;
65050 - struct net_device_ops new_ops;
65051 + net_device_ops_no_const new_ops;
65052 };
65053
65054
65055 diff -urNp linux-2.6.39.4/net/atm/mpoa_caches.c linux-2.6.39.4/net/atm/mpoa_caches.c
65056 --- linux-2.6.39.4/net/atm/mpoa_caches.c 2011-05-19 00:06:34.000000000 -0400
65057 +++ linux-2.6.39.4/net/atm/mpoa_caches.c 2011-08-05 19:44:37.000000000 -0400
65058 @@ -255,6 +255,8 @@ static void check_resolving_entries(stru
65059 struct timeval now;
65060 struct k_message msg;
65061
65062 + pax_track_stack();
65063 +
65064 do_gettimeofday(&now);
65065
65066 read_lock_bh(&client->ingress_lock);
65067 diff -urNp linux-2.6.39.4/net/atm/proc.c linux-2.6.39.4/net/atm/proc.c
65068 --- linux-2.6.39.4/net/atm/proc.c 2011-05-19 00:06:34.000000000 -0400
65069 +++ linux-2.6.39.4/net/atm/proc.c 2011-08-05 19:44:37.000000000 -0400
65070 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
65071 const struct k_atm_aal_stats *stats)
65072 {
65073 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
65074 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
65075 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
65076 - atomic_read(&stats->rx_drop));
65077 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
65078 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
65079 + atomic_read_unchecked(&stats->rx_drop));
65080 }
65081
65082 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
65083 @@ -191,7 +191,12 @@ static void vcc_info(struct seq_file *se
65084 {
65085 struct sock *sk = sk_atm(vcc);
65086
65087 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65088 + seq_printf(seq, "%p ", NULL);
65089 +#else
65090 seq_printf(seq, "%p ", vcc);
65091 +#endif
65092 +
65093 if (!vcc->dev)
65094 seq_printf(seq, "Unassigned ");
65095 else
65096 @@ -218,7 +223,11 @@ static void svc_info(struct seq_file *se
65097 {
65098 if (!vcc->dev)
65099 seq_printf(seq, sizeof(void *) == 4 ?
65100 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65101 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
65102 +#else
65103 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
65104 +#endif
65105 else
65106 seq_printf(seq, "%3d %3d %5d ",
65107 vcc->dev->number, vcc->vpi, vcc->vci);
65108 diff -urNp linux-2.6.39.4/net/atm/resources.c linux-2.6.39.4/net/atm/resources.c
65109 --- linux-2.6.39.4/net/atm/resources.c 2011-05-19 00:06:34.000000000 -0400
65110 +++ linux-2.6.39.4/net/atm/resources.c 2011-08-05 19:44:37.000000000 -0400
65111 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
65112 static void copy_aal_stats(struct k_atm_aal_stats *from,
65113 struct atm_aal_stats *to)
65114 {
65115 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
65116 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
65117 __AAL_STAT_ITEMS
65118 #undef __HANDLE_ITEM
65119 }
65120 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
65121 static void subtract_aal_stats(struct k_atm_aal_stats *from,
65122 struct atm_aal_stats *to)
65123 {
65124 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
65125 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
65126 __AAL_STAT_ITEMS
65127 #undef __HANDLE_ITEM
65128 }
65129 diff -urNp linux-2.6.39.4/net/batman-adv/hard-interface.c linux-2.6.39.4/net/batman-adv/hard-interface.c
65130 --- linux-2.6.39.4/net/batman-adv/hard-interface.c 2011-05-19 00:06:34.000000000 -0400
65131 +++ linux-2.6.39.4/net/batman-adv/hard-interface.c 2011-08-05 19:44:37.000000000 -0400
65132 @@ -339,8 +339,8 @@ int hardif_enable_interface(struct hard_
65133 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
65134 dev_add_pack(&hard_iface->batman_adv_ptype);
65135
65136 - atomic_set(&hard_iface->seqno, 1);
65137 - atomic_set(&hard_iface->frag_seqno, 1);
65138 + atomic_set_unchecked(&hard_iface->seqno, 1);
65139 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
65140 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
65141 hard_iface->net_dev->name);
65142
65143 diff -urNp linux-2.6.39.4/net/batman-adv/routing.c linux-2.6.39.4/net/batman-adv/routing.c
65144 --- linux-2.6.39.4/net/batman-adv/routing.c 2011-05-19 00:06:34.000000000 -0400
65145 +++ linux-2.6.39.4/net/batman-adv/routing.c 2011-08-05 19:44:37.000000000 -0400
65146 @@ -625,7 +625,7 @@ void receive_bat_packet(struct ethhdr *e
65147 return;
65148
65149 /* could be changed by schedule_own_packet() */
65150 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
65151 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
65152
65153 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
65154
65155 diff -urNp linux-2.6.39.4/net/batman-adv/send.c linux-2.6.39.4/net/batman-adv/send.c
65156 --- linux-2.6.39.4/net/batman-adv/send.c 2011-05-19 00:06:34.000000000 -0400
65157 +++ linux-2.6.39.4/net/batman-adv/send.c 2011-08-05 19:44:37.000000000 -0400
65158 @@ -277,7 +277,7 @@ void schedule_own_packet(struct hard_ifa
65159
65160 /* change sequence number to network order */
65161 batman_packet->seqno =
65162 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
65163 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
65164
65165 if (vis_server == VIS_TYPE_SERVER_SYNC)
65166 batman_packet->flags |= VIS_SERVER;
65167 @@ -291,7 +291,7 @@ void schedule_own_packet(struct hard_ifa
65168 else
65169 batman_packet->gw_flags = 0;
65170
65171 - atomic_inc(&hard_iface->seqno);
65172 + atomic_inc_unchecked(&hard_iface->seqno);
65173
65174 slide_own_bcast_window(hard_iface);
65175 send_time = own_send_time(bat_priv);
65176 diff -urNp linux-2.6.39.4/net/batman-adv/soft-interface.c linux-2.6.39.4/net/batman-adv/soft-interface.c
65177 --- linux-2.6.39.4/net/batman-adv/soft-interface.c 2011-05-19 00:06:34.000000000 -0400
65178 +++ linux-2.6.39.4/net/batman-adv/soft-interface.c 2011-08-05 19:44:37.000000000 -0400
65179 @@ -386,7 +386,7 @@ int interface_tx(struct sk_buff *skb, st
65180
65181 /* set broadcast sequence number */
65182 bcast_packet->seqno =
65183 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
65184 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
65185
65186 add_bcast_packet_to_list(bat_priv, skb);
65187
65188 @@ -579,7 +579,7 @@ struct net_device *softif_create(char *n
65189 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
65190
65191 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
65192 - atomic_set(&bat_priv->bcast_seqno, 1);
65193 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
65194 atomic_set(&bat_priv->hna_local_changed, 0);
65195
65196 bat_priv->primary_if = NULL;
65197 diff -urNp linux-2.6.39.4/net/batman-adv/types.h linux-2.6.39.4/net/batman-adv/types.h
65198 --- linux-2.6.39.4/net/batman-adv/types.h 2011-05-19 00:06:34.000000000 -0400
65199 +++ linux-2.6.39.4/net/batman-adv/types.h 2011-08-05 19:44:37.000000000 -0400
65200 @@ -38,8 +38,8 @@ struct hard_iface {
65201 int16_t if_num;
65202 char if_status;
65203 struct net_device *net_dev;
65204 - atomic_t seqno;
65205 - atomic_t frag_seqno;
65206 + atomic_unchecked_t seqno;
65207 + atomic_unchecked_t frag_seqno;
65208 unsigned char *packet_buff;
65209 int packet_len;
65210 struct kobject *hardif_obj;
65211 @@ -141,7 +141,7 @@ struct bat_priv {
65212 atomic_t orig_interval; /* uint */
65213 atomic_t hop_penalty; /* uint */
65214 atomic_t log_level; /* uint */
65215 - atomic_t bcast_seqno;
65216 + atomic_unchecked_t bcast_seqno;
65217 atomic_t bcast_queue_left;
65218 atomic_t batman_queue_left;
65219 char num_ifaces;
65220 diff -urNp linux-2.6.39.4/net/batman-adv/unicast.c linux-2.6.39.4/net/batman-adv/unicast.c
65221 --- linux-2.6.39.4/net/batman-adv/unicast.c 2011-05-19 00:06:34.000000000 -0400
65222 +++ linux-2.6.39.4/net/batman-adv/unicast.c 2011-08-05 19:44:37.000000000 -0400
65223 @@ -263,7 +263,7 @@ int frag_send_skb(struct sk_buff *skb, s
65224 frag1->flags = UNI_FRAG_HEAD | large_tail;
65225 frag2->flags = large_tail;
65226
65227 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
65228 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
65229 frag1->seqno = htons(seqno - 1);
65230 frag2->seqno = htons(seqno);
65231
65232 diff -urNp linux-2.6.39.4/net/bluetooth/l2cap_core.c linux-2.6.39.4/net/bluetooth/l2cap_core.c
65233 --- linux-2.6.39.4/net/bluetooth/l2cap_core.c 2011-05-19 00:06:34.000000000 -0400
65234 +++ linux-2.6.39.4/net/bluetooth/l2cap_core.c 2011-08-05 19:44:37.000000000 -0400
65235 @@ -2202,7 +2202,7 @@ static inline int l2cap_config_req(struc
65236
65237 /* Reject if config buffer is too small. */
65238 len = cmd_len - sizeof(*req);
65239 - if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
65240 + if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
65241 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
65242 l2cap_build_conf_rsp(sk, rsp,
65243 L2CAP_CONF_REJECT, flags), rsp);
65244 diff -urNp linux-2.6.39.4/net/bluetooth/l2cap_sock.c linux-2.6.39.4/net/bluetooth/l2cap_sock.c
65245 --- linux-2.6.39.4/net/bluetooth/l2cap_sock.c 2011-05-19 00:06:34.000000000 -0400
65246 +++ linux-2.6.39.4/net/bluetooth/l2cap_sock.c 2011-08-05 19:44:37.000000000 -0400
65247 @@ -446,6 +446,7 @@ static int l2cap_sock_getsockopt_old(str
65248 break;
65249 }
65250
65251 + memset(&cinfo, 0, sizeof(cinfo));
65252 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
65253 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
65254
65255 diff -urNp linux-2.6.39.4/net/bluetooth/rfcomm/sock.c linux-2.6.39.4/net/bluetooth/rfcomm/sock.c
65256 --- linux-2.6.39.4/net/bluetooth/rfcomm/sock.c 2011-05-19 00:06:34.000000000 -0400
65257 +++ linux-2.6.39.4/net/bluetooth/rfcomm/sock.c 2011-08-05 19:44:37.000000000 -0400
65258 @@ -787,6 +787,7 @@ static int rfcomm_sock_getsockopt_old(st
65259
65260 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
65261
65262 + memset(&cinfo, 0, sizeof(cinfo));
65263 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
65264 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
65265
65266 diff -urNp linux-2.6.39.4/net/bridge/br_multicast.c linux-2.6.39.4/net/bridge/br_multicast.c
65267 --- linux-2.6.39.4/net/bridge/br_multicast.c 2011-05-19 00:06:34.000000000 -0400
65268 +++ linux-2.6.39.4/net/bridge/br_multicast.c 2011-08-05 19:44:37.000000000 -0400
65269 @@ -1482,7 +1482,7 @@ static int br_multicast_ipv6_rcv(struct
65270 nexthdr = ip6h->nexthdr;
65271 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
65272
65273 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
65274 + if (nexthdr != IPPROTO_ICMPV6)
65275 return 0;
65276
65277 /* Okay, we found ICMPv6 header */
65278 diff -urNp linux-2.6.39.4/net/bridge/netfilter/ebtables.c linux-2.6.39.4/net/bridge/netfilter/ebtables.c
65279 --- linux-2.6.39.4/net/bridge/netfilter/ebtables.c 2011-05-19 00:06:34.000000000 -0400
65280 +++ linux-2.6.39.4/net/bridge/netfilter/ebtables.c 2011-08-05 19:44:37.000000000 -0400
65281 @@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
65282 tmp.valid_hooks = t->table->valid_hooks;
65283 }
65284 mutex_unlock(&ebt_mutex);
65285 - if (copy_to_user(user, &tmp, *len) != 0){
65286 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
65287 BUGPRINT("c2u Didn't work\n");
65288 ret = -EFAULT;
65289 break;
65290 @@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
65291 int ret;
65292 void __user *pos;
65293
65294 + pax_track_stack();
65295 +
65296 memset(&tinfo, 0, sizeof(tinfo));
65297
65298 if (cmd == EBT_SO_GET_ENTRIES) {
65299 diff -urNp linux-2.6.39.4/net/caif/caif_socket.c linux-2.6.39.4/net/caif/caif_socket.c
65300 --- linux-2.6.39.4/net/caif/caif_socket.c 2011-05-19 00:06:34.000000000 -0400
65301 +++ linux-2.6.39.4/net/caif/caif_socket.c 2011-08-05 19:44:37.000000000 -0400
65302 @@ -48,18 +48,19 @@ static struct dentry *debugfsdir;
65303 #ifdef CONFIG_DEBUG_FS
65304 struct debug_fs_counter {
65305 atomic_t caif_nr_socks;
65306 - atomic_t num_connect_req;
65307 - atomic_t num_connect_resp;
65308 - atomic_t num_connect_fail_resp;
65309 - atomic_t num_disconnect;
65310 - atomic_t num_remote_shutdown_ind;
65311 - atomic_t num_tx_flow_off_ind;
65312 - atomic_t num_tx_flow_on_ind;
65313 - atomic_t num_rx_flow_off;
65314 - atomic_t num_rx_flow_on;
65315 + atomic_unchecked_t num_connect_req;
65316 + atomic_unchecked_t num_connect_resp;
65317 + atomic_unchecked_t num_connect_fail_resp;
65318 + atomic_unchecked_t num_disconnect;
65319 + atomic_unchecked_t num_remote_shutdown_ind;
65320 + atomic_unchecked_t num_tx_flow_off_ind;
65321 + atomic_unchecked_t num_tx_flow_on_ind;
65322 + atomic_unchecked_t num_rx_flow_off;
65323 + atomic_unchecked_t num_rx_flow_on;
65324 };
65325 static struct debug_fs_counter cnt;
65326 #define dbfs_atomic_inc(v) atomic_inc(v)
65327 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_unchecked(v)
65328 #define dbfs_atomic_dec(v) atomic_dec(v)
65329 #else
65330 #define dbfs_atomic_inc(v)
65331 @@ -159,7 +160,7 @@ static int caif_queue_rcv_skb(struct soc
65332 atomic_read(&cf_sk->sk.sk_rmem_alloc),
65333 sk_rcvbuf_lowwater(cf_sk));
65334 set_rx_flow_off(cf_sk);
65335 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
65336 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
65337 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
65338 }
65339
65340 @@ -169,7 +170,7 @@ static int caif_queue_rcv_skb(struct soc
65341 if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
65342 set_rx_flow_off(cf_sk);
65343 pr_debug("sending flow OFF due to rmem_schedule\n");
65344 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
65345 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
65346 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
65347 }
65348 skb->dev = NULL;
65349 @@ -218,21 +219,21 @@ static void caif_ctrl_cb(struct cflayer
65350 switch (flow) {
65351 case CAIF_CTRLCMD_FLOW_ON_IND:
65352 /* OK from modem to start sending again */
65353 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
65354 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
65355 set_tx_flow_on(cf_sk);
65356 cf_sk->sk.sk_state_change(&cf_sk->sk);
65357 break;
65358
65359 case CAIF_CTRLCMD_FLOW_OFF_IND:
65360 /* Modem asks us to shut up */
65361 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
65362 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
65363 set_tx_flow_off(cf_sk);
65364 cf_sk->sk.sk_state_change(&cf_sk->sk);
65365 break;
65366
65367 case CAIF_CTRLCMD_INIT_RSP:
65368 /* We're now connected */
65369 - dbfs_atomic_inc(&cnt.num_connect_resp);
65370 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
65371 cf_sk->sk.sk_state = CAIF_CONNECTED;
65372 set_tx_flow_on(cf_sk);
65373 cf_sk->sk.sk_state_change(&cf_sk->sk);
65374 @@ -247,7 +248,7 @@ static void caif_ctrl_cb(struct cflayer
65375
65376 case CAIF_CTRLCMD_INIT_FAIL_RSP:
65377 /* Connect request failed */
65378 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
65379 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
65380 cf_sk->sk.sk_err = ECONNREFUSED;
65381 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
65382 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
65383 @@ -261,7 +262,7 @@ static void caif_ctrl_cb(struct cflayer
65384
65385 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
65386 /* Modem has closed this connection, or device is down. */
65387 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
65388 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
65389 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
65390 cf_sk->sk.sk_err = ECONNRESET;
65391 set_rx_flow_on(cf_sk);
65392 @@ -281,7 +282,7 @@ static void caif_check_flow_release(stru
65393 return;
65394
65395 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
65396 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
65397 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
65398 set_rx_flow_on(cf_sk);
65399 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
65400 }
65401 @@ -864,7 +865,7 @@ static int caif_connect(struct socket *s
65402 /*ifindex = id of the interface.*/
65403 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
65404
65405 - dbfs_atomic_inc(&cnt.num_connect_req);
65406 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
65407 cf_sk->layer.receive = caif_sktrecv_cb;
65408 err = caif_connect_client(&cf_sk->conn_req,
65409 &cf_sk->layer, &ifindex, &headroom, &tailroom);
65410 @@ -952,7 +953,7 @@ static int caif_release(struct socket *s
65411 spin_unlock(&sk->sk_receive_queue.lock);
65412 sock->sk = NULL;
65413
65414 - dbfs_atomic_inc(&cnt.num_disconnect);
65415 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
65416
65417 if (cf_sk->debugfs_socket_dir != NULL)
65418 debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
65419 diff -urNp linux-2.6.39.4/net/caif/cfctrl.c linux-2.6.39.4/net/caif/cfctrl.c
65420 --- linux-2.6.39.4/net/caif/cfctrl.c 2011-05-19 00:06:34.000000000 -0400
65421 +++ linux-2.6.39.4/net/caif/cfctrl.c 2011-08-05 19:44:37.000000000 -0400
65422 @@ -9,6 +9,7 @@
65423 #include <linux/stddef.h>
65424 #include <linux/spinlock.h>
65425 #include <linux/slab.h>
65426 +#include <linux/sched.h>
65427 #include <net/caif/caif_layer.h>
65428 #include <net/caif/cfpkt.h>
65429 #include <net/caif/cfctrl.h>
65430 @@ -46,8 +47,8 @@ struct cflayer *cfctrl_create(void)
65431 dev_info.id = 0xff;
65432 memset(this, 0, sizeof(*this));
65433 cfsrvl_init(&this->serv, 0, &dev_info, false);
65434 - atomic_set(&this->req_seq_no, 1);
65435 - atomic_set(&this->rsp_seq_no, 1);
65436 + atomic_set_unchecked(&this->req_seq_no, 1);
65437 + atomic_set_unchecked(&this->rsp_seq_no, 1);
65438 this->serv.layer.receive = cfctrl_recv;
65439 sprintf(this->serv.layer.name, "ctrl");
65440 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
65441 @@ -116,8 +117,8 @@ void cfctrl_insert_req(struct cfctrl *ct
65442 struct cfctrl_request_info *req)
65443 {
65444 spin_lock(&ctrl->info_list_lock);
65445 - atomic_inc(&ctrl->req_seq_no);
65446 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
65447 + atomic_inc_unchecked(&ctrl->req_seq_no);
65448 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
65449 list_add_tail(&req->list, &ctrl->list);
65450 spin_unlock(&ctrl->info_list_lock);
65451 }
65452 @@ -136,7 +137,7 @@ struct cfctrl_request_info *cfctrl_remov
65453 if (p != first)
65454 pr_warn("Requests are not received in order\n");
65455
65456 - atomic_set(&ctrl->rsp_seq_no,
65457 + atomic_set_unchecked(&ctrl->rsp_seq_no,
65458 p->sequence_no);
65459 list_del(&p->list);
65460 goto out;
65461 @@ -385,6 +386,7 @@ static int cfctrl_recv(struct cflayer *l
65462 struct cfctrl *cfctrl = container_obj(layer);
65463 struct cfctrl_request_info rsp, *req;
65464
65465 + pax_track_stack();
65466
65467 cfpkt_extr_head(pkt, &cmdrsp, 1);
65468 cmd = cmdrsp & CFCTRL_CMD_MASK;
65469 diff -urNp linux-2.6.39.4/net/can/bcm.c linux-2.6.39.4/net/can/bcm.c
65470 --- linux-2.6.39.4/net/can/bcm.c 2011-05-19 00:06:34.000000000 -0400
65471 +++ linux-2.6.39.4/net/can/bcm.c 2011-08-05 19:44:37.000000000 -0400
65472 @@ -165,9 +165,15 @@ static int bcm_proc_show(struct seq_file
65473 struct bcm_sock *bo = bcm_sk(sk);
65474 struct bcm_op *op;
65475
65476 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65477 + seq_printf(m, ">>> socket %p", NULL);
65478 + seq_printf(m, " / sk %p", NULL);
65479 + seq_printf(m, " / bo %p", NULL);
65480 +#else
65481 seq_printf(m, ">>> socket %p", sk->sk_socket);
65482 seq_printf(m, " / sk %p", sk);
65483 seq_printf(m, " / bo %p", bo);
65484 +#endif
65485 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
65486 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
65487 seq_printf(m, " <<<\n");
65488 diff -urNp linux-2.6.39.4/net/core/datagram.c linux-2.6.39.4/net/core/datagram.c
65489 --- linux-2.6.39.4/net/core/datagram.c 2011-05-19 00:06:34.000000000 -0400
65490 +++ linux-2.6.39.4/net/core/datagram.c 2011-08-05 19:44:37.000000000 -0400
65491 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
65492 }
65493
65494 kfree_skb(skb);
65495 - atomic_inc(&sk->sk_drops);
65496 + atomic_inc_unchecked(&sk->sk_drops);
65497 sk_mem_reclaim_partial(sk);
65498
65499 return err;
65500 diff -urNp linux-2.6.39.4/net/core/dev.c linux-2.6.39.4/net/core/dev.c
65501 --- linux-2.6.39.4/net/core/dev.c 2011-06-03 00:04:14.000000000 -0400
65502 +++ linux-2.6.39.4/net/core/dev.c 2011-08-05 20:34:06.000000000 -0400
65503 @@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
65504 if (no_module && capable(CAP_NET_ADMIN))
65505 no_module = request_module("netdev-%s", name);
65506 if (no_module && capable(CAP_SYS_MODULE)) {
65507 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65508 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
65509 +#else
65510 if (!request_module("%s", name))
65511 pr_err("Loading kernel module for a network device "
65512 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
65513 "instead\n", name);
65514 +#endif
65515 }
65516 }
65517 EXPORT_SYMBOL(dev_load);
65518 @@ -1951,7 +1955,7 @@ static int illegal_highdma(struct net_de
65519
65520 struct dev_gso_cb {
65521 void (*destructor)(struct sk_buff *skb);
65522 -};
65523 +} __no_const;
65524
65525 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
65526
65527 @@ -2901,7 +2905,7 @@ int netif_rx_ni(struct sk_buff *skb)
65528 }
65529 EXPORT_SYMBOL(netif_rx_ni);
65530
65531 -static void net_tx_action(struct softirq_action *h)
65532 +static void net_tx_action(void)
65533 {
65534 struct softnet_data *sd = &__get_cpu_var(softnet_data);
65535
65536 @@ -3765,7 +3769,7 @@ void netif_napi_del(struct napi_struct *
65537 }
65538 EXPORT_SYMBOL(netif_napi_del);
65539
65540 -static void net_rx_action(struct softirq_action *h)
65541 +static void net_rx_action(void)
65542 {
65543 struct softnet_data *sd = &__get_cpu_var(softnet_data);
65544 unsigned long time_limit = jiffies + 2;
65545 diff -urNp linux-2.6.39.4/net/core/flow.c linux-2.6.39.4/net/core/flow.c
65546 --- linux-2.6.39.4/net/core/flow.c 2011-05-19 00:06:34.000000000 -0400
65547 +++ linux-2.6.39.4/net/core/flow.c 2011-08-05 19:44:37.000000000 -0400
65548 @@ -60,7 +60,7 @@ struct flow_cache {
65549 struct timer_list rnd_timer;
65550 };
65551
65552 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
65553 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
65554 EXPORT_SYMBOL(flow_cache_genid);
65555 static struct flow_cache flow_cache_global;
65556 static struct kmem_cache *flow_cachep __read_mostly;
65557 @@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
65558
65559 static int flow_entry_valid(struct flow_cache_entry *fle)
65560 {
65561 - if (atomic_read(&flow_cache_genid) != fle->genid)
65562 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
65563 return 0;
65564 if (fle->object && !fle->object->ops->check(fle->object))
65565 return 0;
65566 @@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
65567 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
65568 fcp->hash_count++;
65569 }
65570 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
65571 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
65572 flo = fle->object;
65573 if (!flo)
65574 goto ret_object;
65575 @@ -274,7 +274,7 @@ nocache:
65576 }
65577 flo = resolver(net, key, family, dir, flo, ctx);
65578 if (fle) {
65579 - fle->genid = atomic_read(&flow_cache_genid);
65580 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
65581 if (!IS_ERR(flo))
65582 fle->object = flo;
65583 else
65584 diff -urNp linux-2.6.39.4/net/core/rtnetlink.c linux-2.6.39.4/net/core/rtnetlink.c
65585 --- linux-2.6.39.4/net/core/rtnetlink.c 2011-05-19 00:06:34.000000000 -0400
65586 +++ linux-2.6.39.4/net/core/rtnetlink.c 2011-08-05 20:34:06.000000000 -0400
65587 @@ -56,7 +56,7 @@
65588 struct rtnl_link {
65589 rtnl_doit_func doit;
65590 rtnl_dumpit_func dumpit;
65591 -};
65592 +} __no_const;
65593
65594 static DEFINE_MUTEX(rtnl_mutex);
65595
65596 diff -urNp linux-2.6.39.4/net/core/skbuff.c linux-2.6.39.4/net/core/skbuff.c
65597 --- linux-2.6.39.4/net/core/skbuff.c 2011-06-03 00:04:14.000000000 -0400
65598 +++ linux-2.6.39.4/net/core/skbuff.c 2011-08-05 19:44:37.000000000 -0400
65599 @@ -1542,6 +1542,8 @@ int skb_splice_bits(struct sk_buff *skb,
65600 struct sock *sk = skb->sk;
65601 int ret = 0;
65602
65603 + pax_track_stack();
65604 +
65605 if (splice_grow_spd(pipe, &spd))
65606 return -ENOMEM;
65607
65608 diff -urNp linux-2.6.39.4/net/core/sock.c linux-2.6.39.4/net/core/sock.c
65609 --- linux-2.6.39.4/net/core/sock.c 2011-05-19 00:06:34.000000000 -0400
65610 +++ linux-2.6.39.4/net/core/sock.c 2011-08-05 19:44:37.000000000 -0400
65611 @@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65612 */
65613 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
65614 (unsigned)sk->sk_rcvbuf) {
65615 - atomic_inc(&sk->sk_drops);
65616 + atomic_inc_unchecked(&sk->sk_drops);
65617 return -ENOMEM;
65618 }
65619
65620 @@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65621 return err;
65622
65623 if (!sk_rmem_schedule(sk, skb->truesize)) {
65624 - atomic_inc(&sk->sk_drops);
65625 + atomic_inc_unchecked(&sk->sk_drops);
65626 return -ENOBUFS;
65627 }
65628
65629 @@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65630 skb_dst_force(skb);
65631
65632 spin_lock_irqsave(&list->lock, flags);
65633 - skb->dropcount = atomic_read(&sk->sk_drops);
65634 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
65635 __skb_queue_tail(list, skb);
65636 spin_unlock_irqrestore(&list->lock, flags);
65637
65638 @@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
65639 skb->dev = NULL;
65640
65641 if (sk_rcvqueues_full(sk, skb)) {
65642 - atomic_inc(&sk->sk_drops);
65643 + atomic_inc_unchecked(&sk->sk_drops);
65644 goto discard_and_relse;
65645 }
65646 if (nested)
65647 @@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
65648 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
65649 } else if (sk_add_backlog(sk, skb)) {
65650 bh_unlock_sock(sk);
65651 - atomic_inc(&sk->sk_drops);
65652 + atomic_inc_unchecked(&sk->sk_drops);
65653 goto discard_and_relse;
65654 }
65655
65656 @@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
65657 return -ENOTCONN;
65658 if (lv < len)
65659 return -EINVAL;
65660 - if (copy_to_user(optval, address, len))
65661 + if (len > sizeof(address) || copy_to_user(optval, address, len))
65662 return -EFAULT;
65663 goto lenout;
65664 }
65665 @@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
65666
65667 if (len > lv)
65668 len = lv;
65669 - if (copy_to_user(optval, &v, len))
65670 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
65671 return -EFAULT;
65672 lenout:
65673 if (put_user(len, optlen))
65674 @@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
65675 */
65676 smp_wmb();
65677 atomic_set(&sk->sk_refcnt, 1);
65678 - atomic_set(&sk->sk_drops, 0);
65679 + atomic_set_unchecked(&sk->sk_drops, 0);
65680 }
65681 EXPORT_SYMBOL(sock_init_data);
65682
65683 diff -urNp linux-2.6.39.4/net/decnet/sysctl_net_decnet.c linux-2.6.39.4/net/decnet/sysctl_net_decnet.c
65684 --- linux-2.6.39.4/net/decnet/sysctl_net_decnet.c 2011-05-19 00:06:34.000000000 -0400
65685 +++ linux-2.6.39.4/net/decnet/sysctl_net_decnet.c 2011-08-05 19:44:37.000000000 -0400
65686 @@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
65687
65688 if (len > *lenp) len = *lenp;
65689
65690 - if (copy_to_user(buffer, addr, len))
65691 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
65692 return -EFAULT;
65693
65694 *lenp = len;
65695 @@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
65696
65697 if (len > *lenp) len = *lenp;
65698
65699 - if (copy_to_user(buffer, devname, len))
65700 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
65701 return -EFAULT;
65702
65703 *lenp = len;
65704 diff -urNp linux-2.6.39.4/net/econet/Kconfig linux-2.6.39.4/net/econet/Kconfig
65705 --- linux-2.6.39.4/net/econet/Kconfig 2011-05-19 00:06:34.000000000 -0400
65706 +++ linux-2.6.39.4/net/econet/Kconfig 2011-08-05 19:44:37.000000000 -0400
65707 @@ -4,7 +4,7 @@
65708
65709 config ECONET
65710 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
65711 - depends on EXPERIMENTAL && INET
65712 + depends on EXPERIMENTAL && INET && BROKEN
65713 ---help---
65714 Econet is a fairly old and slow networking protocol mainly used by
65715 Acorn computers to access file and print servers. It uses native
65716 diff -urNp linux-2.6.39.4/net/ipv4/fib_frontend.c linux-2.6.39.4/net/ipv4/fib_frontend.c
65717 --- linux-2.6.39.4/net/ipv4/fib_frontend.c 2011-05-19 00:06:34.000000000 -0400
65718 +++ linux-2.6.39.4/net/ipv4/fib_frontend.c 2011-08-05 19:44:37.000000000 -0400
65719 @@ -968,12 +968,12 @@ static int fib_inetaddr_event(struct not
65720 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65721 fib_sync_up(dev);
65722 #endif
65723 - atomic_inc(&net->ipv4.dev_addr_genid);
65724 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65725 rt_cache_flush(dev_net(dev), -1);
65726 break;
65727 case NETDEV_DOWN:
65728 fib_del_ifaddr(ifa, NULL);
65729 - atomic_inc(&net->ipv4.dev_addr_genid);
65730 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65731 if (ifa->ifa_dev->ifa_list == NULL) {
65732 /* Last address was deleted from this interface.
65733 * Disable IP.
65734 @@ -1009,7 +1009,7 @@ static int fib_netdev_event(struct notif
65735 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65736 fib_sync_up(dev);
65737 #endif
65738 - atomic_inc(&net->ipv4.dev_addr_genid);
65739 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65740 rt_cache_flush(dev_net(dev), -1);
65741 break;
65742 case NETDEV_DOWN:
65743 diff -urNp linux-2.6.39.4/net/ipv4/fib_semantics.c linux-2.6.39.4/net/ipv4/fib_semantics.c
65744 --- linux-2.6.39.4/net/ipv4/fib_semantics.c 2011-05-19 00:06:34.000000000 -0400
65745 +++ linux-2.6.39.4/net/ipv4/fib_semantics.c 2011-08-05 19:44:37.000000000 -0400
65746 @@ -701,7 +701,7 @@ __be32 fib_info_update_nh_saddr(struct n
65747 nh->nh_saddr = inet_select_addr(nh->nh_dev,
65748 nh->nh_gw,
65749 nh->nh_parent->fib_scope);
65750 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
65751 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
65752
65753 return nh->nh_saddr;
65754 }
65755 diff -urNp linux-2.6.39.4/net/ipv4/inet_diag.c linux-2.6.39.4/net/ipv4/inet_diag.c
65756 --- linux-2.6.39.4/net/ipv4/inet_diag.c 2011-07-09 09:18:51.000000000 -0400
65757 +++ linux-2.6.39.4/net/ipv4/inet_diag.c 2011-08-05 19:44:37.000000000 -0400
65758 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
65759 r->idiag_retrans = 0;
65760
65761 r->id.idiag_if = sk->sk_bound_dev_if;
65762 +
65763 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65764 + r->id.idiag_cookie[0] = 0;
65765 + r->id.idiag_cookie[1] = 0;
65766 +#else
65767 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
65768 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
65769 +#endif
65770
65771 r->id.idiag_sport = inet->inet_sport;
65772 r->id.idiag_dport = inet->inet_dport;
65773 @@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
65774 r->idiag_family = tw->tw_family;
65775 r->idiag_retrans = 0;
65776 r->id.idiag_if = tw->tw_bound_dev_if;
65777 +
65778 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65779 + r->id.idiag_cookie[0] = 0;
65780 + r->id.idiag_cookie[1] = 0;
65781 +#else
65782 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
65783 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
65784 +#endif
65785 +
65786 r->id.idiag_sport = tw->tw_sport;
65787 r->id.idiag_dport = tw->tw_dport;
65788 r->id.idiag_src[0] = tw->tw_rcv_saddr;
65789 @@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
65790 if (sk == NULL)
65791 goto unlock;
65792
65793 +#ifndef CONFIG_GRKERNSEC_HIDESYM
65794 err = -ESTALE;
65795 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
65796 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
65797 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
65798 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
65799 goto out;
65800 +#endif
65801
65802 err = -ENOMEM;
65803 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
65804 @@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
65805 r->idiag_retrans = req->retrans;
65806
65807 r->id.idiag_if = sk->sk_bound_dev_if;
65808 +
65809 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65810 + r->id.idiag_cookie[0] = 0;
65811 + r->id.idiag_cookie[1] = 0;
65812 +#else
65813 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
65814 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
65815 +#endif
65816
65817 tmo = req->expires - jiffies;
65818 if (tmo < 0)
65819 diff -urNp linux-2.6.39.4/net/ipv4/inet_hashtables.c linux-2.6.39.4/net/ipv4/inet_hashtables.c
65820 --- linux-2.6.39.4/net/ipv4/inet_hashtables.c 2011-05-19 00:06:34.000000000 -0400
65821 +++ linux-2.6.39.4/net/ipv4/inet_hashtables.c 2011-08-05 19:44:37.000000000 -0400
65822 @@ -18,11 +18,14 @@
65823 #include <linux/sched.h>
65824 #include <linux/slab.h>
65825 #include <linux/wait.h>
65826 +#include <linux/security.h>
65827
65828 #include <net/inet_connection_sock.h>
65829 #include <net/inet_hashtables.h>
65830 #include <net/ip.h>
65831
65832 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
65833 +
65834 /*
65835 * Allocate and initialize a new local port bind bucket.
65836 * The bindhash mutex for snum's hash chain must be held here.
65837 @@ -529,6 +532,8 @@ ok:
65838 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
65839 spin_unlock(&head->lock);
65840
65841 + gr_update_task_in_ip_table(current, inet_sk(sk));
65842 +
65843 if (tw) {
65844 inet_twsk_deschedule(tw, death_row);
65845 while (twrefcnt) {
65846 diff -urNp linux-2.6.39.4/net/ipv4/inetpeer.c linux-2.6.39.4/net/ipv4/inetpeer.c
65847 --- linux-2.6.39.4/net/ipv4/inetpeer.c 2011-07-09 09:18:51.000000000 -0400
65848 +++ linux-2.6.39.4/net/ipv4/inetpeer.c 2011-08-05 19:44:37.000000000 -0400
65849 @@ -480,6 +480,8 @@ struct inet_peer *inet_getpeer(struct in
65850 unsigned int sequence;
65851 int invalidated, newrefcnt = 0;
65852
65853 + pax_track_stack();
65854 +
65855 /* Look up for the address quickly, lockless.
65856 * Because of a concurrent writer, we might not find an existing entry.
65857 */
65858 @@ -516,8 +518,8 @@ found: /* The existing node has been fo
65859 if (p) {
65860 p->daddr = *daddr;
65861 atomic_set(&p->refcnt, 1);
65862 - atomic_set(&p->rid, 0);
65863 - atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65864 + atomic_set_unchecked(&p->rid, 0);
65865 + atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65866 p->tcp_ts_stamp = 0;
65867 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
65868 p->rate_tokens = 0;
65869 diff -urNp linux-2.6.39.4/net/ipv4/ip_fragment.c linux-2.6.39.4/net/ipv4/ip_fragment.c
65870 --- linux-2.6.39.4/net/ipv4/ip_fragment.c 2011-05-19 00:06:34.000000000 -0400
65871 +++ linux-2.6.39.4/net/ipv4/ip_fragment.c 2011-08-05 19:44:37.000000000 -0400
65872 @@ -297,7 +297,7 @@ static inline int ip_frag_too_far(struct
65873 return 0;
65874
65875 start = qp->rid;
65876 - end = atomic_inc_return(&peer->rid);
65877 + end = atomic_inc_return_unchecked(&peer->rid);
65878 qp->rid = end;
65879
65880 rc = qp->q.fragments && (end - start) > max;
65881 diff -urNp linux-2.6.39.4/net/ipv4/ip_sockglue.c linux-2.6.39.4/net/ipv4/ip_sockglue.c
65882 --- linux-2.6.39.4/net/ipv4/ip_sockglue.c 2011-05-19 00:06:34.000000000 -0400
65883 +++ linux-2.6.39.4/net/ipv4/ip_sockglue.c 2011-08-05 19:44:37.000000000 -0400
65884 @@ -1064,6 +1064,8 @@ static int do_ip_getsockopt(struct sock
65885 int val;
65886 int len;
65887
65888 + pax_track_stack();
65889 +
65890 if (level != SOL_IP)
65891 return -EOPNOTSUPP;
65892
65893 diff -urNp linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c
65894 --- linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-05-19 00:06:34.000000000 -0400
65895 +++ linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-05 19:44:37.000000000 -0400
65896 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
65897
65898 *len = 0;
65899
65900 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
65901 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
65902 if (*octets == NULL) {
65903 if (net_ratelimit())
65904 pr_notice("OOM in bsalg (%d)\n", __LINE__);
65905 diff -urNp linux-2.6.39.4/net/ipv4/raw.c linux-2.6.39.4/net/ipv4/raw.c
65906 --- linux-2.6.39.4/net/ipv4/raw.c 2011-05-19 00:06:34.000000000 -0400
65907 +++ linux-2.6.39.4/net/ipv4/raw.c 2011-08-14 11:22:59.000000000 -0400
65908 @@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
65909 int raw_rcv(struct sock *sk, struct sk_buff *skb)
65910 {
65911 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
65912 - atomic_inc(&sk->sk_drops);
65913 + atomic_inc_unchecked(&sk->sk_drops);
65914 kfree_skb(skb);
65915 return NET_RX_DROP;
65916 }
65917 @@ -730,16 +730,20 @@ static int raw_init(struct sock *sk)
65918
65919 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
65920 {
65921 + struct icmp_filter filter;
65922 +
65923 if (optlen > sizeof(struct icmp_filter))
65924 optlen = sizeof(struct icmp_filter);
65925 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
65926 + if (copy_from_user(&filter, optval, optlen))
65927 return -EFAULT;
65928 + raw_sk(sk)->filter = filter;
65929 return 0;
65930 }
65931
65932 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
65933 {
65934 int len, ret = -EFAULT;
65935 + struct icmp_filter filter;
65936
65937 if (get_user(len, optlen))
65938 goto out;
65939 @@ -749,8 +753,9 @@ static int raw_geticmpfilter(struct sock
65940 if (len > sizeof(struct icmp_filter))
65941 len = sizeof(struct icmp_filter);
65942 ret = -EFAULT;
65943 - if (put_user(len, optlen) ||
65944 - copy_to_user(optval, &raw_sk(sk)->filter, len))
65945 + filter = raw_sk(sk)->filter;
65946 + if (put_user(len, optlen) || len > sizeof filter ||
65947 + copy_to_user(optval, &filter, len))
65948 goto out;
65949 ret = 0;
65950 out: return ret;
65951 @@ -978,7 +983,13 @@ static void raw_sock_seq_show(struct seq
65952 sk_wmem_alloc_get(sp),
65953 sk_rmem_alloc_get(sp),
65954 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65955 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
65956 + atomic_read(&sp->sk_refcnt),
65957 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65958 + NULL,
65959 +#else
65960 + sp,
65961 +#endif
65962 + atomic_read_unchecked(&sp->sk_drops));
65963 }
65964
65965 static int raw_seq_show(struct seq_file *seq, void *v)
65966 diff -urNp linux-2.6.39.4/net/ipv4/route.c linux-2.6.39.4/net/ipv4/route.c
65967 --- linux-2.6.39.4/net/ipv4/route.c 2011-07-09 09:18:51.000000000 -0400
65968 +++ linux-2.6.39.4/net/ipv4/route.c 2011-08-05 19:44:37.000000000 -0400
65969 @@ -303,7 +303,7 @@ static inline unsigned int rt_hash(__be3
65970
65971 static inline int rt_genid(struct net *net)
65972 {
65973 - return atomic_read(&net->ipv4.rt_genid);
65974 + return atomic_read_unchecked(&net->ipv4.rt_genid);
65975 }
65976
65977 #ifdef CONFIG_PROC_FS
65978 @@ -831,7 +831,7 @@ static void rt_cache_invalidate(struct n
65979 unsigned char shuffle;
65980
65981 get_random_bytes(&shuffle, sizeof(shuffle));
65982 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
65983 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
65984 }
65985
65986 /*
65987 @@ -2833,7 +2833,7 @@ static int rt_fill_info(struct net *net,
65988 rt->peer->pmtu_expires - jiffies : 0;
65989 if (rt->peer) {
65990 inet_peer_refcheck(rt->peer);
65991 - id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
65992 + id = atomic_read_unchecked(&rt->peer->ip_id_count) & 0xffff;
65993 if (rt->peer->tcp_ts_stamp) {
65994 ts = rt->peer->tcp_ts;
65995 tsage = get_seconds() - rt->peer->tcp_ts_stamp;
65996 diff -urNp linux-2.6.39.4/net/ipv4/tcp.c linux-2.6.39.4/net/ipv4/tcp.c
65997 --- linux-2.6.39.4/net/ipv4/tcp.c 2011-05-19 00:06:34.000000000 -0400
65998 +++ linux-2.6.39.4/net/ipv4/tcp.c 2011-08-05 19:44:37.000000000 -0400
65999 @@ -2121,6 +2121,8 @@ static int do_tcp_setsockopt(struct sock
66000 int val;
66001 int err = 0;
66002
66003 + pax_track_stack();
66004 +
66005 /* These are data/string values, all the others are ints */
66006 switch (optname) {
66007 case TCP_CONGESTION: {
66008 @@ -2500,6 +2502,8 @@ static int do_tcp_getsockopt(struct sock
66009 struct tcp_sock *tp = tcp_sk(sk);
66010 int val, len;
66011
66012 + pax_track_stack();
66013 +
66014 if (get_user(len, optlen))
66015 return -EFAULT;
66016
66017 diff -urNp linux-2.6.39.4/net/ipv4/tcp_ipv4.c linux-2.6.39.4/net/ipv4/tcp_ipv4.c
66018 --- linux-2.6.39.4/net/ipv4/tcp_ipv4.c 2011-05-19 00:06:34.000000000 -0400
66019 +++ linux-2.6.39.4/net/ipv4/tcp_ipv4.c 2011-08-05 19:44:37.000000000 -0400
66020 @@ -86,6 +86,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
66021 int sysctl_tcp_low_latency __read_mostly;
66022 EXPORT_SYMBOL(sysctl_tcp_low_latency);
66023
66024 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66025 +extern int grsec_enable_blackhole;
66026 +#endif
66027
66028 #ifdef CONFIG_TCP_MD5SIG
66029 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
66030 @@ -1594,6 +1597,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
66031 return 0;
66032
66033 reset:
66034 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66035 + if (!grsec_enable_blackhole)
66036 +#endif
66037 tcp_v4_send_reset(rsk, skb);
66038 discard:
66039 kfree_skb(skb);
66040 @@ -1656,12 +1662,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
66041 TCP_SKB_CB(skb)->sacked = 0;
66042
66043 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
66044 - if (!sk)
66045 + if (!sk) {
66046 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66047 + ret = 1;
66048 +#endif
66049 goto no_tcp_socket;
66050 -
66051 + }
66052 process:
66053 - if (sk->sk_state == TCP_TIME_WAIT)
66054 + if (sk->sk_state == TCP_TIME_WAIT) {
66055 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66056 + ret = 2;
66057 +#endif
66058 goto do_time_wait;
66059 + }
66060
66061 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
66062 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
66063 @@ -1711,6 +1724,10 @@ no_tcp_socket:
66064 bad_packet:
66065 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
66066 } else {
66067 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66068 + if (!grsec_enable_blackhole || (ret == 1 &&
66069 + (skb->dev->flags & IFF_LOOPBACK)))
66070 +#endif
66071 tcp_v4_send_reset(NULL, skb);
66072 }
66073
66074 @@ -2374,7 +2391,11 @@ static void get_openreq4(struct sock *sk
66075 0, /* non standard timer */
66076 0, /* open_requests have no inode */
66077 atomic_read(&sk->sk_refcnt),
66078 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66079 + NULL,
66080 +#else
66081 req,
66082 +#endif
66083 len);
66084 }
66085
66086 @@ -2424,7 +2445,12 @@ static void get_tcp4_sock(struct sock *s
66087 sock_i_uid(sk),
66088 icsk->icsk_probes_out,
66089 sock_i_ino(sk),
66090 - atomic_read(&sk->sk_refcnt), sk,
66091 + atomic_read(&sk->sk_refcnt),
66092 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66093 + NULL,
66094 +#else
66095 + sk,
66096 +#endif
66097 jiffies_to_clock_t(icsk->icsk_rto),
66098 jiffies_to_clock_t(icsk->icsk_ack.ato),
66099 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
66100 @@ -2452,7 +2478,13 @@ static void get_timewait4_sock(struct in
66101 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
66102 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
66103 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
66104 - atomic_read(&tw->tw_refcnt), tw, len);
66105 + atomic_read(&tw->tw_refcnt),
66106 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66107 + NULL,
66108 +#else
66109 + tw,
66110 +#endif
66111 + len);
66112 }
66113
66114 #define TMPSZ 150
66115 diff -urNp linux-2.6.39.4/net/ipv4/tcp_minisocks.c linux-2.6.39.4/net/ipv4/tcp_minisocks.c
66116 --- linux-2.6.39.4/net/ipv4/tcp_minisocks.c 2011-05-19 00:06:34.000000000 -0400
66117 +++ linux-2.6.39.4/net/ipv4/tcp_minisocks.c 2011-08-05 19:44:37.000000000 -0400
66118 @@ -27,6 +27,10 @@
66119 #include <net/inet_common.h>
66120 #include <net/xfrm.h>
66121
66122 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66123 +extern int grsec_enable_blackhole;
66124 +#endif
66125 +
66126 int sysctl_tcp_syncookies __read_mostly = 1;
66127 EXPORT_SYMBOL(sysctl_tcp_syncookies);
66128
66129 @@ -745,6 +749,10 @@ listen_overflow:
66130
66131 embryonic_reset:
66132 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
66133 +
66134 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66135 + if (!grsec_enable_blackhole)
66136 +#endif
66137 if (!(flg & TCP_FLAG_RST))
66138 req->rsk_ops->send_reset(sk, skb);
66139
66140 diff -urNp linux-2.6.39.4/net/ipv4/tcp_output.c linux-2.6.39.4/net/ipv4/tcp_output.c
66141 --- linux-2.6.39.4/net/ipv4/tcp_output.c 2011-05-19 00:06:34.000000000 -0400
66142 +++ linux-2.6.39.4/net/ipv4/tcp_output.c 2011-08-05 19:44:37.000000000 -0400
66143 @@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
66144 int mss;
66145 int s_data_desired = 0;
66146
66147 + pax_track_stack();
66148 +
66149 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
66150 s_data_desired = cvp->s_data_desired;
66151 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
66152 diff -urNp linux-2.6.39.4/net/ipv4/tcp_probe.c linux-2.6.39.4/net/ipv4/tcp_probe.c
66153 --- linux-2.6.39.4/net/ipv4/tcp_probe.c 2011-05-19 00:06:34.000000000 -0400
66154 +++ linux-2.6.39.4/net/ipv4/tcp_probe.c 2011-08-05 19:44:37.000000000 -0400
66155 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
66156 if (cnt + width >= len)
66157 break;
66158
66159 - if (copy_to_user(buf + cnt, tbuf, width))
66160 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
66161 return -EFAULT;
66162 cnt += width;
66163 }
66164 diff -urNp linux-2.6.39.4/net/ipv4/tcp_timer.c linux-2.6.39.4/net/ipv4/tcp_timer.c
66165 --- linux-2.6.39.4/net/ipv4/tcp_timer.c 2011-05-19 00:06:34.000000000 -0400
66166 +++ linux-2.6.39.4/net/ipv4/tcp_timer.c 2011-08-05 19:44:37.000000000 -0400
66167 @@ -22,6 +22,10 @@
66168 #include <linux/gfp.h>
66169 #include <net/tcp.h>
66170
66171 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66172 +extern int grsec_lastack_retries;
66173 +#endif
66174 +
66175 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
66176 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
66177 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
66178 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
66179 }
66180 }
66181
66182 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66183 + if ((sk->sk_state == TCP_LAST_ACK) &&
66184 + (grsec_lastack_retries > 0) &&
66185 + (grsec_lastack_retries < retry_until))
66186 + retry_until = grsec_lastack_retries;
66187 +#endif
66188 +
66189 if (retransmits_timed_out(sk, retry_until,
66190 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
66191 /* Has it gone just too far? */
66192 diff -urNp linux-2.6.39.4/net/ipv4/udp.c linux-2.6.39.4/net/ipv4/udp.c
66193 --- linux-2.6.39.4/net/ipv4/udp.c 2011-07-09 09:18:51.000000000 -0400
66194 +++ linux-2.6.39.4/net/ipv4/udp.c 2011-08-05 19:44:37.000000000 -0400
66195 @@ -86,6 +86,7 @@
66196 #include <linux/types.h>
66197 #include <linux/fcntl.h>
66198 #include <linux/module.h>
66199 +#include <linux/security.h>
66200 #include <linux/socket.h>
66201 #include <linux/sockios.h>
66202 #include <linux/igmp.h>
66203 @@ -107,6 +108,10 @@
66204 #include <net/xfrm.h>
66205 #include "udp_impl.h"
66206
66207 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66208 +extern int grsec_enable_blackhole;
66209 +#endif
66210 +
66211 struct udp_table udp_table __read_mostly;
66212 EXPORT_SYMBOL(udp_table);
66213
66214 @@ -564,6 +569,9 @@ found:
66215 return s;
66216 }
66217
66218 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
66219 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
66220 +
66221 /*
66222 * This routine is called by the ICMP module when it gets some
66223 * sort of error condition. If err < 0 then the socket should
66224 @@ -853,9 +861,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
66225 dport = usin->sin_port;
66226 if (dport == 0)
66227 return -EINVAL;
66228 +
66229 + err = gr_search_udp_sendmsg(sk, usin);
66230 + if (err)
66231 + return err;
66232 } else {
66233 if (sk->sk_state != TCP_ESTABLISHED)
66234 return -EDESTADDRREQ;
66235 +
66236 + err = gr_search_udp_sendmsg(sk, NULL);
66237 + if (err)
66238 + return err;
66239 +
66240 daddr = inet->inet_daddr;
66241 dport = inet->inet_dport;
66242 /* Open fast path for connected socket.
66243 @@ -1090,7 +1107,7 @@ static unsigned int first_packet_length(
66244 udp_lib_checksum_complete(skb)) {
66245 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
66246 IS_UDPLITE(sk));
66247 - atomic_inc(&sk->sk_drops);
66248 + atomic_inc_unchecked(&sk->sk_drops);
66249 __skb_unlink(skb, rcvq);
66250 __skb_queue_tail(&list_kill, skb);
66251 }
66252 @@ -1176,6 +1193,10 @@ try_again:
66253 if (!skb)
66254 goto out;
66255
66256 + err = gr_search_udp_recvmsg(sk, skb);
66257 + if (err)
66258 + goto out_free;
66259 +
66260 ulen = skb->len - sizeof(struct udphdr);
66261 if (len > ulen)
66262 len = ulen;
66263 @@ -1475,7 +1496,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
66264
66265 drop:
66266 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
66267 - atomic_inc(&sk->sk_drops);
66268 + atomic_inc_unchecked(&sk->sk_drops);
66269 kfree_skb(skb);
66270 return -1;
66271 }
66272 @@ -1494,7 +1515,7 @@ static void flush_stack(struct sock **st
66273 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
66274
66275 if (!skb1) {
66276 - atomic_inc(&sk->sk_drops);
66277 + atomic_inc_unchecked(&sk->sk_drops);
66278 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
66279 IS_UDPLITE(sk));
66280 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
66281 @@ -1663,6 +1684,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
66282 goto csum_error;
66283
66284 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
66285 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66286 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
66287 +#endif
66288 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
66289
66290 /*
66291 @@ -2090,8 +2114,13 @@ static void udp4_format_sock(struct sock
66292 sk_wmem_alloc_get(sp),
66293 sk_rmem_alloc_get(sp),
66294 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
66295 - atomic_read(&sp->sk_refcnt), sp,
66296 - atomic_read(&sp->sk_drops), len);
66297 + atomic_read(&sp->sk_refcnt),
66298 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66299 + NULL,
66300 +#else
66301 + sp,
66302 +#endif
66303 + atomic_read_unchecked(&sp->sk_drops), len);
66304 }
66305
66306 int udp4_seq_show(struct seq_file *seq, void *v)
66307 diff -urNp linux-2.6.39.4/net/ipv6/inet6_connection_sock.c linux-2.6.39.4/net/ipv6/inet6_connection_sock.c
66308 --- linux-2.6.39.4/net/ipv6/inet6_connection_sock.c 2011-05-19 00:06:34.000000000 -0400
66309 +++ linux-2.6.39.4/net/ipv6/inet6_connection_sock.c 2011-08-05 19:44:37.000000000 -0400
66310 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
66311 #ifdef CONFIG_XFRM
66312 {
66313 struct rt6_info *rt = (struct rt6_info *)dst;
66314 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
66315 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
66316 }
66317 #endif
66318 }
66319 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
66320 #ifdef CONFIG_XFRM
66321 if (dst) {
66322 struct rt6_info *rt = (struct rt6_info *)dst;
66323 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
66324 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
66325 __sk_dst_reset(sk);
66326 dst = NULL;
66327 }
66328 diff -urNp linux-2.6.39.4/net/ipv6/ipv6_sockglue.c linux-2.6.39.4/net/ipv6/ipv6_sockglue.c
66329 --- linux-2.6.39.4/net/ipv6/ipv6_sockglue.c 2011-05-19 00:06:34.000000000 -0400
66330 +++ linux-2.6.39.4/net/ipv6/ipv6_sockglue.c 2011-08-05 19:44:37.000000000 -0400
66331 @@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
66332 int val, valbool;
66333 int retv = -ENOPROTOOPT;
66334
66335 + pax_track_stack();
66336 +
66337 if (optval == NULL)
66338 val=0;
66339 else {
66340 @@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
66341 int len;
66342 int val;
66343
66344 + pax_track_stack();
66345 +
66346 if (ip6_mroute_opt(optname))
66347 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
66348
66349 diff -urNp linux-2.6.39.4/net/ipv6/raw.c linux-2.6.39.4/net/ipv6/raw.c
66350 --- linux-2.6.39.4/net/ipv6/raw.c 2011-05-19 00:06:34.000000000 -0400
66351 +++ linux-2.6.39.4/net/ipv6/raw.c 2011-08-14 11:25:44.000000000 -0400
66352 @@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
66353 {
66354 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
66355 skb_checksum_complete(skb)) {
66356 - atomic_inc(&sk->sk_drops);
66357 + atomic_inc_unchecked(&sk->sk_drops);
66358 kfree_skb(skb);
66359 return NET_RX_DROP;
66360 }
66361 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
66362 struct raw6_sock *rp = raw6_sk(sk);
66363
66364 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
66365 - atomic_inc(&sk->sk_drops);
66366 + atomic_inc_unchecked(&sk->sk_drops);
66367 kfree_skb(skb);
66368 return NET_RX_DROP;
66369 }
66370 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
66371
66372 if (inet->hdrincl) {
66373 if (skb_checksum_complete(skb)) {
66374 - atomic_inc(&sk->sk_drops);
66375 + atomic_inc_unchecked(&sk->sk_drops);
66376 kfree_skb(skb);
66377 return NET_RX_DROP;
66378 }
66379 @@ -601,7 +601,7 @@ out:
66380 return err;
66381 }
66382
66383 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
66384 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
66385 struct flowi6 *fl6, struct dst_entry **dstp,
66386 unsigned int flags)
66387 {
66388 @@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
66389 u16 proto;
66390 int err;
66391
66392 + pax_track_stack();
66393 +
66394 /* Rough check on arithmetic overflow,
66395 better check is made in ip6_append_data().
66396 */
66397 @@ -909,12 +911,15 @@ do_confirm:
66398 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
66399 char __user *optval, int optlen)
66400 {
66401 + struct icmp6_filter filter;
66402 +
66403 switch (optname) {
66404 case ICMPV6_FILTER:
66405 if (optlen > sizeof(struct icmp6_filter))
66406 optlen = sizeof(struct icmp6_filter);
66407 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
66408 + if (copy_from_user(&filter, optval, optlen))
66409 return -EFAULT;
66410 + raw6_sk(sk)->filter = filter;
66411 return 0;
66412 default:
66413 return -ENOPROTOOPT;
66414 @@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
66415 char __user *optval, int __user *optlen)
66416 {
66417 int len;
66418 + struct icmp6_filter filter;
66419
66420 switch (optname) {
66421 case ICMPV6_FILTER:
66422 @@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
66423 len = sizeof(struct icmp6_filter);
66424 if (put_user(len, optlen))
66425 return -EFAULT;
66426 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
66427 + filter = raw6_sk(sk)->filter;
66428 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
66429 return -EFAULT;
66430 return 0;
66431 default:
66432 @@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
66433 0, 0L, 0,
66434 sock_i_uid(sp), 0,
66435 sock_i_ino(sp),
66436 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
66437 + atomic_read(&sp->sk_refcnt),
66438 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66439 + NULL,
66440 +#else
66441 + sp,
66442 +#endif
66443 + atomic_read_unchecked(&sp->sk_drops));
66444 }
66445
66446 static int raw6_seq_show(struct seq_file *seq, void *v)
66447 diff -urNp linux-2.6.39.4/net/ipv6/tcp_ipv6.c linux-2.6.39.4/net/ipv6/tcp_ipv6.c
66448 --- linux-2.6.39.4/net/ipv6/tcp_ipv6.c 2011-05-19 00:06:34.000000000 -0400
66449 +++ linux-2.6.39.4/net/ipv6/tcp_ipv6.c 2011-08-05 19:44:37.000000000 -0400
66450 @@ -92,6 +92,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
66451 }
66452 #endif
66453
66454 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66455 +extern int grsec_enable_blackhole;
66456 +#endif
66457 +
66458 static void tcp_v6_hash(struct sock *sk)
66459 {
66460 if (sk->sk_state != TCP_CLOSE) {
66461 @@ -1660,6 +1664,9 @@ static int tcp_v6_do_rcv(struct sock *sk
66462 return 0;
66463
66464 reset:
66465 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66466 + if (!grsec_enable_blackhole)
66467 +#endif
66468 tcp_v6_send_reset(sk, skb);
66469 discard:
66470 if (opt_skb)
66471 @@ -1739,12 +1746,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
66472 TCP_SKB_CB(skb)->sacked = 0;
66473
66474 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
66475 - if (!sk)
66476 + if (!sk) {
66477 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66478 + ret = 1;
66479 +#endif
66480 goto no_tcp_socket;
66481 + }
66482
66483 process:
66484 - if (sk->sk_state == TCP_TIME_WAIT)
66485 + if (sk->sk_state == TCP_TIME_WAIT) {
66486 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66487 + ret = 2;
66488 +#endif
66489 goto do_time_wait;
66490 + }
66491
66492 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
66493 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
66494 @@ -1792,6 +1807,10 @@ no_tcp_socket:
66495 bad_packet:
66496 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
66497 } else {
66498 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66499 + if (!grsec_enable_blackhole || (ret == 1 &&
66500 + (skb->dev->flags & IFF_LOOPBACK)))
66501 +#endif
66502 tcp_v6_send_reset(NULL, skb);
66503 }
66504
66505 @@ -2052,7 +2071,13 @@ static void get_openreq6(struct seq_file
66506 uid,
66507 0, /* non standard timer */
66508 0, /* open_requests have no inode */
66509 - 0, req);
66510 + 0,
66511 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66512 + NULL
66513 +#else
66514 + req
66515 +#endif
66516 + );
66517 }
66518
66519 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
66520 @@ -2102,7 +2127,12 @@ static void get_tcp6_sock(struct seq_fil
66521 sock_i_uid(sp),
66522 icsk->icsk_probes_out,
66523 sock_i_ino(sp),
66524 - atomic_read(&sp->sk_refcnt), sp,
66525 + atomic_read(&sp->sk_refcnt),
66526 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66527 + NULL,
66528 +#else
66529 + sp,
66530 +#endif
66531 jiffies_to_clock_t(icsk->icsk_rto),
66532 jiffies_to_clock_t(icsk->icsk_ack.ato),
66533 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
66534 @@ -2137,7 +2167,13 @@ static void get_timewait6_sock(struct se
66535 dest->s6_addr32[2], dest->s6_addr32[3], destp,
66536 tw->tw_substate, 0, 0,
66537 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
66538 - atomic_read(&tw->tw_refcnt), tw);
66539 + atomic_read(&tw->tw_refcnt),
66540 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66541 + NULL
66542 +#else
66543 + tw
66544 +#endif
66545 + );
66546 }
66547
66548 static int tcp6_seq_show(struct seq_file *seq, void *v)
66549 diff -urNp linux-2.6.39.4/net/ipv6/udp.c linux-2.6.39.4/net/ipv6/udp.c
66550 --- linux-2.6.39.4/net/ipv6/udp.c 2011-07-09 09:18:51.000000000 -0400
66551 +++ linux-2.6.39.4/net/ipv6/udp.c 2011-08-05 19:44:37.000000000 -0400
66552 @@ -50,6 +50,10 @@
66553 #include <linux/seq_file.h>
66554 #include "udp_impl.h"
66555
66556 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66557 +extern int grsec_enable_blackhole;
66558 +#endif
66559 +
66560 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
66561 {
66562 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
66563 @@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
66564
66565 return 0;
66566 drop:
66567 - atomic_inc(&sk->sk_drops);
66568 + atomic_inc_unchecked(&sk->sk_drops);
66569 drop_no_sk_drops_inc:
66570 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
66571 kfree_skb(skb);
66572 @@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
66573 continue;
66574 }
66575 drop:
66576 - atomic_inc(&sk->sk_drops);
66577 + atomic_inc_unchecked(&sk->sk_drops);
66578 UDP6_INC_STATS_BH(sock_net(sk),
66579 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
66580 UDP6_INC_STATS_BH(sock_net(sk),
66581 @@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
66582 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
66583 proto == IPPROTO_UDPLITE);
66584
66585 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66586 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
66587 +#endif
66588 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
66589
66590 kfree_skb(skb);
66591 @@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
66592 if (!sock_owned_by_user(sk))
66593 udpv6_queue_rcv_skb(sk, skb);
66594 else if (sk_add_backlog(sk, skb)) {
66595 - atomic_inc(&sk->sk_drops);
66596 + atomic_inc_unchecked(&sk->sk_drops);
66597 bh_unlock_sock(sk);
66598 sock_put(sk);
66599 goto discard;
66600 @@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
66601 0, 0L, 0,
66602 sock_i_uid(sp), 0,
66603 sock_i_ino(sp),
66604 - atomic_read(&sp->sk_refcnt), sp,
66605 - atomic_read(&sp->sk_drops));
66606 + atomic_read(&sp->sk_refcnt),
66607 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66608 + NULL,
66609 +#else
66610 + sp,
66611 +#endif
66612 + atomic_read_unchecked(&sp->sk_drops));
66613 }
66614
66615 int udp6_seq_show(struct seq_file *seq, void *v)
66616 diff -urNp linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c
66617 --- linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c 2011-05-19 00:06:34.000000000 -0400
66618 +++ linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c 2011-08-05 19:44:37.000000000 -0400
66619 @@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(st
66620 add_wait_queue(&self->open_wait, &wait);
66621
66622 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
66623 - __FILE__,__LINE__, tty->driver->name, self->open_count );
66624 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
66625
66626 /* As far as I can see, we protect open_count - Jean II */
66627 spin_lock_irqsave(&self->spinlock, flags);
66628 if (!tty_hung_up_p(filp)) {
66629 extra_count = 1;
66630 - self->open_count--;
66631 + local_dec(&self->open_count);
66632 }
66633 spin_unlock_irqrestore(&self->spinlock, flags);
66634 - self->blocked_open++;
66635 + local_inc(&self->blocked_open);
66636
66637 while (1) {
66638 if (tty->termios->c_cflag & CBAUD) {
66639 @@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(st
66640 }
66641
66642 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
66643 - __FILE__,__LINE__, tty->driver->name, self->open_count );
66644 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
66645
66646 schedule();
66647 }
66648 @@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(st
66649 if (extra_count) {
66650 /* ++ is not atomic, so this should be protected - Jean II */
66651 spin_lock_irqsave(&self->spinlock, flags);
66652 - self->open_count++;
66653 + local_inc(&self->open_count);
66654 spin_unlock_irqrestore(&self->spinlock, flags);
66655 }
66656 - self->blocked_open--;
66657 + local_dec(&self->blocked_open);
66658
66659 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
66660 - __FILE__,__LINE__, tty->driver->name, self->open_count);
66661 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
66662
66663 if (!retval)
66664 self->flags |= ASYNC_NORMAL_ACTIVE;
66665 @@ -416,14 +416,14 @@ static int ircomm_tty_open(struct tty_st
66666 }
66667 /* ++ is not atomic, so this should be protected - Jean II */
66668 spin_lock_irqsave(&self->spinlock, flags);
66669 - self->open_count++;
66670 + local_inc(&self->open_count);
66671
66672 tty->driver_data = self;
66673 self->tty = tty;
66674 spin_unlock_irqrestore(&self->spinlock, flags);
66675
66676 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
66677 - self->line, self->open_count);
66678 + self->line, local_read(&self->open_count));
66679
66680 /* Not really used by us, but lets do it anyway */
66681 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
66682 @@ -509,7 +509,7 @@ static void ircomm_tty_close(struct tty_
66683 return;
66684 }
66685
66686 - if ((tty->count == 1) && (self->open_count != 1)) {
66687 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
66688 /*
66689 * Uh, oh. tty->count is 1, which means that the tty
66690 * structure will be freed. state->count should always
66691 @@ -519,16 +519,16 @@ static void ircomm_tty_close(struct tty_
66692 */
66693 IRDA_DEBUG(0, "%s(), bad serial port count; "
66694 "tty->count is 1, state->count is %d\n", __func__ ,
66695 - self->open_count);
66696 - self->open_count = 1;
66697 + local_read(&self->open_count));
66698 + local_set(&self->open_count, 1);
66699 }
66700
66701 - if (--self->open_count < 0) {
66702 + if (local_dec_return(&self->open_count) < 0) {
66703 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
66704 - __func__, self->line, self->open_count);
66705 - self->open_count = 0;
66706 + __func__, self->line, local_read(&self->open_count));
66707 + local_set(&self->open_count, 0);
66708 }
66709 - if (self->open_count) {
66710 + if (local_read(&self->open_count)) {
66711 spin_unlock_irqrestore(&self->spinlock, flags);
66712
66713 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
66714 @@ -560,7 +560,7 @@ static void ircomm_tty_close(struct tty_
66715 tty->closing = 0;
66716 self->tty = NULL;
66717
66718 - if (self->blocked_open) {
66719 + if (local_read(&self->blocked_open)) {
66720 if (self->close_delay)
66721 schedule_timeout_interruptible(self->close_delay);
66722 wake_up_interruptible(&self->open_wait);
66723 @@ -1012,7 +1012,7 @@ static void ircomm_tty_hangup(struct tty
66724 spin_lock_irqsave(&self->spinlock, flags);
66725 self->flags &= ~ASYNC_NORMAL_ACTIVE;
66726 self->tty = NULL;
66727 - self->open_count = 0;
66728 + local_set(&self->open_count, 0);
66729 spin_unlock_irqrestore(&self->spinlock, flags);
66730
66731 wake_up_interruptible(&self->open_wait);
66732 @@ -1364,7 +1364,7 @@ static void ircomm_tty_line_info(struct
66733 seq_putc(m, '\n');
66734
66735 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
66736 - seq_printf(m, "Open count: %d\n", self->open_count);
66737 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
66738 seq_printf(m, "Max data size: %d\n", self->max_data_size);
66739 seq_printf(m, "Max header size: %d\n", self->max_header_size);
66740
66741 diff -urNp linux-2.6.39.4/net/iucv/af_iucv.c linux-2.6.39.4/net/iucv/af_iucv.c
66742 --- linux-2.6.39.4/net/iucv/af_iucv.c 2011-05-19 00:06:34.000000000 -0400
66743 +++ linux-2.6.39.4/net/iucv/af_iucv.c 2011-08-05 19:44:37.000000000 -0400
66744 @@ -653,10 +653,10 @@ static int iucv_sock_autobind(struct soc
66745
66746 write_lock_bh(&iucv_sk_list.lock);
66747
66748 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
66749 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66750 while (__iucv_get_sock_by_name(name)) {
66751 sprintf(name, "%08x",
66752 - atomic_inc_return(&iucv_sk_list.autobind_name));
66753 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66754 }
66755
66756 write_unlock_bh(&iucv_sk_list.lock);
66757 diff -urNp linux-2.6.39.4/net/key/af_key.c linux-2.6.39.4/net/key/af_key.c
66758 --- linux-2.6.39.4/net/key/af_key.c 2011-05-19 00:06:34.000000000 -0400
66759 +++ linux-2.6.39.4/net/key/af_key.c 2011-08-05 19:44:37.000000000 -0400
66760 @@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
66761 struct xfrm_migrate m[XFRM_MAX_DEPTH];
66762 struct xfrm_kmaddress k;
66763
66764 + pax_track_stack();
66765 +
66766 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
66767 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
66768 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
66769 @@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
66770 static u32 get_acqseq(void)
66771 {
66772 u32 res;
66773 - static atomic_t acqseq;
66774 + static atomic_unchecked_t acqseq;
66775
66776 do {
66777 - res = atomic_inc_return(&acqseq);
66778 + res = atomic_inc_return_unchecked(&acqseq);
66779 } while (!res);
66780 return res;
66781 }
66782 @@ -3657,7 +3659,11 @@ static int pfkey_seq_show(struct seq_fil
66783 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
66784 else
66785 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
66786 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66787 + NULL,
66788 +#else
66789 s,
66790 +#endif
66791 atomic_read(&s->sk_refcnt),
66792 sk_rmem_alloc_get(s),
66793 sk_wmem_alloc_get(s),
66794 diff -urNp linux-2.6.39.4/net/lapb/lapb_iface.c linux-2.6.39.4/net/lapb/lapb_iface.c
66795 --- linux-2.6.39.4/net/lapb/lapb_iface.c 2011-05-19 00:06:34.000000000 -0400
66796 +++ linux-2.6.39.4/net/lapb/lapb_iface.c 2011-08-05 20:34:06.000000000 -0400
66797 @@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
66798 goto out;
66799
66800 lapb->dev = dev;
66801 - lapb->callbacks = *callbacks;
66802 + lapb->callbacks = callbacks;
66803
66804 __lapb_insert_cb(lapb);
66805
66806 @@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
66807
66808 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
66809 {
66810 - if (lapb->callbacks.connect_confirmation)
66811 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
66812 + if (lapb->callbacks->connect_confirmation)
66813 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
66814 }
66815
66816 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
66817 {
66818 - if (lapb->callbacks.connect_indication)
66819 - lapb->callbacks.connect_indication(lapb->dev, reason);
66820 + if (lapb->callbacks->connect_indication)
66821 + lapb->callbacks->connect_indication(lapb->dev, reason);
66822 }
66823
66824 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
66825 {
66826 - if (lapb->callbacks.disconnect_confirmation)
66827 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
66828 + if (lapb->callbacks->disconnect_confirmation)
66829 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
66830 }
66831
66832 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
66833 {
66834 - if (lapb->callbacks.disconnect_indication)
66835 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
66836 + if (lapb->callbacks->disconnect_indication)
66837 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
66838 }
66839
66840 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
66841 {
66842 - if (lapb->callbacks.data_indication)
66843 - return lapb->callbacks.data_indication(lapb->dev, skb);
66844 + if (lapb->callbacks->data_indication)
66845 + return lapb->callbacks->data_indication(lapb->dev, skb);
66846
66847 kfree_skb(skb);
66848 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
66849 @@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
66850 {
66851 int used = 0;
66852
66853 - if (lapb->callbacks.data_transmit) {
66854 - lapb->callbacks.data_transmit(lapb->dev, skb);
66855 + if (lapb->callbacks->data_transmit) {
66856 + lapb->callbacks->data_transmit(lapb->dev, skb);
66857 used = 1;
66858 }
66859
66860 diff -urNp linux-2.6.39.4/net/mac80211/debugfs_sta.c linux-2.6.39.4/net/mac80211/debugfs_sta.c
66861 --- linux-2.6.39.4/net/mac80211/debugfs_sta.c 2011-05-19 00:06:34.000000000 -0400
66862 +++ linux-2.6.39.4/net/mac80211/debugfs_sta.c 2011-08-05 19:44:37.000000000 -0400
66863 @@ -115,6 +115,8 @@ static ssize_t sta_agg_status_read(struc
66864 struct tid_ampdu_rx *tid_rx;
66865 struct tid_ampdu_tx *tid_tx;
66866
66867 + pax_track_stack();
66868 +
66869 rcu_read_lock();
66870
66871 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
66872 @@ -215,6 +217,8 @@ static ssize_t sta_ht_capa_read(struct f
66873 struct sta_info *sta = file->private_data;
66874 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
66875
66876 + pax_track_stack();
66877 +
66878 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
66879 htc->ht_supported ? "" : "not ");
66880 if (htc->ht_supported) {
66881 diff -urNp linux-2.6.39.4/net/mac80211/ieee80211_i.h linux-2.6.39.4/net/mac80211/ieee80211_i.h
66882 --- linux-2.6.39.4/net/mac80211/ieee80211_i.h 2011-05-19 00:06:34.000000000 -0400
66883 +++ linux-2.6.39.4/net/mac80211/ieee80211_i.h 2011-08-05 19:44:37.000000000 -0400
66884 @@ -27,6 +27,7 @@
66885 #include <net/ieee80211_radiotap.h>
66886 #include <net/cfg80211.h>
66887 #include <net/mac80211.h>
66888 +#include <asm/local.h>
66889 #include "key.h"
66890 #include "sta_info.h"
66891
66892 @@ -714,7 +715,7 @@ struct ieee80211_local {
66893 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
66894 spinlock_t queue_stop_reason_lock;
66895
66896 - int open_count;
66897 + local_t open_count;
66898 int monitors, cooked_mntrs;
66899 /* number of interfaces with corresponding FIF_ flags */
66900 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
66901 diff -urNp linux-2.6.39.4/net/mac80211/iface.c linux-2.6.39.4/net/mac80211/iface.c
66902 --- linux-2.6.39.4/net/mac80211/iface.c 2011-05-19 00:06:34.000000000 -0400
66903 +++ linux-2.6.39.4/net/mac80211/iface.c 2011-08-05 19:44:37.000000000 -0400
66904 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
66905 break;
66906 }
66907
66908 - if (local->open_count == 0) {
66909 + if (local_read(&local->open_count) == 0) {
66910 res = drv_start(local);
66911 if (res)
66912 goto err_del_bss;
66913 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
66914 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
66915
66916 if (!is_valid_ether_addr(dev->dev_addr)) {
66917 - if (!local->open_count)
66918 + if (!local_read(&local->open_count))
66919 drv_stop(local);
66920 return -EADDRNOTAVAIL;
66921 }
66922 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
66923 mutex_unlock(&local->mtx);
66924
66925 if (coming_up)
66926 - local->open_count++;
66927 + local_inc(&local->open_count);
66928
66929 if (hw_reconf_flags) {
66930 ieee80211_hw_config(local, hw_reconf_flags);
66931 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
66932 err_del_interface:
66933 drv_remove_interface(local, &sdata->vif);
66934 err_stop:
66935 - if (!local->open_count)
66936 + if (!local_read(&local->open_count))
66937 drv_stop(local);
66938 err_del_bss:
66939 sdata->bss = NULL;
66940 @@ -474,7 +474,7 @@ static void ieee80211_do_stop(struct iee
66941 }
66942
66943 if (going_down)
66944 - local->open_count--;
66945 + local_dec(&local->open_count);
66946
66947 switch (sdata->vif.type) {
66948 case NL80211_IFTYPE_AP_VLAN:
66949 @@ -533,7 +533,7 @@ static void ieee80211_do_stop(struct iee
66950
66951 ieee80211_recalc_ps(local, -1);
66952
66953 - if (local->open_count == 0) {
66954 + if (local_read(&local->open_count) == 0) {
66955 if (local->ops->napi_poll)
66956 napi_disable(&local->napi);
66957 ieee80211_clear_tx_pending(local);
66958 diff -urNp linux-2.6.39.4/net/mac80211/main.c linux-2.6.39.4/net/mac80211/main.c
66959 --- linux-2.6.39.4/net/mac80211/main.c 2011-05-19 00:06:34.000000000 -0400
66960 +++ linux-2.6.39.4/net/mac80211/main.c 2011-08-05 19:44:37.000000000 -0400
66961 @@ -215,7 +215,7 @@ int ieee80211_hw_config(struct ieee80211
66962 local->hw.conf.power_level = power;
66963 }
66964
66965 - if (changed && local->open_count) {
66966 + if (changed && local_read(&local->open_count)) {
66967 ret = drv_config(local, changed);
66968 /*
66969 * Goal:
66970 diff -urNp linux-2.6.39.4/net/mac80211/mlme.c linux-2.6.39.4/net/mac80211/mlme.c
66971 --- linux-2.6.39.4/net/mac80211/mlme.c 2011-06-03 00:04:14.000000000 -0400
66972 +++ linux-2.6.39.4/net/mac80211/mlme.c 2011-08-05 19:44:37.000000000 -0400
66973 @@ -1431,6 +1431,8 @@ static bool ieee80211_assoc_success(stru
66974 bool have_higher_than_11mbit = false;
66975 u16 ap_ht_cap_flags;
66976
66977 + pax_track_stack();
66978 +
66979 /* AssocResp and ReassocResp have identical structure */
66980
66981 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
66982 diff -urNp linux-2.6.39.4/net/mac80211/pm.c linux-2.6.39.4/net/mac80211/pm.c
66983 --- linux-2.6.39.4/net/mac80211/pm.c 2011-05-19 00:06:34.000000000 -0400
66984 +++ linux-2.6.39.4/net/mac80211/pm.c 2011-08-05 19:44:37.000000000 -0400
66985 @@ -95,7 +95,7 @@ int __ieee80211_suspend(struct ieee80211
66986 }
66987
66988 /* stop hardware - this must stop RX */
66989 - if (local->open_count)
66990 + if (local_read(&local->open_count))
66991 ieee80211_stop_device(local);
66992
66993 local->suspended = true;
66994 diff -urNp linux-2.6.39.4/net/mac80211/rate.c linux-2.6.39.4/net/mac80211/rate.c
66995 --- linux-2.6.39.4/net/mac80211/rate.c 2011-05-19 00:06:34.000000000 -0400
66996 +++ linux-2.6.39.4/net/mac80211/rate.c 2011-08-05 19:44:37.000000000 -0400
66997 @@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
66998
66999 ASSERT_RTNL();
67000
67001 - if (local->open_count)
67002 + if (local_read(&local->open_count))
67003 return -EBUSY;
67004
67005 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
67006 diff -urNp linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c
67007 --- linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c 2011-05-19 00:06:34.000000000 -0400
67008 +++ linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c 2011-08-05 19:44:37.000000000 -0400
67009 @@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
67010
67011 spin_unlock_irqrestore(&events->lock, status);
67012
67013 - if (copy_to_user(buf, pb, p))
67014 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
67015 return -EFAULT;
67016
67017 return p;
67018 diff -urNp linux-2.6.39.4/net/mac80211/util.c linux-2.6.39.4/net/mac80211/util.c
67019 --- linux-2.6.39.4/net/mac80211/util.c 2011-05-19 00:06:34.000000000 -0400
67020 +++ linux-2.6.39.4/net/mac80211/util.c 2011-08-05 19:44:37.000000000 -0400
67021 @@ -1129,7 +1129,7 @@ int ieee80211_reconfig(struct ieee80211_
67022 local->resuming = true;
67023
67024 /* restart hardware */
67025 - if (local->open_count) {
67026 + if (local_read(&local->open_count)) {
67027 /*
67028 * Upon resume hardware can sometimes be goofy due to
67029 * various platform / driver / bus issues, so restarting
67030 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c
67031 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c 2011-07-09 09:18:51.000000000 -0400
67032 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c 2011-08-05 19:44:37.000000000 -0400
67033 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
67034 /* Increase the refcnt counter of the dest */
67035 atomic_inc(&dest->refcnt);
67036
67037 - conn_flags = atomic_read(&dest->conn_flags);
67038 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
67039 if (cp->protocol != IPPROTO_UDP)
67040 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
67041 /* Bind with the destination and its corresponding transmitter */
67042 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
67043 atomic_set(&cp->refcnt, 1);
67044
67045 atomic_set(&cp->n_control, 0);
67046 - atomic_set(&cp->in_pkts, 0);
67047 + atomic_set_unchecked(&cp->in_pkts, 0);
67048
67049 atomic_inc(&ipvs->conn_count);
67050 if (flags & IP_VS_CONN_F_NO_CPORT)
67051 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
67052
67053 /* Don't drop the entry if its number of incoming packets is not
67054 located in [0, 8] */
67055 - i = atomic_read(&cp->in_pkts);
67056 + i = atomic_read_unchecked(&cp->in_pkts);
67057 if (i > 8 || i < 0) return 0;
67058
67059 if (!todrop_rate[i]) return 0;
67060 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c
67061 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c 2011-07-09 09:18:51.000000000 -0400
67062 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c 2011-08-05 19:44:37.000000000 -0400
67063 @@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
67064 ret = cp->packet_xmit(skb, cp, pd->pp);
67065 /* do not touch skb anymore */
67066
67067 - atomic_inc(&cp->in_pkts);
67068 + atomic_inc_unchecked(&cp->in_pkts);
67069 ip_vs_conn_put(cp);
67070 return ret;
67071 }
67072 @@ -1633,7 +1633,7 @@ ip_vs_in(unsigned int hooknum, struct sk
67073 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
67074 pkts = sysctl_sync_threshold(ipvs);
67075 else
67076 - pkts = atomic_add_return(1, &cp->in_pkts);
67077 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
67078
67079 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
67080 cp->protocol == IPPROTO_SCTP) {
67081 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c
67082 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-19 00:06:34.000000000 -0400
67083 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-05 19:44:37.000000000 -0400
67084 @@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
67085 ip_vs_rs_hash(ipvs, dest);
67086 write_unlock_bh(&ipvs->rs_lock);
67087 }
67088 - atomic_set(&dest->conn_flags, conn_flags);
67089 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
67090
67091 /* bind the service */
67092 if (!dest->svc) {
67093 @@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
67094 " %-7s %-6d %-10d %-10d\n",
67095 &dest->addr.in6,
67096 ntohs(dest->port),
67097 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
67098 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
67099 atomic_read(&dest->weight),
67100 atomic_read(&dest->activeconns),
67101 atomic_read(&dest->inactconns));
67102 @@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
67103 "%-7s %-6d %-10d %-10d\n",
67104 ntohl(dest->addr.ip),
67105 ntohs(dest->port),
67106 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
67107 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
67108 atomic_read(&dest->weight),
67109 atomic_read(&dest->activeconns),
67110 atomic_read(&dest->inactconns));
67111 @@ -2287,6 +2287,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
67112 struct ip_vs_dest_user *udest_compat;
67113 struct ip_vs_dest_user_kern udest;
67114
67115 + pax_track_stack();
67116 +
67117 if (!capable(CAP_NET_ADMIN))
67118 return -EPERM;
67119
67120 @@ -2501,7 +2503,7 @@ __ip_vs_get_dest_entries(struct net *net
67121
67122 entry.addr = dest->addr.ip;
67123 entry.port = dest->port;
67124 - entry.conn_flags = atomic_read(&dest->conn_flags);
67125 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
67126 entry.weight = atomic_read(&dest->weight);
67127 entry.u_threshold = dest->u_threshold;
67128 entry.l_threshold = dest->l_threshold;
67129 @@ -3029,7 +3031,7 @@ static int ip_vs_genl_fill_dest(struct s
67130 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
67131
67132 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
67133 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
67134 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
67135 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
67136 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
67137 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
67138 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c
67139 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c 2011-05-19 00:06:34.000000000 -0400
67140 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c 2011-08-05 19:44:37.000000000 -0400
67141 @@ -648,7 +648,7 @@ control:
67142 * i.e only increment in_pkts for Templates.
67143 */
67144 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
67145 - int pkts = atomic_add_return(1, &cp->in_pkts);
67146 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
67147
67148 if (pkts % sysctl_sync_period(ipvs) != 1)
67149 return;
67150 @@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
67151
67152 if (opt)
67153 memcpy(&cp->in_seq, opt, sizeof(*opt));
67154 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
67155 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
67156 cp->state = state;
67157 cp->old_state = cp->state;
67158 /*
67159 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c
67160 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-19 00:06:34.000000000 -0400
67161 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-05 19:44:37.000000000 -0400
67162 @@ -1127,7 +1127,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
67163 else
67164 rc = NF_ACCEPT;
67165 /* do not touch skb anymore */
67166 - atomic_inc(&cp->in_pkts);
67167 + atomic_inc_unchecked(&cp->in_pkts);
67168 goto out;
67169 }
67170
67171 @@ -1245,7 +1245,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
67172 else
67173 rc = NF_ACCEPT;
67174 /* do not touch skb anymore */
67175 - atomic_inc(&cp->in_pkts);
67176 + atomic_inc_unchecked(&cp->in_pkts);
67177 goto out;
67178 }
67179
67180 diff -urNp linux-2.6.39.4/net/netfilter/Kconfig linux-2.6.39.4/net/netfilter/Kconfig
67181 --- linux-2.6.39.4/net/netfilter/Kconfig 2011-05-19 00:06:34.000000000 -0400
67182 +++ linux-2.6.39.4/net/netfilter/Kconfig 2011-08-05 19:44:37.000000000 -0400
67183 @@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
67184
67185 To compile it as a module, choose M here. If unsure, say N.
67186
67187 +config NETFILTER_XT_MATCH_GRADM
67188 + tristate '"gradm" match support'
67189 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
67190 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
67191 + ---help---
67192 + The gradm match allows to match on grsecurity RBAC being enabled.
67193 + It is useful when iptables rules are applied early on bootup to
67194 + prevent connections to the machine (except from a trusted host)
67195 + while the RBAC system is disabled.
67196 +
67197 config NETFILTER_XT_MATCH_HASHLIMIT
67198 tristate '"hashlimit" match support'
67199 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
67200 diff -urNp linux-2.6.39.4/net/netfilter/Makefile linux-2.6.39.4/net/netfilter/Makefile
67201 --- linux-2.6.39.4/net/netfilter/Makefile 2011-05-19 00:06:34.000000000 -0400
67202 +++ linux-2.6.39.4/net/netfilter/Makefile 2011-08-05 19:44:37.000000000 -0400
67203 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
67204 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
67205 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
67206 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
67207 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
67208 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
67209 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
67210 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
67211 diff -urNp linux-2.6.39.4/net/netfilter/nfnetlink_log.c linux-2.6.39.4/net/netfilter/nfnetlink_log.c
67212 --- linux-2.6.39.4/net/netfilter/nfnetlink_log.c 2011-05-19 00:06:34.000000000 -0400
67213 +++ linux-2.6.39.4/net/netfilter/nfnetlink_log.c 2011-08-05 19:44:37.000000000 -0400
67214 @@ -70,7 +70,7 @@ struct nfulnl_instance {
67215 };
67216
67217 static DEFINE_SPINLOCK(instances_lock);
67218 -static atomic_t global_seq;
67219 +static atomic_unchecked_t global_seq;
67220
67221 #define INSTANCE_BUCKETS 16
67222 static struct hlist_head instance_table[INSTANCE_BUCKETS];
67223 @@ -506,7 +506,7 @@ __build_packet_message(struct nfulnl_ins
67224 /* global sequence number */
67225 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
67226 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
67227 - htonl(atomic_inc_return(&global_seq)));
67228 + htonl(atomic_inc_return_unchecked(&global_seq)));
67229
67230 if (data_len) {
67231 struct nlattr *nla;
67232 diff -urNp linux-2.6.39.4/net/netfilter/nfnetlink_queue.c linux-2.6.39.4/net/netfilter/nfnetlink_queue.c
67233 --- linux-2.6.39.4/net/netfilter/nfnetlink_queue.c 2011-05-19 00:06:34.000000000 -0400
67234 +++ linux-2.6.39.4/net/netfilter/nfnetlink_queue.c 2011-08-05 19:44:37.000000000 -0400
67235 @@ -58,7 +58,7 @@ struct nfqnl_instance {
67236 */
67237 spinlock_t lock;
67238 unsigned int queue_total;
67239 - atomic_t id_sequence; /* 'sequence' of pkt ids */
67240 + atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
67241 struct list_head queue_list; /* packets in queue */
67242 };
67243
67244 @@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
67245 nfmsg->version = NFNETLINK_V0;
67246 nfmsg->res_id = htons(queue->queue_num);
67247
67248 - entry->id = atomic_inc_return(&queue->id_sequence);
67249 + entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
67250 pmsg.packet_id = htonl(entry->id);
67251 pmsg.hw_protocol = entskb->protocol;
67252 pmsg.hook = entry->hook;
67253 @@ -869,7 +869,7 @@ static int seq_show(struct seq_file *s,
67254 inst->peer_pid, inst->queue_total,
67255 inst->copy_mode, inst->copy_range,
67256 inst->queue_dropped, inst->queue_user_dropped,
67257 - atomic_read(&inst->id_sequence), 1);
67258 + atomic_read_unchecked(&inst->id_sequence), 1);
67259 }
67260
67261 static const struct seq_operations nfqnl_seq_ops = {
67262 diff -urNp linux-2.6.39.4/net/netfilter/xt_gradm.c linux-2.6.39.4/net/netfilter/xt_gradm.c
67263 --- linux-2.6.39.4/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
67264 +++ linux-2.6.39.4/net/netfilter/xt_gradm.c 2011-08-05 19:44:37.000000000 -0400
67265 @@ -0,0 +1,51 @@
67266 +/*
67267 + * gradm match for netfilter
67268 + * Copyright © Zbigniew Krzystolik, 2010
67269 + *
67270 + * This program is free software; you can redistribute it and/or modify
67271 + * it under the terms of the GNU General Public License; either version
67272 + * 2 or 3 as published by the Free Software Foundation.
67273 + */
67274 +#include <linux/module.h>
67275 +#include <linux/moduleparam.h>
67276 +#include <linux/skbuff.h>
67277 +#include <linux/netfilter/x_tables.h>
67278 +#include <linux/grsecurity.h>
67279 +#include <linux/netfilter/xt_gradm.h>
67280 +
67281 +static bool
67282 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
67283 +{
67284 + const struct xt_gradm_mtinfo *info = par->matchinfo;
67285 + bool retval = false;
67286 + if (gr_acl_is_enabled())
67287 + retval = true;
67288 + return retval ^ info->invflags;
67289 +}
67290 +
67291 +static struct xt_match gradm_mt_reg __read_mostly = {
67292 + .name = "gradm",
67293 + .revision = 0,
67294 + .family = NFPROTO_UNSPEC,
67295 + .match = gradm_mt,
67296 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
67297 + .me = THIS_MODULE,
67298 +};
67299 +
67300 +static int __init gradm_mt_init(void)
67301 +{
67302 + return xt_register_match(&gradm_mt_reg);
67303 +}
67304 +
67305 +static void __exit gradm_mt_exit(void)
67306 +{
67307 + xt_unregister_match(&gradm_mt_reg);
67308 +}
67309 +
67310 +module_init(gradm_mt_init);
67311 +module_exit(gradm_mt_exit);
67312 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
67313 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
67314 +MODULE_LICENSE("GPL");
67315 +MODULE_ALIAS("ipt_gradm");
67316 +MODULE_ALIAS("ip6t_gradm");
67317 diff -urNp linux-2.6.39.4/net/netfilter/xt_statistic.c linux-2.6.39.4/net/netfilter/xt_statistic.c
67318 --- linux-2.6.39.4/net/netfilter/xt_statistic.c 2011-05-19 00:06:34.000000000 -0400
67319 +++ linux-2.6.39.4/net/netfilter/xt_statistic.c 2011-08-05 19:44:37.000000000 -0400
67320 @@ -18,7 +18,7 @@
67321 #include <linux/netfilter/x_tables.h>
67322
67323 struct xt_statistic_priv {
67324 - atomic_t count;
67325 + atomic_unchecked_t count;
67326 } ____cacheline_aligned_in_smp;
67327
67328 MODULE_LICENSE("GPL");
67329 @@ -41,9 +41,9 @@ statistic_mt(const struct sk_buff *skb,
67330 break;
67331 case XT_STATISTIC_MODE_NTH:
67332 do {
67333 - oval = atomic_read(&info->master->count);
67334 + oval = atomic_read_unchecked(&info->master->count);
67335 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
67336 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
67337 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
67338 if (nval == 0)
67339 ret = !ret;
67340 break;
67341 @@ -63,7 +63,7 @@ static int statistic_mt_check(const stru
67342 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
67343 if (info->master == NULL)
67344 return -ENOMEM;
67345 - atomic_set(&info->master->count, info->u.nth.count);
67346 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
67347
67348 return 0;
67349 }
67350 diff -urNp linux-2.6.39.4/net/netlink/af_netlink.c linux-2.6.39.4/net/netlink/af_netlink.c
67351 --- linux-2.6.39.4/net/netlink/af_netlink.c 2011-05-19 00:06:34.000000000 -0400
67352 +++ linux-2.6.39.4/net/netlink/af_netlink.c 2011-08-05 19:44:37.000000000 -0400
67353 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock
67354 sk->sk_error_report(sk);
67355 }
67356 }
67357 - atomic_inc(&sk->sk_drops);
67358 + atomic_inc_unchecked(&sk->sk_drops);
67359 }
67360
67361 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
67362 @@ -1992,15 +1992,23 @@ static int netlink_seq_show(struct seq_f
67363 struct netlink_sock *nlk = nlk_sk(s);
67364
67365 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d %-8lu\n",
67366 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67367 + NULL,
67368 +#else
67369 s,
67370 +#endif
67371 s->sk_protocol,
67372 nlk->pid,
67373 nlk->groups ? (u32)nlk->groups[0] : 0,
67374 sk_rmem_alloc_get(s),
67375 sk_wmem_alloc_get(s),
67376 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67377 + NULL,
67378 +#else
67379 nlk->cb,
67380 +#endif
67381 atomic_read(&s->sk_refcnt),
67382 - atomic_read(&s->sk_drops),
67383 + atomic_read_unchecked(&s->sk_drops),
67384 sock_i_ino(s)
67385 );
67386
67387 diff -urNp linux-2.6.39.4/net/netrom/af_netrom.c linux-2.6.39.4/net/netrom/af_netrom.c
67388 --- linux-2.6.39.4/net/netrom/af_netrom.c 2011-05-19 00:06:34.000000000 -0400
67389 +++ linux-2.6.39.4/net/netrom/af_netrom.c 2011-08-05 19:44:37.000000000 -0400
67390 @@ -840,6 +840,7 @@ static int nr_getname(struct socket *soc
67391 struct sock *sk = sock->sk;
67392 struct nr_sock *nr = nr_sk(sk);
67393
67394 + memset(sax, 0, sizeof(*sax));
67395 lock_sock(sk);
67396 if (peer != 0) {
67397 if (sk->sk_state != TCP_ESTABLISHED) {
67398 @@ -854,7 +855,6 @@ static int nr_getname(struct socket *soc
67399 *uaddr_len = sizeof(struct full_sockaddr_ax25);
67400 } else {
67401 sax->fsa_ax25.sax25_family = AF_NETROM;
67402 - sax->fsa_ax25.sax25_ndigis = 0;
67403 sax->fsa_ax25.sax25_call = nr->source_addr;
67404 *uaddr_len = sizeof(struct sockaddr_ax25);
67405 }
67406 diff -urNp linux-2.6.39.4/net/packet/af_packet.c linux-2.6.39.4/net/packet/af_packet.c
67407 --- linux-2.6.39.4/net/packet/af_packet.c 2011-07-09 09:18:51.000000000 -0400
67408 +++ linux-2.6.39.4/net/packet/af_packet.c 2011-08-05 19:44:37.000000000 -0400
67409 @@ -647,14 +647,14 @@ static int packet_rcv(struct sk_buff *sk
67410
67411 spin_lock(&sk->sk_receive_queue.lock);
67412 po->stats.tp_packets++;
67413 - skb->dropcount = atomic_read(&sk->sk_drops);
67414 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
67415 __skb_queue_tail(&sk->sk_receive_queue, skb);
67416 spin_unlock(&sk->sk_receive_queue.lock);
67417 sk->sk_data_ready(sk, skb->len);
67418 return 0;
67419
67420 drop_n_acct:
67421 - po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
67422 + po->stats.tp_drops = atomic_inc_return_unchecked(&sk->sk_drops);
67423
67424 drop_n_restore:
67425 if (skb_head != skb->data && skb_shared(skb)) {
67426 @@ -2159,7 +2159,7 @@ static int packet_getsockopt(struct sock
67427 case PACKET_HDRLEN:
67428 if (len > sizeof(int))
67429 len = sizeof(int);
67430 - if (copy_from_user(&val, optval, len))
67431 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
67432 return -EFAULT;
67433 switch (val) {
67434 case TPACKET_V1:
67435 @@ -2197,7 +2197,7 @@ static int packet_getsockopt(struct sock
67436
67437 if (put_user(len, optlen))
67438 return -EFAULT;
67439 - if (copy_to_user(optval, data, len))
67440 + if (len > sizeof(st) || copy_to_user(optval, data, len))
67441 return -EFAULT;
67442 return 0;
67443 }
67444 @@ -2709,7 +2709,11 @@ static int packet_seq_show(struct seq_fi
67445
67446 seq_printf(seq,
67447 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
67448 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67449 + NULL,
67450 +#else
67451 s,
67452 +#endif
67453 atomic_read(&s->sk_refcnt),
67454 s->sk_type,
67455 ntohs(po->num),
67456 diff -urNp linux-2.6.39.4/net/phonet/af_phonet.c linux-2.6.39.4/net/phonet/af_phonet.c
67457 --- linux-2.6.39.4/net/phonet/af_phonet.c 2011-05-19 00:06:34.000000000 -0400
67458 +++ linux-2.6.39.4/net/phonet/af_phonet.c 2011-08-05 20:34:06.000000000 -0400
67459 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
67460 {
67461 struct phonet_protocol *pp;
67462
67463 - if (protocol >= PHONET_NPROTO)
67464 + if (protocol < 0 || protocol >= PHONET_NPROTO)
67465 return NULL;
67466
67467 rcu_read_lock();
67468 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_regist
67469 {
67470 int err = 0;
67471
67472 - if (protocol >= PHONET_NPROTO)
67473 + if (protocol < 0 || protocol >= PHONET_NPROTO)
67474 return -EINVAL;
67475
67476 err = proto_register(pp->prot, 1);
67477 diff -urNp linux-2.6.39.4/net/phonet/pep.c linux-2.6.39.4/net/phonet/pep.c
67478 --- linux-2.6.39.4/net/phonet/pep.c 2011-05-19 00:06:34.000000000 -0400
67479 +++ linux-2.6.39.4/net/phonet/pep.c 2011-08-05 19:44:37.000000000 -0400
67480 @@ -387,7 +387,7 @@ static int pipe_do_rcv(struct sock *sk,
67481
67482 case PNS_PEP_CTRL_REQ:
67483 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
67484 - atomic_inc(&sk->sk_drops);
67485 + atomic_inc_unchecked(&sk->sk_drops);
67486 break;
67487 }
67488 __skb_pull(skb, 4);
67489 @@ -408,7 +408,7 @@ static int pipe_do_rcv(struct sock *sk,
67490 }
67491
67492 if (pn->rx_credits == 0) {
67493 - atomic_inc(&sk->sk_drops);
67494 + atomic_inc_unchecked(&sk->sk_drops);
67495 err = -ENOBUFS;
67496 break;
67497 }
67498 @@ -556,7 +556,7 @@ static int pipe_handler_do_rcv(struct so
67499 }
67500
67501 if (pn->rx_credits == 0) {
67502 - atomic_inc(&sk->sk_drops);
67503 + atomic_inc_unchecked(&sk->sk_drops);
67504 err = NET_RX_DROP;
67505 break;
67506 }
67507 diff -urNp linux-2.6.39.4/net/phonet/socket.c linux-2.6.39.4/net/phonet/socket.c
67508 --- linux-2.6.39.4/net/phonet/socket.c 2011-05-19 00:06:34.000000000 -0400
67509 +++ linux-2.6.39.4/net/phonet/socket.c 2011-08-05 19:44:37.000000000 -0400
67510 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_f
67511 pn->resource, sk->sk_state,
67512 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
67513 sock_i_uid(sk), sock_i_ino(sk),
67514 - atomic_read(&sk->sk_refcnt), sk,
67515 - atomic_read(&sk->sk_drops), &len);
67516 + atomic_read(&sk->sk_refcnt),
67517 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67518 + NULL,
67519 +#else
67520 + sk,
67521 +#endif
67522 + atomic_read_unchecked(&sk->sk_drops), &len);
67523 }
67524 seq_printf(seq, "%*s\n", 127 - len, "");
67525 return 0;
67526 diff -urNp linux-2.6.39.4/net/rds/cong.c linux-2.6.39.4/net/rds/cong.c
67527 --- linux-2.6.39.4/net/rds/cong.c 2011-05-19 00:06:34.000000000 -0400
67528 +++ linux-2.6.39.4/net/rds/cong.c 2011-08-05 19:44:37.000000000 -0400
67529 @@ -77,7 +77,7 @@
67530 * finds that the saved generation number is smaller than the global generation
67531 * number, it wakes up the process.
67532 */
67533 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
67534 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
67535
67536 /*
67537 * Congestion monitoring
67538 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
67539 rdsdebug("waking map %p for %pI4\n",
67540 map, &map->m_addr);
67541 rds_stats_inc(s_cong_update_received);
67542 - atomic_inc(&rds_cong_generation);
67543 + atomic_inc_unchecked(&rds_cong_generation);
67544 if (waitqueue_active(&map->m_waitq))
67545 wake_up(&map->m_waitq);
67546 if (waitqueue_active(&rds_poll_waitq))
67547 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
67548
67549 int rds_cong_updated_since(unsigned long *recent)
67550 {
67551 - unsigned long gen = atomic_read(&rds_cong_generation);
67552 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
67553
67554 if (likely(*recent == gen))
67555 return 0;
67556 diff -urNp linux-2.6.39.4/net/rds/ib_cm.c linux-2.6.39.4/net/rds/ib_cm.c
67557 --- linux-2.6.39.4/net/rds/ib_cm.c 2011-05-19 00:06:34.000000000 -0400
67558 +++ linux-2.6.39.4/net/rds/ib_cm.c 2011-08-05 19:44:37.000000000 -0400
67559 @@ -720,7 +720,7 @@ void rds_ib_conn_shutdown(struct rds_con
67560 /* Clear the ACK state */
67561 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
67562 #ifdef KERNEL_HAS_ATOMIC64
67563 - atomic64_set(&ic->i_ack_next, 0);
67564 + atomic64_set_unchecked(&ic->i_ack_next, 0);
67565 #else
67566 ic->i_ack_next = 0;
67567 #endif
67568 diff -urNp linux-2.6.39.4/net/rds/ib.h linux-2.6.39.4/net/rds/ib.h
67569 --- linux-2.6.39.4/net/rds/ib.h 2011-05-19 00:06:34.000000000 -0400
67570 +++ linux-2.6.39.4/net/rds/ib.h 2011-08-05 19:44:37.000000000 -0400
67571 @@ -127,7 +127,7 @@ struct rds_ib_connection {
67572 /* sending acks */
67573 unsigned long i_ack_flags;
67574 #ifdef KERNEL_HAS_ATOMIC64
67575 - atomic64_t i_ack_next; /* next ACK to send */
67576 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
67577 #else
67578 spinlock_t i_ack_lock; /* protect i_ack_next */
67579 u64 i_ack_next; /* next ACK to send */
67580 diff -urNp linux-2.6.39.4/net/rds/ib_recv.c linux-2.6.39.4/net/rds/ib_recv.c
67581 --- linux-2.6.39.4/net/rds/ib_recv.c 2011-05-19 00:06:34.000000000 -0400
67582 +++ linux-2.6.39.4/net/rds/ib_recv.c 2011-08-05 19:44:37.000000000 -0400
67583 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
67584 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
67585 int ack_required)
67586 {
67587 - atomic64_set(&ic->i_ack_next, seq);
67588 + atomic64_set_unchecked(&ic->i_ack_next, seq);
67589 if (ack_required) {
67590 smp_mb__before_clear_bit();
67591 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67592 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
67593 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67594 smp_mb__after_clear_bit();
67595
67596 - return atomic64_read(&ic->i_ack_next);
67597 + return atomic64_read_unchecked(&ic->i_ack_next);
67598 }
67599 #endif
67600
67601 diff -urNp linux-2.6.39.4/net/rds/iw_cm.c linux-2.6.39.4/net/rds/iw_cm.c
67602 --- linux-2.6.39.4/net/rds/iw_cm.c 2011-05-19 00:06:34.000000000 -0400
67603 +++ linux-2.6.39.4/net/rds/iw_cm.c 2011-08-05 19:44:37.000000000 -0400
67604 @@ -664,7 +664,7 @@ void rds_iw_conn_shutdown(struct rds_con
67605 /* Clear the ACK state */
67606 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
67607 #ifdef KERNEL_HAS_ATOMIC64
67608 - atomic64_set(&ic->i_ack_next, 0);
67609 + atomic64_set_unchecked(&ic->i_ack_next, 0);
67610 #else
67611 ic->i_ack_next = 0;
67612 #endif
67613 diff -urNp linux-2.6.39.4/net/rds/iw.h linux-2.6.39.4/net/rds/iw.h
67614 --- linux-2.6.39.4/net/rds/iw.h 2011-05-19 00:06:34.000000000 -0400
67615 +++ linux-2.6.39.4/net/rds/iw.h 2011-08-05 19:44:37.000000000 -0400
67616 @@ -133,7 +133,7 @@ struct rds_iw_connection {
67617 /* sending acks */
67618 unsigned long i_ack_flags;
67619 #ifdef KERNEL_HAS_ATOMIC64
67620 - atomic64_t i_ack_next; /* next ACK to send */
67621 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
67622 #else
67623 spinlock_t i_ack_lock; /* protect i_ack_next */
67624 u64 i_ack_next; /* next ACK to send */
67625 diff -urNp linux-2.6.39.4/net/rds/iw_rdma.c linux-2.6.39.4/net/rds/iw_rdma.c
67626 --- linux-2.6.39.4/net/rds/iw_rdma.c 2011-05-19 00:06:34.000000000 -0400
67627 +++ linux-2.6.39.4/net/rds/iw_rdma.c 2011-08-05 19:44:37.000000000 -0400
67628 @@ -182,6 +182,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
67629 struct rdma_cm_id *pcm_id;
67630 int rc;
67631
67632 + pax_track_stack();
67633 +
67634 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
67635 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
67636
67637 diff -urNp linux-2.6.39.4/net/rds/iw_recv.c linux-2.6.39.4/net/rds/iw_recv.c
67638 --- linux-2.6.39.4/net/rds/iw_recv.c 2011-05-19 00:06:34.000000000 -0400
67639 +++ linux-2.6.39.4/net/rds/iw_recv.c 2011-08-05 19:44:37.000000000 -0400
67640 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
67641 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
67642 int ack_required)
67643 {
67644 - atomic64_set(&ic->i_ack_next, seq);
67645 + atomic64_set_unchecked(&ic->i_ack_next, seq);
67646 if (ack_required) {
67647 smp_mb__before_clear_bit();
67648 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67649 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
67650 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67651 smp_mb__after_clear_bit();
67652
67653 - return atomic64_read(&ic->i_ack_next);
67654 + return atomic64_read_unchecked(&ic->i_ack_next);
67655 }
67656 #endif
67657
67658 diff -urNp linux-2.6.39.4/net/rxrpc/af_rxrpc.c linux-2.6.39.4/net/rxrpc/af_rxrpc.c
67659 --- linux-2.6.39.4/net/rxrpc/af_rxrpc.c 2011-05-19 00:06:34.000000000 -0400
67660 +++ linux-2.6.39.4/net/rxrpc/af_rxrpc.c 2011-08-05 19:44:37.000000000 -0400
67661 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_
67662 __be32 rxrpc_epoch;
67663
67664 /* current debugging ID */
67665 -atomic_t rxrpc_debug_id;
67666 +atomic_unchecked_t rxrpc_debug_id;
67667
67668 /* count of skbs currently in use */
67669 atomic_t rxrpc_n_skbs;
67670 diff -urNp linux-2.6.39.4/net/rxrpc/ar-ack.c linux-2.6.39.4/net/rxrpc/ar-ack.c
67671 --- linux-2.6.39.4/net/rxrpc/ar-ack.c 2011-05-19 00:06:34.000000000 -0400
67672 +++ linux-2.6.39.4/net/rxrpc/ar-ack.c 2011-08-05 19:44:37.000000000 -0400
67673 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_ca
67674
67675 _enter("{%d,%d,%d,%d},",
67676 call->acks_hard, call->acks_unacked,
67677 - atomic_read(&call->sequence),
67678 + atomic_read_unchecked(&call->sequence),
67679 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
67680
67681 stop = 0;
67682 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_ca
67683
67684 /* each Tx packet has a new serial number */
67685 sp->hdr.serial =
67686 - htonl(atomic_inc_return(&call->conn->serial));
67687 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
67688
67689 hdr = (struct rxrpc_header *) txb->head;
67690 hdr->serial = sp->hdr.serial;
67691 @@ -405,7 +405,7 @@ static void rxrpc_rotate_tx_window(struc
67692 */
67693 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
67694 {
67695 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
67696 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
67697 }
67698
67699 /*
67700 @@ -631,7 +631,7 @@ process_further:
67701
67702 latest = ntohl(sp->hdr.serial);
67703 hard = ntohl(ack.firstPacket);
67704 - tx = atomic_read(&call->sequence);
67705 + tx = atomic_read_unchecked(&call->sequence);
67706
67707 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
67708 latest,
67709 @@ -844,6 +844,8 @@ void rxrpc_process_call(struct work_stru
67710 u32 abort_code = RX_PROTOCOL_ERROR;
67711 u8 *acks = NULL;
67712
67713 + pax_track_stack();
67714 +
67715 //printk("\n--------------------\n");
67716 _enter("{%d,%s,%lx} [%lu]",
67717 call->debug_id, rxrpc_call_states[call->state], call->events,
67718 @@ -1163,7 +1165,7 @@ void rxrpc_process_call(struct work_stru
67719 goto maybe_reschedule;
67720
67721 send_ACK_with_skew:
67722 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
67723 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
67724 ntohl(ack.serial));
67725 send_ACK:
67726 mtu = call->conn->trans->peer->if_mtu;
67727 @@ -1175,7 +1177,7 @@ send_ACK:
67728 ackinfo.rxMTU = htonl(5692);
67729 ackinfo.jumbo_max = htonl(4);
67730
67731 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
67732 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
67733 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
67734 ntohl(hdr.serial),
67735 ntohs(ack.maxSkew),
67736 @@ -1193,7 +1195,7 @@ send_ACK:
67737 send_message:
67738 _debug("send message");
67739
67740 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
67741 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
67742 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
67743 send_message_2:
67744
67745 diff -urNp linux-2.6.39.4/net/rxrpc/ar-call.c linux-2.6.39.4/net/rxrpc/ar-call.c
67746 --- linux-2.6.39.4/net/rxrpc/ar-call.c 2011-05-19 00:06:34.000000000 -0400
67747 +++ linux-2.6.39.4/net/rxrpc/ar-call.c 2011-08-05 19:44:37.000000000 -0400
67748 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
67749 spin_lock_init(&call->lock);
67750 rwlock_init(&call->state_lock);
67751 atomic_set(&call->usage, 1);
67752 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
67753 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67754 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
67755
67756 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
67757 diff -urNp linux-2.6.39.4/net/rxrpc/ar-connection.c linux-2.6.39.4/net/rxrpc/ar-connection.c
67758 --- linux-2.6.39.4/net/rxrpc/ar-connection.c 2011-05-19 00:06:34.000000000 -0400
67759 +++ linux-2.6.39.4/net/rxrpc/ar-connection.c 2011-08-05 19:44:37.000000000 -0400
67760 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_al
67761 rwlock_init(&conn->lock);
67762 spin_lock_init(&conn->state_lock);
67763 atomic_set(&conn->usage, 1);
67764 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
67765 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67766 conn->avail_calls = RXRPC_MAXCALLS;
67767 conn->size_align = 4;
67768 conn->header_size = sizeof(struct rxrpc_header);
67769 diff -urNp linux-2.6.39.4/net/rxrpc/ar-connevent.c linux-2.6.39.4/net/rxrpc/ar-connevent.c
67770 --- linux-2.6.39.4/net/rxrpc/ar-connevent.c 2011-05-19 00:06:34.000000000 -0400
67771 +++ linux-2.6.39.4/net/rxrpc/ar-connevent.c 2011-08-05 19:44:37.000000000 -0400
67772 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
67773
67774 len = iov[0].iov_len + iov[1].iov_len;
67775
67776 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
67777 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67778 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
67779
67780 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
67781 diff -urNp linux-2.6.39.4/net/rxrpc/ar-input.c linux-2.6.39.4/net/rxrpc/ar-input.c
67782 --- linux-2.6.39.4/net/rxrpc/ar-input.c 2011-05-19 00:06:34.000000000 -0400
67783 +++ linux-2.6.39.4/net/rxrpc/ar-input.c 2011-08-05 19:44:37.000000000 -0400
67784 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rx
67785 /* track the latest serial number on this connection for ACK packet
67786 * information */
67787 serial = ntohl(sp->hdr.serial);
67788 - hi_serial = atomic_read(&call->conn->hi_serial);
67789 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
67790 while (serial > hi_serial)
67791 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
67792 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
67793 serial);
67794
67795 /* request ACK generation for any ACK or DATA packet that requests
67796 diff -urNp linux-2.6.39.4/net/rxrpc/ar-internal.h linux-2.6.39.4/net/rxrpc/ar-internal.h
67797 --- linux-2.6.39.4/net/rxrpc/ar-internal.h 2011-05-19 00:06:34.000000000 -0400
67798 +++ linux-2.6.39.4/net/rxrpc/ar-internal.h 2011-08-05 19:44:37.000000000 -0400
67799 @@ -272,8 +272,8 @@ struct rxrpc_connection {
67800 int error; /* error code for local abort */
67801 int debug_id; /* debug ID for printks */
67802 unsigned call_counter; /* call ID counter */
67803 - atomic_t serial; /* packet serial number counter */
67804 - atomic_t hi_serial; /* highest serial number received */
67805 + atomic_unchecked_t serial; /* packet serial number counter */
67806 + atomic_unchecked_t hi_serial; /* highest serial number received */
67807 u8 avail_calls; /* number of calls available */
67808 u8 size_align; /* data size alignment (for security) */
67809 u8 header_size; /* rxrpc + security header size */
67810 @@ -346,7 +346,7 @@ struct rxrpc_call {
67811 spinlock_t lock;
67812 rwlock_t state_lock; /* lock for state transition */
67813 atomic_t usage;
67814 - atomic_t sequence; /* Tx data packet sequence counter */
67815 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
67816 u32 abort_code; /* local/remote abort code */
67817 enum { /* current state of call */
67818 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
67819 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
67820 */
67821 extern atomic_t rxrpc_n_skbs;
67822 extern __be32 rxrpc_epoch;
67823 -extern atomic_t rxrpc_debug_id;
67824 +extern atomic_unchecked_t rxrpc_debug_id;
67825 extern struct workqueue_struct *rxrpc_workqueue;
67826
67827 /*
67828 diff -urNp linux-2.6.39.4/net/rxrpc/ar-local.c linux-2.6.39.4/net/rxrpc/ar-local.c
67829 --- linux-2.6.39.4/net/rxrpc/ar-local.c 2011-05-19 00:06:34.000000000 -0400
67830 +++ linux-2.6.39.4/net/rxrpc/ar-local.c 2011-08-05 19:44:37.000000000 -0400
67831 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
67832 spin_lock_init(&local->lock);
67833 rwlock_init(&local->services_lock);
67834 atomic_set(&local->usage, 1);
67835 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
67836 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67837 memcpy(&local->srx, srx, sizeof(*srx));
67838 }
67839
67840 diff -urNp linux-2.6.39.4/net/rxrpc/ar-output.c linux-2.6.39.4/net/rxrpc/ar-output.c
67841 --- linux-2.6.39.4/net/rxrpc/ar-output.c 2011-05-19 00:06:34.000000000 -0400
67842 +++ linux-2.6.39.4/net/rxrpc/ar-output.c 2011-08-05 19:44:37.000000000 -0400
67843 @@ -681,9 +681,9 @@ static int rxrpc_send_data(struct kiocb
67844 sp->hdr.cid = call->cid;
67845 sp->hdr.callNumber = call->call_id;
67846 sp->hdr.seq =
67847 - htonl(atomic_inc_return(&call->sequence));
67848 + htonl(atomic_inc_return_unchecked(&call->sequence));
67849 sp->hdr.serial =
67850 - htonl(atomic_inc_return(&conn->serial));
67851 + htonl(atomic_inc_return_unchecked(&conn->serial));
67852 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
67853 sp->hdr.userStatus = 0;
67854 sp->hdr.securityIndex = conn->security_ix;
67855 diff -urNp linux-2.6.39.4/net/rxrpc/ar-peer.c linux-2.6.39.4/net/rxrpc/ar-peer.c
67856 --- linux-2.6.39.4/net/rxrpc/ar-peer.c 2011-05-19 00:06:34.000000000 -0400
67857 +++ linux-2.6.39.4/net/rxrpc/ar-peer.c 2011-08-05 19:44:37.000000000 -0400
67858 @@ -71,7 +71,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
67859 INIT_LIST_HEAD(&peer->error_targets);
67860 spin_lock_init(&peer->lock);
67861 atomic_set(&peer->usage, 1);
67862 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
67863 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67864 memcpy(&peer->srx, srx, sizeof(*srx));
67865
67866 rxrpc_assess_MTU_size(peer);
67867 diff -urNp linux-2.6.39.4/net/rxrpc/ar-proc.c linux-2.6.39.4/net/rxrpc/ar-proc.c
67868 --- linux-2.6.39.4/net/rxrpc/ar-proc.c 2011-05-19 00:06:34.000000000 -0400
67869 +++ linux-2.6.39.4/net/rxrpc/ar-proc.c 2011-08-05 19:44:37.000000000 -0400
67870 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
67871 atomic_read(&conn->usage),
67872 rxrpc_conn_states[conn->state],
67873 key_serial(conn->key),
67874 - atomic_read(&conn->serial),
67875 - atomic_read(&conn->hi_serial));
67876 + atomic_read_unchecked(&conn->serial),
67877 + atomic_read_unchecked(&conn->hi_serial));
67878
67879 return 0;
67880 }
67881 diff -urNp linux-2.6.39.4/net/rxrpc/ar-transport.c linux-2.6.39.4/net/rxrpc/ar-transport.c
67882 --- linux-2.6.39.4/net/rxrpc/ar-transport.c 2011-05-19 00:06:34.000000000 -0400
67883 +++ linux-2.6.39.4/net/rxrpc/ar-transport.c 2011-08-05 19:44:37.000000000 -0400
67884 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_all
67885 spin_lock_init(&trans->client_lock);
67886 rwlock_init(&trans->conn_lock);
67887 atomic_set(&trans->usage, 1);
67888 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
67889 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67890
67891 if (peer->srx.transport.family == AF_INET) {
67892 switch (peer->srx.transport_type) {
67893 diff -urNp linux-2.6.39.4/net/rxrpc/rxkad.c linux-2.6.39.4/net/rxrpc/rxkad.c
67894 --- linux-2.6.39.4/net/rxrpc/rxkad.c 2011-05-19 00:06:34.000000000 -0400
67895 +++ linux-2.6.39.4/net/rxrpc/rxkad.c 2011-08-05 19:44:37.000000000 -0400
67896 @@ -211,6 +211,8 @@ static int rxkad_secure_packet_encrypt(c
67897 u16 check;
67898 int nsg;
67899
67900 + pax_track_stack();
67901 +
67902 sp = rxrpc_skb(skb);
67903
67904 _enter("");
67905 @@ -338,6 +340,8 @@ static int rxkad_verify_packet_auth(cons
67906 u16 check;
67907 int nsg;
67908
67909 + pax_track_stack();
67910 +
67911 _enter("");
67912
67913 sp = rxrpc_skb(skb);
67914 @@ -610,7 +614,7 @@ static int rxkad_issue_challenge(struct
67915
67916 len = iov[0].iov_len + iov[1].iov_len;
67917
67918 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
67919 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67920 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
67921
67922 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
67923 @@ -660,7 +664,7 @@ static int rxkad_send_response(struct rx
67924
67925 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
67926
67927 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
67928 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67929 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
67930
67931 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
67932 diff -urNp linux-2.6.39.4/net/sctp/proc.c linux-2.6.39.4/net/sctp/proc.c
67933 --- linux-2.6.39.4/net/sctp/proc.c 2011-05-19 00:06:34.000000000 -0400
67934 +++ linux-2.6.39.4/net/sctp/proc.c 2011-08-05 19:44:37.000000000 -0400
67935 @@ -212,7 +212,12 @@ static int sctp_eps_seq_show(struct seq_
67936 sctp_for_each_hentry(epb, node, &head->chain) {
67937 ep = sctp_ep(epb);
67938 sk = epb->sk;
67939 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
67940 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
67941 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67942 + NULL, NULL,
67943 +#else
67944 + ep, sk,
67945 +#endif
67946 sctp_sk(sk)->type, sk->sk_state, hash,
67947 epb->bind_addr.port,
67948 sock_i_uid(sk), sock_i_ino(sk));
67949 @@ -318,7 +323,12 @@ static int sctp_assocs_seq_show(struct s
67950 seq_printf(seq,
67951 "%8p %8p %-3d %-3d %-2d %-4d "
67952 "%4d %8d %8d %7d %5lu %-5d %5d ",
67953 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
67954 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67955 + NULL, NULL,
67956 +#else
67957 + assoc, sk,
67958 +#endif
67959 + sctp_sk(sk)->type, sk->sk_state,
67960 assoc->state, hash,
67961 assoc->assoc_id,
67962 assoc->sndbuf_used,
67963 diff -urNp linux-2.6.39.4/net/sctp/socket.c linux-2.6.39.4/net/sctp/socket.c
67964 --- linux-2.6.39.4/net/sctp/socket.c 2011-05-19 00:06:34.000000000 -0400
67965 +++ linux-2.6.39.4/net/sctp/socket.c 2011-08-05 19:44:37.000000000 -0400
67966 @@ -4433,7 +4433,7 @@ static int sctp_getsockopt_peer_addrs(st
67967 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
67968 if (space_left < addrlen)
67969 return -ENOMEM;
67970 - if (copy_to_user(to, &temp, addrlen))
67971 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
67972 return -EFAULT;
67973 to += addrlen;
67974 cnt++;
67975 diff -urNp linux-2.6.39.4/net/socket.c linux-2.6.39.4/net/socket.c
67976 --- linux-2.6.39.4/net/socket.c 2011-06-03 00:04:14.000000000 -0400
67977 +++ linux-2.6.39.4/net/socket.c 2011-08-05 19:44:37.000000000 -0400
67978 @@ -88,6 +88,7 @@
67979 #include <linux/nsproxy.h>
67980 #include <linux/magic.h>
67981 #include <linux/slab.h>
67982 +#include <linux/in.h>
67983
67984 #include <asm/uaccess.h>
67985 #include <asm/unistd.h>
67986 @@ -105,6 +106,8 @@
67987 #include <linux/sockios.h>
67988 #include <linux/atalk.h>
67989
67990 +#include <linux/grsock.h>
67991 +
67992 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
67993 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
67994 unsigned long nr_segs, loff_t pos);
67995 @@ -330,7 +333,7 @@ static struct dentry *sockfs_mount(struc
67996 &sockfs_dentry_operations, SOCKFS_MAGIC);
67997 }
67998
67999 -static struct vfsmount *sock_mnt __read_mostly;
68000 +struct vfsmount *sock_mnt __read_mostly;
68001
68002 static struct file_system_type sock_fs_type = {
68003 .name = "sockfs",
68004 @@ -1179,6 +1182,8 @@ int __sock_create(struct net *net, int f
68005 return -EAFNOSUPPORT;
68006 if (type < 0 || type >= SOCK_MAX)
68007 return -EINVAL;
68008 + if (protocol < 0)
68009 + return -EINVAL;
68010
68011 /* Compatibility.
68012
68013 @@ -1311,6 +1316,16 @@ SYSCALL_DEFINE3(socket, int, family, int
68014 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
68015 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
68016
68017 + if(!gr_search_socket(family, type, protocol)) {
68018 + retval = -EACCES;
68019 + goto out;
68020 + }
68021 +
68022 + if (gr_handle_sock_all(family, type, protocol)) {
68023 + retval = -EACCES;
68024 + goto out;
68025 + }
68026 +
68027 retval = sock_create(family, type, protocol, &sock);
68028 if (retval < 0)
68029 goto out;
68030 @@ -1423,6 +1438,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
68031 if (sock) {
68032 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
68033 if (err >= 0) {
68034 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
68035 + err = -EACCES;
68036 + goto error;
68037 + }
68038 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
68039 + if (err)
68040 + goto error;
68041 +
68042 err = security_socket_bind(sock,
68043 (struct sockaddr *)&address,
68044 addrlen);
68045 @@ -1431,6 +1454,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
68046 (struct sockaddr *)
68047 &address, addrlen);
68048 }
68049 +error:
68050 fput_light(sock->file, fput_needed);
68051 }
68052 return err;
68053 @@ -1454,10 +1478,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
68054 if ((unsigned)backlog > somaxconn)
68055 backlog = somaxconn;
68056
68057 + if (gr_handle_sock_server_other(sock->sk)) {
68058 + err = -EPERM;
68059 + goto error;
68060 + }
68061 +
68062 + err = gr_search_listen(sock);
68063 + if (err)
68064 + goto error;
68065 +
68066 err = security_socket_listen(sock, backlog);
68067 if (!err)
68068 err = sock->ops->listen(sock, backlog);
68069
68070 +error:
68071 fput_light(sock->file, fput_needed);
68072 }
68073 return err;
68074 @@ -1501,6 +1535,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
68075 newsock->type = sock->type;
68076 newsock->ops = sock->ops;
68077
68078 + if (gr_handle_sock_server_other(sock->sk)) {
68079 + err = -EPERM;
68080 + sock_release(newsock);
68081 + goto out_put;
68082 + }
68083 +
68084 + err = gr_search_accept(sock);
68085 + if (err) {
68086 + sock_release(newsock);
68087 + goto out_put;
68088 + }
68089 +
68090 /*
68091 * We don't need try_module_get here, as the listening socket (sock)
68092 * has the protocol module (sock->ops->owner) held.
68093 @@ -1539,6 +1585,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
68094 fd_install(newfd, newfile);
68095 err = newfd;
68096
68097 + gr_attach_curr_ip(newsock->sk);
68098 +
68099 out_put:
68100 fput_light(sock->file, fput_needed);
68101 out:
68102 @@ -1571,6 +1619,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
68103 int, addrlen)
68104 {
68105 struct socket *sock;
68106 + struct sockaddr *sck;
68107 struct sockaddr_storage address;
68108 int err, fput_needed;
68109
68110 @@ -1581,6 +1630,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
68111 if (err < 0)
68112 goto out_put;
68113
68114 + sck = (struct sockaddr *)&address;
68115 +
68116 + if (gr_handle_sock_client(sck)) {
68117 + err = -EACCES;
68118 + goto out_put;
68119 + }
68120 +
68121 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
68122 + if (err)
68123 + goto out_put;
68124 +
68125 err =
68126 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
68127 if (err)
68128 @@ -1882,6 +1942,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct
68129 int err, ctl_len, iov_size, total_len;
68130 int fput_needed;
68131
68132 + pax_track_stack();
68133 +
68134 err = -EFAULT;
68135 if (MSG_CMSG_COMPAT & flags) {
68136 if (get_compat_msghdr(&msg_sys, msg_compat))
68137 diff -urNp linux-2.6.39.4/net/sunrpc/sched.c linux-2.6.39.4/net/sunrpc/sched.c
68138 --- linux-2.6.39.4/net/sunrpc/sched.c 2011-08-05 21:11:51.000000000 -0400
68139 +++ linux-2.6.39.4/net/sunrpc/sched.c 2011-08-05 21:12:20.000000000 -0400
68140 @@ -234,9 +234,9 @@ static int rpc_wait_bit_killable(void *w
68141 #ifdef RPC_DEBUG
68142 static void rpc_task_set_debuginfo(struct rpc_task *task)
68143 {
68144 - static atomic_t rpc_pid;
68145 + static atomic_unchecked_t rpc_pid;
68146
68147 - task->tk_pid = atomic_inc_return(&rpc_pid);
68148 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
68149 }
68150 #else
68151 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
68152 diff -urNp linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma.c linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma.c
68153 --- linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma.c 2011-05-19 00:06:34.000000000 -0400
68154 +++ linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma.c 2011-08-05 19:44:37.000000000 -0400
68155 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCR
68156 static unsigned int min_max_inline = 4096;
68157 static unsigned int max_max_inline = 65536;
68158
68159 -atomic_t rdma_stat_recv;
68160 -atomic_t rdma_stat_read;
68161 -atomic_t rdma_stat_write;
68162 -atomic_t rdma_stat_sq_starve;
68163 -atomic_t rdma_stat_rq_starve;
68164 -atomic_t rdma_stat_rq_poll;
68165 -atomic_t rdma_stat_rq_prod;
68166 -atomic_t rdma_stat_sq_poll;
68167 -atomic_t rdma_stat_sq_prod;
68168 +atomic_unchecked_t rdma_stat_recv;
68169 +atomic_unchecked_t rdma_stat_read;
68170 +atomic_unchecked_t rdma_stat_write;
68171 +atomic_unchecked_t rdma_stat_sq_starve;
68172 +atomic_unchecked_t rdma_stat_rq_starve;
68173 +atomic_unchecked_t rdma_stat_rq_poll;
68174 +atomic_unchecked_t rdma_stat_rq_prod;
68175 +atomic_unchecked_t rdma_stat_sq_poll;
68176 +atomic_unchecked_t rdma_stat_sq_prod;
68177
68178 /* Temporary NFS request map and context caches */
68179 struct kmem_cache *svc_rdma_map_cachep;
68180 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *ta
68181 len -= *ppos;
68182 if (len > *lenp)
68183 len = *lenp;
68184 - if (len && copy_to_user(buffer, str_buf, len))
68185 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
68186 return -EFAULT;
68187 *lenp = len;
68188 *ppos += len;
68189 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] =
68190 {
68191 .procname = "rdma_stat_read",
68192 .data = &rdma_stat_read,
68193 - .maxlen = sizeof(atomic_t),
68194 + .maxlen = sizeof(atomic_unchecked_t),
68195 .mode = 0644,
68196 .proc_handler = read_reset_stat,
68197 },
68198 {
68199 .procname = "rdma_stat_recv",
68200 .data = &rdma_stat_recv,
68201 - .maxlen = sizeof(atomic_t),
68202 + .maxlen = sizeof(atomic_unchecked_t),
68203 .mode = 0644,
68204 .proc_handler = read_reset_stat,
68205 },
68206 {
68207 .procname = "rdma_stat_write",
68208 .data = &rdma_stat_write,
68209 - .maxlen = sizeof(atomic_t),
68210 + .maxlen = sizeof(atomic_unchecked_t),
68211 .mode = 0644,
68212 .proc_handler = read_reset_stat,
68213 },
68214 {
68215 .procname = "rdma_stat_sq_starve",
68216 .data = &rdma_stat_sq_starve,
68217 - .maxlen = sizeof(atomic_t),
68218 + .maxlen = sizeof(atomic_unchecked_t),
68219 .mode = 0644,
68220 .proc_handler = read_reset_stat,
68221 },
68222 {
68223 .procname = "rdma_stat_rq_starve",
68224 .data = &rdma_stat_rq_starve,
68225 - .maxlen = sizeof(atomic_t),
68226 + .maxlen = sizeof(atomic_unchecked_t),
68227 .mode = 0644,
68228 .proc_handler = read_reset_stat,
68229 },
68230 {
68231 .procname = "rdma_stat_rq_poll",
68232 .data = &rdma_stat_rq_poll,
68233 - .maxlen = sizeof(atomic_t),
68234 + .maxlen = sizeof(atomic_unchecked_t),
68235 .mode = 0644,
68236 .proc_handler = read_reset_stat,
68237 },
68238 {
68239 .procname = "rdma_stat_rq_prod",
68240 .data = &rdma_stat_rq_prod,
68241 - .maxlen = sizeof(atomic_t),
68242 + .maxlen = sizeof(atomic_unchecked_t),
68243 .mode = 0644,
68244 .proc_handler = read_reset_stat,
68245 },
68246 {
68247 .procname = "rdma_stat_sq_poll",
68248 .data = &rdma_stat_sq_poll,
68249 - .maxlen = sizeof(atomic_t),
68250 + .maxlen = sizeof(atomic_unchecked_t),
68251 .mode = 0644,
68252 .proc_handler = read_reset_stat,
68253 },
68254 {
68255 .procname = "rdma_stat_sq_prod",
68256 .data = &rdma_stat_sq_prod,
68257 - .maxlen = sizeof(atomic_t),
68258 + .maxlen = sizeof(atomic_unchecked_t),
68259 .mode = 0644,
68260 .proc_handler = read_reset_stat,
68261 },
68262 diff -urNp linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
68263 --- linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-05-19 00:06:34.000000000 -0400
68264 +++ linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-08-05 19:44:37.000000000 -0400
68265 @@ -499,7 +499,7 @@ next_sge:
68266 svc_rdma_put_context(ctxt, 0);
68267 goto out;
68268 }
68269 - atomic_inc(&rdma_stat_read);
68270 + atomic_inc_unchecked(&rdma_stat_read);
68271
68272 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
68273 chl_map->ch[ch_no].count -= read_wr.num_sge;
68274 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
68275 dto_q);
68276 list_del_init(&ctxt->dto_q);
68277 } else {
68278 - atomic_inc(&rdma_stat_rq_starve);
68279 + atomic_inc_unchecked(&rdma_stat_rq_starve);
68280 clear_bit(XPT_DATA, &xprt->xpt_flags);
68281 ctxt = NULL;
68282 }
68283 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
68284 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
68285 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
68286 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
68287 - atomic_inc(&rdma_stat_recv);
68288 + atomic_inc_unchecked(&rdma_stat_recv);
68289
68290 /* Build up the XDR from the receive buffers. */
68291 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
68292 diff -urNp linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c
68293 --- linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-05-19 00:06:34.000000000 -0400
68294 +++ linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-08-05 19:44:37.000000000 -0400
68295 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdm
68296 write_wr.wr.rdma.remote_addr = to;
68297
68298 /* Post It */
68299 - atomic_inc(&rdma_stat_write);
68300 + atomic_inc_unchecked(&rdma_stat_write);
68301 if (svc_rdma_send(xprt, &write_wr))
68302 goto err;
68303 return 0;
68304 diff -urNp linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_transport.c
68305 --- linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-05-19 00:06:34.000000000 -0400
68306 +++ linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-08-05 19:44:37.000000000 -0400
68307 @@ -298,7 +298,7 @@ static void rq_cq_reap(struct svcxprt_rd
68308 return;
68309
68310 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
68311 - atomic_inc(&rdma_stat_rq_poll);
68312 + atomic_inc_unchecked(&rdma_stat_rq_poll);
68313
68314 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
68315 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
68316 @@ -320,7 +320,7 @@ static void rq_cq_reap(struct svcxprt_rd
68317 }
68318
68319 if (ctxt)
68320 - atomic_inc(&rdma_stat_rq_prod);
68321 + atomic_inc_unchecked(&rdma_stat_rq_prod);
68322
68323 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
68324 /*
68325 @@ -392,7 +392,7 @@ static void sq_cq_reap(struct svcxprt_rd
68326 return;
68327
68328 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
68329 - atomic_inc(&rdma_stat_sq_poll);
68330 + atomic_inc_unchecked(&rdma_stat_sq_poll);
68331 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
68332 if (wc.status != IB_WC_SUCCESS)
68333 /* Close the transport */
68334 @@ -410,7 +410,7 @@ static void sq_cq_reap(struct svcxprt_rd
68335 }
68336
68337 if (ctxt)
68338 - atomic_inc(&rdma_stat_sq_prod);
68339 + atomic_inc_unchecked(&rdma_stat_sq_prod);
68340 }
68341
68342 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
68343 @@ -1271,7 +1271,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
68344 spin_lock_bh(&xprt->sc_lock);
68345 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
68346 spin_unlock_bh(&xprt->sc_lock);
68347 - atomic_inc(&rdma_stat_sq_starve);
68348 + atomic_inc_unchecked(&rdma_stat_sq_starve);
68349
68350 /* See if we can opportunistically reap SQ WR to make room */
68351 sq_cq_reap(xprt);
68352 diff -urNp linux-2.6.39.4/net/sysctl_net.c linux-2.6.39.4/net/sysctl_net.c
68353 --- linux-2.6.39.4/net/sysctl_net.c 2011-05-19 00:06:34.000000000 -0400
68354 +++ linux-2.6.39.4/net/sysctl_net.c 2011-08-05 19:44:37.000000000 -0400
68355 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
68356 struct ctl_table *table)
68357 {
68358 /* Allow network administrator to have same access as root. */
68359 - if (capable(CAP_NET_ADMIN)) {
68360 + if (capable_nolog(CAP_NET_ADMIN)) {
68361 int mode = (table->mode >> 6) & 7;
68362 return (mode << 6) | (mode << 3) | mode;
68363 }
68364 diff -urNp linux-2.6.39.4/net/unix/af_unix.c linux-2.6.39.4/net/unix/af_unix.c
68365 --- linux-2.6.39.4/net/unix/af_unix.c 2011-05-19 00:06:34.000000000 -0400
68366 +++ linux-2.6.39.4/net/unix/af_unix.c 2011-08-05 19:44:37.000000000 -0400
68367 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(stru
68368 err = -ECONNREFUSED;
68369 if (!S_ISSOCK(inode->i_mode))
68370 goto put_fail;
68371 +
68372 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
68373 + err = -EACCES;
68374 + goto put_fail;
68375 + }
68376 +
68377 u = unix_find_socket_byinode(inode);
68378 if (!u)
68379 goto put_fail;
68380 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(stru
68381 if (u) {
68382 struct dentry *dentry;
68383 dentry = unix_sk(u)->dentry;
68384 +
68385 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
68386 + err = -EPERM;
68387 + sock_put(u);
68388 + goto fail;
68389 + }
68390 +
68391 if (dentry)
68392 touch_atime(unix_sk(u)->mnt, dentry);
68393 } else
68394 @@ -872,11 +885,18 @@ static int unix_bind(struct socket *sock
68395 err = security_path_mknod(&nd.path, dentry, mode, 0);
68396 if (err)
68397 goto out_mknod_drop_write;
68398 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
68399 + err = -EACCES;
68400 + goto out_mknod_drop_write;
68401 + }
68402 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
68403 out_mknod_drop_write:
68404 mnt_drop_write(nd.path.mnt);
68405 if (err)
68406 goto out_mknod_dput;
68407 +
68408 + gr_handle_create(dentry, nd.path.mnt);
68409 +
68410 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
68411 dput(nd.path.dentry);
68412 nd.path.dentry = dentry;
68413 @@ -2255,7 +2275,11 @@ static int unix_seq_show(struct seq_file
68414 unix_state_lock(s);
68415
68416 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
68417 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68418 + NULL,
68419 +#else
68420 s,
68421 +#endif
68422 atomic_read(&s->sk_refcnt),
68423 0,
68424 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
68425 diff -urNp linux-2.6.39.4/net/wireless/core.h linux-2.6.39.4/net/wireless/core.h
68426 --- linux-2.6.39.4/net/wireless/core.h 2011-05-19 00:06:34.000000000 -0400
68427 +++ linux-2.6.39.4/net/wireless/core.h 2011-08-05 20:34:06.000000000 -0400
68428 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
68429 struct mutex mtx;
68430
68431 /* rfkill support */
68432 - struct rfkill_ops rfkill_ops;
68433 + rfkill_ops_no_const rfkill_ops;
68434 struct rfkill *rfkill;
68435 struct work_struct rfkill_sync;
68436
68437 diff -urNp linux-2.6.39.4/net/wireless/wext-core.c linux-2.6.39.4/net/wireless/wext-core.c
68438 --- linux-2.6.39.4/net/wireless/wext-core.c 2011-05-19 00:06:34.000000000 -0400
68439 +++ linux-2.6.39.4/net/wireless/wext-core.c 2011-08-05 19:44:37.000000000 -0400
68440 @@ -746,8 +746,7 @@ static int ioctl_standard_iw_point(struc
68441 */
68442
68443 /* Support for very large requests */
68444 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
68445 - (user_length > descr->max_tokens)) {
68446 + if (user_length > descr->max_tokens) {
68447 /* Allow userspace to GET more than max so
68448 * we can support any size GET requests.
68449 * There is still a limit : -ENOMEM.
68450 @@ -784,22 +783,6 @@ static int ioctl_standard_iw_point(struc
68451 }
68452 }
68453
68454 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
68455 - /*
68456 - * If this is a GET, but not NOMAX, it means that the extra
68457 - * data is not bounded by userspace, but by max_tokens. Thus
68458 - * set the length to max_tokens. This matches the extra data
68459 - * allocation.
68460 - * The driver should fill it with the number of tokens it
68461 - * provided, and it may check iwp->length rather than having
68462 - * knowledge of max_tokens. If the driver doesn't change the
68463 - * iwp->length, this ioctl just copies back max_token tokens
68464 - * filled with zeroes. Hopefully the driver isn't claiming
68465 - * them to be valid data.
68466 - */
68467 - iwp->length = descr->max_tokens;
68468 - }
68469 -
68470 err = handler(dev, info, (union iwreq_data *) iwp, extra);
68471
68472 iwp->length += essid_compat;
68473 diff -urNp linux-2.6.39.4/net/xfrm/xfrm_policy.c linux-2.6.39.4/net/xfrm/xfrm_policy.c
68474 --- linux-2.6.39.4/net/xfrm/xfrm_policy.c 2011-05-19 00:06:34.000000000 -0400
68475 +++ linux-2.6.39.4/net/xfrm/xfrm_policy.c 2011-08-05 19:44:37.000000000 -0400
68476 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm
68477 {
68478 policy->walk.dead = 1;
68479
68480 - atomic_inc(&policy->genid);
68481 + atomic_inc_unchecked(&policy->genid);
68482
68483 if (del_timer(&policy->timer))
68484 xfrm_pol_put(policy);
68485 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct x
68486 hlist_add_head(&policy->bydst, chain);
68487 xfrm_pol_hold(policy);
68488 net->xfrm.policy_count[dir]++;
68489 - atomic_inc(&flow_cache_genid);
68490 + atomic_inc_unchecked(&flow_cache_genid);
68491 if (delpol)
68492 __xfrm_policy_unlink(delpol, dir);
68493 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
68494 @@ -1527,7 +1527,7 @@ free_dst:
68495 goto out;
68496 }
68497
68498 -static int inline
68499 +static inline int
68500 xfrm_dst_alloc_copy(void **target, const void *src, int size)
68501 {
68502 if (!*target) {
68503 @@ -1539,7 +1539,7 @@ xfrm_dst_alloc_copy(void **target, const
68504 return 0;
68505 }
68506
68507 -static int inline
68508 +static inline int
68509 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
68510 {
68511 #ifdef CONFIG_XFRM_SUB_POLICY
68512 @@ -1551,7 +1551,7 @@ xfrm_dst_update_parent(struct dst_entry
68513 #endif
68514 }
68515
68516 -static int inline
68517 +static inline int
68518 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
68519 {
68520 #ifdef CONFIG_XFRM_SUB_POLICY
68521 @@ -1645,7 +1645,7 @@ xfrm_resolve_and_create_bundle(struct xf
68522
68523 xdst->num_pols = num_pols;
68524 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
68525 - xdst->policy_genid = atomic_read(&pols[0]->genid);
68526 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
68527
68528 return xdst;
68529 }
68530 @@ -2332,7 +2332,7 @@ static int xfrm_bundle_ok(struct xfrm_ds
68531 if (xdst->xfrm_genid != dst->xfrm->genid)
68532 return 0;
68533 if (xdst->num_pols > 0 &&
68534 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
68535 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
68536 return 0;
68537
68538 mtu = dst_mtu(dst->child);
68539 @@ -2860,7 +2860,7 @@ static int xfrm_policy_migrate(struct xf
68540 sizeof(pol->xfrm_vec[i].saddr));
68541 pol->xfrm_vec[i].encap_family = mp->new_family;
68542 /* flush bundles */
68543 - atomic_inc(&pol->genid);
68544 + atomic_inc_unchecked(&pol->genid);
68545 }
68546 }
68547
68548 diff -urNp linux-2.6.39.4/net/xfrm/xfrm_user.c linux-2.6.39.4/net/xfrm/xfrm_user.c
68549 --- linux-2.6.39.4/net/xfrm/xfrm_user.c 2011-05-19 00:06:34.000000000 -0400
68550 +++ linux-2.6.39.4/net/xfrm/xfrm_user.c 2011-08-05 19:44:37.000000000 -0400
68551 @@ -1394,6 +1394,8 @@ static int copy_to_user_tmpl(struct xfrm
68552 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
68553 int i;
68554
68555 + pax_track_stack();
68556 +
68557 if (xp->xfrm_nr == 0)
68558 return 0;
68559
68560 @@ -2062,6 +2064,8 @@ static int xfrm_do_migrate(struct sk_buf
68561 int err;
68562 int n = 0;
68563
68564 + pax_track_stack();
68565 +
68566 if (attrs[XFRMA_MIGRATE] == NULL)
68567 return -EINVAL;
68568
68569 diff -urNp linux-2.6.39.4/scripts/basic/fixdep.c linux-2.6.39.4/scripts/basic/fixdep.c
68570 --- linux-2.6.39.4/scripts/basic/fixdep.c 2011-05-19 00:06:34.000000000 -0400
68571 +++ linux-2.6.39.4/scripts/basic/fixdep.c 2011-08-05 19:44:37.000000000 -0400
68572 @@ -235,9 +235,9 @@ static void use_config(const char *m, in
68573
68574 static void parse_config_file(const char *map, size_t len)
68575 {
68576 - const int *end = (const int *) (map + len);
68577 + const unsigned int *end = (const unsigned int *) (map + len);
68578 /* start at +1, so that p can never be < map */
68579 - const int *m = (const int *) map + 1;
68580 + const unsigned int *m = (const unsigned int *) map + 1;
68581 const char *p, *q;
68582
68583 for (; m < end; m++) {
68584 @@ -405,7 +405,7 @@ static void print_deps(void)
68585 static void traps(void)
68586 {
68587 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
68588 - int *p = (int *)test;
68589 + unsigned int *p = (unsigned int *)test;
68590
68591 if (*p != INT_CONF) {
68592 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
68593 diff -urNp linux-2.6.39.4/scripts/gcc-plugin.sh linux-2.6.39.4/scripts/gcc-plugin.sh
68594 --- linux-2.6.39.4/scripts/gcc-plugin.sh 1969-12-31 19:00:00.000000000 -0500
68595 +++ linux-2.6.39.4/scripts/gcc-plugin.sh 2011-08-05 20:34:06.000000000 -0400
68596 @@ -0,0 +1,3 @@
68597 +#!/bin/sh
68598 +
68599 +echo "#include \"gcc-plugin.h\"" | $* -x c - -c -o /dev/null -I`$* -print-file-name=plugin`/include>/dev/null 2>&1 && echo "y"
68600 diff -urNp linux-2.6.39.4/scripts/Makefile.build linux-2.6.39.4/scripts/Makefile.build
68601 --- linux-2.6.39.4/scripts/Makefile.build 2011-05-19 00:06:34.000000000 -0400
68602 +++ linux-2.6.39.4/scripts/Makefile.build 2011-08-05 19:44:37.000000000 -0400
68603 @@ -93,7 +93,7 @@ endif
68604 endif
68605
68606 # Do not include host rules unless needed
68607 -ifneq ($(hostprogs-y)$(hostprogs-m),)
68608 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
68609 include scripts/Makefile.host
68610 endif
68611
68612 diff -urNp linux-2.6.39.4/scripts/Makefile.clean linux-2.6.39.4/scripts/Makefile.clean
68613 --- linux-2.6.39.4/scripts/Makefile.clean 2011-05-19 00:06:34.000000000 -0400
68614 +++ linux-2.6.39.4/scripts/Makefile.clean 2011-08-05 19:44:37.000000000 -0400
68615 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
68616 __clean-files := $(extra-y) $(always) \
68617 $(targets) $(clean-files) \
68618 $(host-progs) \
68619 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
68620 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
68621 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
68622
68623 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
68624
68625 diff -urNp linux-2.6.39.4/scripts/Makefile.host linux-2.6.39.4/scripts/Makefile.host
68626 --- linux-2.6.39.4/scripts/Makefile.host 2011-05-19 00:06:34.000000000 -0400
68627 +++ linux-2.6.39.4/scripts/Makefile.host 2011-08-05 19:44:37.000000000 -0400
68628 @@ -31,6 +31,7 @@
68629 # Note: Shared libraries consisting of C++ files are not supported
68630
68631 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
68632 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
68633
68634 # C code
68635 # Executables compiled from a single .c file
68636 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
68637 # Shared libaries (only .c supported)
68638 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
68639 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
68640 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
68641 # Remove .so files from "xxx-objs"
68642 host-cobjs := $(filter-out %.so,$(host-cobjs))
68643
68644 diff -urNp linux-2.6.39.4/scripts/mod/file2alias.c linux-2.6.39.4/scripts/mod/file2alias.c
68645 --- linux-2.6.39.4/scripts/mod/file2alias.c 2011-05-19 00:06:34.000000000 -0400
68646 +++ linux-2.6.39.4/scripts/mod/file2alias.c 2011-08-05 19:44:37.000000000 -0400
68647 @@ -72,7 +72,7 @@ static void device_id_check(const char *
68648 unsigned long size, unsigned long id_size,
68649 void *symval)
68650 {
68651 - int i;
68652 + unsigned int i;
68653
68654 if (size % id_size || size < id_size) {
68655 if (cross_build != 0)
68656 @@ -102,7 +102,7 @@ static void device_id_check(const char *
68657 /* USB is special because the bcdDevice can be matched against a numeric range */
68658 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
68659 static void do_usb_entry(struct usb_device_id *id,
68660 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
68661 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
68662 unsigned char range_lo, unsigned char range_hi,
68663 unsigned char max, struct module *mod)
68664 {
68665 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *sy
68666 for (i = 0; i < count; i++) {
68667 const char *id = (char *)devs[i].id;
68668 char acpi_id[sizeof(devs[0].id)];
68669 - int j;
68670 + unsigned int j;
68671
68672 buf_printf(&mod->dev_table_buf,
68673 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
68674 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *sy
68675
68676 for (j = 0; j < PNP_MAX_DEVICES; j++) {
68677 const char *id = (char *)card->devs[j].id;
68678 - int i2, j2;
68679 + unsigned int i2, j2;
68680 int dup = 0;
68681
68682 if (!id[0])
68683 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *sy
68684 /* add an individual alias for every device entry */
68685 if (!dup) {
68686 char acpi_id[sizeof(card->devs[0].id)];
68687 - int k;
68688 + unsigned int k;
68689
68690 buf_printf(&mod->dev_table_buf,
68691 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
68692 @@ -768,7 +768,7 @@ static void dmi_ascii_filter(char *d, co
68693 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
68694 char *alias)
68695 {
68696 - int i, j;
68697 + unsigned int i, j;
68698
68699 sprintf(alias, "dmi*");
68700
68701 diff -urNp linux-2.6.39.4/scripts/mod/modpost.c linux-2.6.39.4/scripts/mod/modpost.c
68702 --- linux-2.6.39.4/scripts/mod/modpost.c 2011-05-19 00:06:34.000000000 -0400
68703 +++ linux-2.6.39.4/scripts/mod/modpost.c 2011-08-05 19:44:37.000000000 -0400
68704 @@ -896,6 +896,7 @@ enum mismatch {
68705 ANY_INIT_TO_ANY_EXIT,
68706 ANY_EXIT_TO_ANY_INIT,
68707 EXPORT_TO_INIT_EXIT,
68708 + DATA_TO_TEXT
68709 };
68710
68711 struct sectioncheck {
68712 @@ -1004,6 +1005,12 @@ const struct sectioncheck sectioncheck[]
68713 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
68714 .mismatch = EXPORT_TO_INIT_EXIT,
68715 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
68716 +},
68717 +/* Do not reference code from writable data */
68718 +{
68719 + .fromsec = { DATA_SECTIONS, NULL },
68720 + .tosec = { TEXT_SECTIONS, NULL },
68721 + .mismatch = DATA_TO_TEXT
68722 }
68723 };
68724
68725 @@ -1126,10 +1133,10 @@ static Elf_Sym *find_elf_symbol(struct e
68726 continue;
68727 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
68728 continue;
68729 - if (sym->st_value == addr)
68730 - return sym;
68731 /* Find a symbol nearby - addr are maybe negative */
68732 d = sym->st_value - addr;
68733 + if (d == 0)
68734 + return sym;
68735 if (d < 0)
68736 d = addr - sym->st_value;
68737 if (d < distance) {
68738 @@ -1408,6 +1415,14 @@ static void report_sec_mismatch(const ch
68739 tosym, prl_to, prl_to, tosym);
68740 free(prl_to);
68741 break;
68742 + case DATA_TO_TEXT:
68743 +/*
68744 + fprintf(stderr,
68745 + "The variable %s references\n"
68746 + "the %s %s%s%s\n",
68747 + fromsym, to, sec2annotation(tosec), tosym, to_p);
68748 +*/
68749 + break;
68750 }
68751 fprintf(stderr, "\n");
68752 }
68753 @@ -1633,7 +1648,7 @@ static void section_rel(const char *modn
68754 static void check_sec_ref(struct module *mod, const char *modname,
68755 struct elf_info *elf)
68756 {
68757 - int i;
68758 + unsigned int i;
68759 Elf_Shdr *sechdrs = elf->sechdrs;
68760
68761 /* Walk through all sections */
68762 @@ -1731,7 +1746,7 @@ void __attribute__((format(printf, 2, 3)
68763 va_end(ap);
68764 }
68765
68766 -void buf_write(struct buffer *buf, const char *s, int len)
68767 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
68768 {
68769 if (buf->size - buf->pos < len) {
68770 buf->size += len + SZ;
68771 @@ -1943,7 +1958,7 @@ static void write_if_changed(struct buff
68772 if (fstat(fileno(file), &st) < 0)
68773 goto close_write;
68774
68775 - if (st.st_size != b->pos)
68776 + if (st.st_size != (off_t)b->pos)
68777 goto close_write;
68778
68779 tmp = NOFAIL(malloc(b->pos));
68780 diff -urNp linux-2.6.39.4/scripts/mod/modpost.h linux-2.6.39.4/scripts/mod/modpost.h
68781 --- linux-2.6.39.4/scripts/mod/modpost.h 2011-05-19 00:06:34.000000000 -0400
68782 +++ linux-2.6.39.4/scripts/mod/modpost.h 2011-08-05 19:44:37.000000000 -0400
68783 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
68784
68785 struct buffer {
68786 char *p;
68787 - int pos;
68788 - int size;
68789 + unsigned int pos;
68790 + unsigned int size;
68791 };
68792
68793 void __attribute__((format(printf, 2, 3)))
68794 buf_printf(struct buffer *buf, const char *fmt, ...);
68795
68796 void
68797 -buf_write(struct buffer *buf, const char *s, int len);
68798 +buf_write(struct buffer *buf, const char *s, unsigned int len);
68799
68800 struct module {
68801 struct module *next;
68802 diff -urNp linux-2.6.39.4/scripts/mod/sumversion.c linux-2.6.39.4/scripts/mod/sumversion.c
68803 --- linux-2.6.39.4/scripts/mod/sumversion.c 2011-05-19 00:06:34.000000000 -0400
68804 +++ linux-2.6.39.4/scripts/mod/sumversion.c 2011-08-05 19:44:37.000000000 -0400
68805 @@ -470,7 +470,7 @@ static void write_version(const char *fi
68806 goto out;
68807 }
68808
68809 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
68810 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
68811 warn("writing sum in %s failed: %s\n",
68812 filename, strerror(errno));
68813 goto out;
68814 diff -urNp linux-2.6.39.4/scripts/pnmtologo.c linux-2.6.39.4/scripts/pnmtologo.c
68815 --- linux-2.6.39.4/scripts/pnmtologo.c 2011-05-19 00:06:34.000000000 -0400
68816 +++ linux-2.6.39.4/scripts/pnmtologo.c 2011-08-05 19:44:37.000000000 -0400
68817 @@ -237,14 +237,14 @@ static void write_header(void)
68818 fprintf(out, " * Linux logo %s\n", logoname);
68819 fputs(" */\n\n", out);
68820 fputs("#include <linux/linux_logo.h>\n\n", out);
68821 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
68822 + fprintf(out, "static unsigned char %s_data[] = {\n",
68823 logoname);
68824 }
68825
68826 static void write_footer(void)
68827 {
68828 fputs("\n};\n\n", out);
68829 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
68830 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
68831 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
68832 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
68833 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
68834 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
68835 fputs("\n};\n\n", out);
68836
68837 /* write logo clut */
68838 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
68839 + fprintf(out, "static unsigned char %s_clut[] = {\n",
68840 logoname);
68841 write_hex_cnt = 0;
68842 for (i = 0; i < logo_clutsize; i++) {
68843 diff -urNp linux-2.6.39.4/security/apparmor/lsm.c linux-2.6.39.4/security/apparmor/lsm.c
68844 --- linux-2.6.39.4/security/apparmor/lsm.c 2011-06-25 12:55:23.000000000 -0400
68845 +++ linux-2.6.39.4/security/apparmor/lsm.c 2011-08-05 20:34:06.000000000 -0400
68846 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struc
68847 return error;
68848 }
68849
68850 -static struct security_operations apparmor_ops = {
68851 +static struct security_operations apparmor_ops __read_only = {
68852 .name = "apparmor",
68853
68854 .ptrace_access_check = apparmor_ptrace_access_check,
68855 diff -urNp linux-2.6.39.4/security/commoncap.c linux-2.6.39.4/security/commoncap.c
68856 --- linux-2.6.39.4/security/commoncap.c 2011-05-19 00:06:34.000000000 -0400
68857 +++ linux-2.6.39.4/security/commoncap.c 2011-08-05 19:44:37.000000000 -0400
68858 @@ -28,6 +28,7 @@
68859 #include <linux/prctl.h>
68860 #include <linux/securebits.h>
68861 #include <linux/user_namespace.h>
68862 +#include <net/sock.h>
68863
68864 /*
68865 * If a non-root user executes a setuid-root binary in
68866 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, st
68867
68868 int cap_netlink_recv(struct sk_buff *skb, int cap)
68869 {
68870 - if (!cap_raised(current_cap(), cap))
68871 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
68872 return -EPERM;
68873 return 0;
68874 }
68875 @@ -580,6 +581,9 @@ int cap_bprm_secureexec(struct linux_bin
68876 {
68877 const struct cred *cred = current_cred();
68878
68879 + if (gr_acl_enable_at_secure())
68880 + return 1;
68881 +
68882 if (cred->uid != 0) {
68883 if (bprm->cap_effective)
68884 return 1;
68885 diff -urNp linux-2.6.39.4/security/integrity/ima/ima_api.c linux-2.6.39.4/security/integrity/ima/ima_api.c
68886 --- linux-2.6.39.4/security/integrity/ima/ima_api.c 2011-05-19 00:06:34.000000000 -0400
68887 +++ linux-2.6.39.4/security/integrity/ima/ima_api.c 2011-08-05 19:44:37.000000000 -0400
68888 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *ino
68889 int result;
68890
68891 /* can overflow, only indicator */
68892 - atomic_long_inc(&ima_htable.violations);
68893 + atomic_long_inc_unchecked(&ima_htable.violations);
68894
68895 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
68896 if (!entry) {
68897 diff -urNp linux-2.6.39.4/security/integrity/ima/ima_fs.c linux-2.6.39.4/security/integrity/ima/ima_fs.c
68898 --- linux-2.6.39.4/security/integrity/ima/ima_fs.c 2011-05-19 00:06:34.000000000 -0400
68899 +++ linux-2.6.39.4/security/integrity/ima/ima_fs.c 2011-08-05 19:44:37.000000000 -0400
68900 @@ -28,12 +28,12 @@
68901 static int valid_policy = 1;
68902 #define TMPBUFLEN 12
68903 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
68904 - loff_t *ppos, atomic_long_t *val)
68905 + loff_t *ppos, atomic_long_unchecked_t *val)
68906 {
68907 char tmpbuf[TMPBUFLEN];
68908 ssize_t len;
68909
68910 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
68911 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
68912 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
68913 }
68914
68915 diff -urNp linux-2.6.39.4/security/integrity/ima/ima.h linux-2.6.39.4/security/integrity/ima/ima.h
68916 --- linux-2.6.39.4/security/integrity/ima/ima.h 2011-05-19 00:06:34.000000000 -0400
68917 +++ linux-2.6.39.4/security/integrity/ima/ima.h 2011-08-05 19:44:37.000000000 -0400
68918 @@ -85,8 +85,8 @@ void ima_add_violation(struct inode *ino
68919 extern spinlock_t ima_queue_lock;
68920
68921 struct ima_h_table {
68922 - atomic_long_t len; /* number of stored measurements in the list */
68923 - atomic_long_t violations;
68924 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
68925 + atomic_long_unchecked_t violations;
68926 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
68927 };
68928 extern struct ima_h_table ima_htable;
68929 diff -urNp linux-2.6.39.4/security/integrity/ima/ima_queue.c linux-2.6.39.4/security/integrity/ima/ima_queue.c
68930 --- linux-2.6.39.4/security/integrity/ima/ima_queue.c 2011-05-19 00:06:34.000000000 -0400
68931 +++ linux-2.6.39.4/security/integrity/ima/ima_queue.c 2011-08-05 19:44:37.000000000 -0400
68932 @@ -79,7 +79,7 @@ static int ima_add_digest_entry(struct i
68933 INIT_LIST_HEAD(&qe->later);
68934 list_add_tail_rcu(&qe->later, &ima_measurements);
68935
68936 - atomic_long_inc(&ima_htable.len);
68937 + atomic_long_inc_unchecked(&ima_htable.len);
68938 key = ima_hash_key(entry->digest);
68939 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
68940 return 0;
68941 diff -urNp linux-2.6.39.4/security/Kconfig linux-2.6.39.4/security/Kconfig
68942 --- linux-2.6.39.4/security/Kconfig 2011-05-19 00:06:34.000000000 -0400
68943 +++ linux-2.6.39.4/security/Kconfig 2011-08-05 19:44:37.000000000 -0400
68944 @@ -4,6 +4,554 @@
68945
68946 menu "Security options"
68947
68948 +source grsecurity/Kconfig
68949 +
68950 +menu "PaX"
68951 +
68952 + config ARCH_TRACK_EXEC_LIMIT
68953 + bool
68954 +
68955 + config PAX_PER_CPU_PGD
68956 + bool
68957 +
68958 + config TASK_SIZE_MAX_SHIFT
68959 + int
68960 + depends on X86_64
68961 + default 47 if !PAX_PER_CPU_PGD
68962 + default 42 if PAX_PER_CPU_PGD
68963 +
68964 + config PAX_ENABLE_PAE
68965 + bool
68966 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
68967 +
68968 +config PAX
68969 + bool "Enable various PaX features"
68970 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
68971 + help
68972 + This allows you to enable various PaX features. PaX adds
68973 + intrusion prevention mechanisms to the kernel that reduce
68974 + the risks posed by exploitable memory corruption bugs.
68975 +
68976 +menu "PaX Control"
68977 + depends on PAX
68978 +
68979 +config PAX_SOFTMODE
68980 + bool 'Support soft mode'
68981 + select PAX_PT_PAX_FLAGS
68982 + help
68983 + Enabling this option will allow you to run PaX in soft mode, that
68984 + is, PaX features will not be enforced by default, only on executables
68985 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
68986 + is the only way to mark executables for soft mode use.
68987 +
68988 + Soft mode can be activated by using the "pax_softmode=1" kernel command
68989 + line option on boot. Furthermore you can control various PaX features
68990 + at runtime via the entries in /proc/sys/kernel/pax.
68991 +
68992 +config PAX_EI_PAX
68993 + bool 'Use legacy ELF header marking'
68994 + help
68995 + Enabling this option will allow you to control PaX features on
68996 + a per executable basis via the 'chpax' utility available at
68997 + http://pax.grsecurity.net/. The control flags will be read from
68998 + an otherwise reserved part of the ELF header. This marking has
68999 + numerous drawbacks (no support for soft-mode, toolchain does not
69000 + know about the non-standard use of the ELF header) therefore it
69001 + has been deprecated in favour of PT_PAX_FLAGS support.
69002 +
69003 + Note that if you enable PT_PAX_FLAGS marking support as well,
69004 + the PT_PAX_FLAGS marks will override the legacy EI_PAX marks.
69005 +
69006 +config PAX_PT_PAX_FLAGS
69007 + bool 'Use ELF program header marking'
69008 + help
69009 + Enabling this option will allow you to control PaX features on
69010 + a per executable basis via the 'paxctl' utility available at
69011 + http://pax.grsecurity.net/. The control flags will be read from
69012 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
69013 + has the benefits of supporting both soft mode and being fully
69014 + integrated into the toolchain (the binutils patch is available
69015 + from http://pax.grsecurity.net).
69016 +
69017 + If your toolchain does not support PT_PAX_FLAGS markings,
69018 + you can create one in most cases with 'paxctl -C'.
69019 +
69020 + Note that if you enable the legacy EI_PAX marking support as well,
69021 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
69022 +
69023 +choice
69024 + prompt 'MAC system integration'
69025 + default PAX_HAVE_ACL_FLAGS
69026 + help
69027 + Mandatory Access Control systems have the option of controlling
69028 + PaX flags on a per executable basis, choose the method supported
69029 + by your particular system.
69030 +
69031 + - "none": if your MAC system does not interact with PaX,
69032 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
69033 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
69034 +
69035 + NOTE: this option is for developers/integrators only.
69036 +
69037 + config PAX_NO_ACL_FLAGS
69038 + bool 'none'
69039 +
69040 + config PAX_HAVE_ACL_FLAGS
69041 + bool 'direct'
69042 +
69043 + config PAX_HOOK_ACL_FLAGS
69044 + bool 'hook'
69045 +endchoice
69046 +
69047 +endmenu
69048 +
69049 +menu "Non-executable pages"
69050 + depends on PAX
69051 +
69052 +config PAX_NOEXEC
69053 + bool "Enforce non-executable pages"
69054 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
69055 + help
69056 + By design some architectures do not allow for protecting memory
69057 + pages against execution or even if they do, Linux does not make
69058 + use of this feature. In practice this means that if a page is
69059 + readable (such as the stack or heap) it is also executable.
69060 +
69061 + There is a well known exploit technique that makes use of this
69062 + fact and a common programming mistake where an attacker can
69063 + introduce code of his choice somewhere in the attacked program's
69064 + memory (typically the stack or the heap) and then execute it.
69065 +
69066 + If the attacked program was running with different (typically
69067 + higher) privileges than that of the attacker, then he can elevate
69068 + his own privilege level (e.g. get a root shell, write to files for
69069 + which he does not have write access to, etc).
69070 +
69071 + Enabling this option will let you choose from various features
69072 + that prevent the injection and execution of 'foreign' code in
69073 + a program.
69074 +
69075 + This will also break programs that rely on the old behaviour and
69076 + expect that dynamically allocated memory via the malloc() family
69077 + of functions is executable (which it is not). Notable examples
69078 + are the XFree86 4.x server, the java runtime and wine.
69079 +
69080 +config PAX_PAGEEXEC
69081 + bool "Paging based non-executable pages"
69082 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
69083 + select S390_SWITCH_AMODE if S390
69084 + select S390_EXEC_PROTECT if S390
69085 + select ARCH_TRACK_EXEC_LIMIT if X86_32
69086 + help
69087 + This implementation is based on the paging feature of the CPU.
69088 + On i386 without hardware non-executable bit support there is a
69089 + variable but usually low performance impact, however on Intel's
69090 + P4 core based CPUs it is very high so you should not enable this
69091 + for kernels meant to be used on such CPUs.
69092 +
69093 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
69094 + with hardware non-executable bit support there is no performance
69095 + impact, on ppc the impact is negligible.
69096 +
69097 + Note that several architectures require various emulations due to
69098 + badly designed userland ABIs, this will cause a performance impact
69099 + but will disappear as soon as userland is fixed. For example, ppc
69100 + userland MUST have been built with secure-plt by a recent toolchain.
69101 +
69102 +config PAX_SEGMEXEC
69103 + bool "Segmentation based non-executable pages"
69104 + depends on PAX_NOEXEC && X86_32
69105 + help
69106 + This implementation is based on the segmentation feature of the
69107 + CPU and has a very small performance impact, however applications
69108 + will be limited to a 1.5 GB address space instead of the normal
69109 + 3 GB.
69110 +
69111 +config PAX_EMUTRAMP
69112 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
69113 + default y if PARISC
69114 + help
69115 + There are some programs and libraries that for one reason or
69116 + another attempt to execute special small code snippets from
69117 + non-executable memory pages. Most notable examples are the
69118 + signal handler return code generated by the kernel itself and
69119 + the GCC trampolines.
69120 +
69121 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
69122 + such programs will no longer work under your kernel.
69123 +
69124 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
69125 + utilities to enable trampoline emulation for the affected programs
69126 + yet still have the protection provided by the non-executable pages.
69127 +
69128 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
69129 + your system will not even boot.
69130 +
69131 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
69132 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
69133 + for the affected files.
69134 +
69135 + NOTE: enabling this feature *may* open up a loophole in the
69136 + protection provided by non-executable pages that an attacker
69137 + could abuse. Therefore the best solution is to not have any
69138 + files on your system that would require this option. This can
69139 + be achieved by not using libc5 (which relies on the kernel
69140 + signal handler return code) and not using or rewriting programs
69141 + that make use of the nested function implementation of GCC.
69142 + Skilled users can just fix GCC itself so that it implements
69143 + nested function calls in a way that does not interfere with PaX.
69144 +
69145 +config PAX_EMUSIGRT
69146 + bool "Automatically emulate sigreturn trampolines"
69147 + depends on PAX_EMUTRAMP && PARISC
69148 + default y
69149 + help
69150 + Enabling this option will have the kernel automatically detect
69151 + and emulate signal return trampolines executing on the stack
69152 + that would otherwise lead to task termination.
69153 +
69154 + This solution is intended as a temporary one for users with
69155 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
69156 + Modula-3 runtime, etc) or executables linked to such, basically
69157 + everything that does not specify its own SA_RESTORER function in
69158 + normal executable memory like glibc 2.1+ does.
69159 +
69160 + On parisc you MUST enable this option, otherwise your system will
69161 + not even boot.
69162 +
69163 + NOTE: this feature cannot be disabled on a per executable basis
69164 + and since it *does* open up a loophole in the protection provided
69165 + by non-executable pages, the best solution is to not have any
69166 + files on your system that would require this option.
69167 +
69168 +config PAX_MPROTECT
69169 + bool "Restrict mprotect()"
69170 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
69171 + help
69172 + Enabling this option will prevent programs from
69173 + - changing the executable status of memory pages that were
69174 + not originally created as executable,
69175 + - making read-only executable pages writable again,
69176 + - creating executable pages from anonymous memory,
69177 + - making read-only-after-relocations (RELRO) data pages writable again.
69178 +
69179 + You should say Y here to complete the protection provided by
69180 + the enforcement of non-executable pages.
69181 +
69182 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
69183 + this feature on a per file basis.
69184 +
69185 +config PAX_MPROTECT_COMPAT
69186 + bool "Use legacy/compat protection demoting (read help)"
69187 + depends on PAX_MPROTECT
69188 + default n
69189 + help
69190 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
69191 + by sending the proper error code to the application. For some broken
69192 + userland, this can cause problems with Python or other applications. The
69193 + current implementation however allows for applications like clamav to
69194 + detect if JIT compilation/execution is allowed and to fall back gracefully
69195 + to an interpreter-based mode if it does not. While we encourage everyone
69196 + to use the current implementation as-is and push upstream to fix broken
69197 + userland (note that the RWX logging option can assist with this), in some
69198 + environments this may not be possible. Having to disable MPROTECT
69199 + completely on certain binaries reduces the security benefit of PaX,
69200 + so this option is provided for those environments to revert to the old
69201 + behavior.
69202 +
69203 +config PAX_ELFRELOCS
69204 + bool "Allow ELF text relocations (read help)"
69205 + depends on PAX_MPROTECT
69206 + default n
69207 + help
69208 + Non-executable pages and mprotect() restrictions are effective
69209 + in preventing the introduction of new executable code into an
69210 + attacked task's address space. There remain only two venues
69211 + for this kind of attack: if the attacker can execute already
69212 + existing code in the attacked task then he can either have it
69213 + create and mmap() a file containing his code or have it mmap()
69214 + an already existing ELF library that does not have position
69215 + independent code in it and use mprotect() on it to make it
69216 + writable and copy his code there. While protecting against
69217 + the former approach is beyond PaX, the latter can be prevented
69218 + by having only PIC ELF libraries on one's system (which do not
69219 + need to relocate their code). If you are sure this is your case,
69220 + as is the case with all modern Linux distributions, then leave
69221 + this option disabled. You should say 'n' here.
69222 +
69223 +config PAX_ETEXECRELOCS
69224 + bool "Allow ELF ET_EXEC text relocations"
69225 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
69226 + select PAX_ELFRELOCS
69227 + default y
69228 + help
69229 + On some architectures there are incorrectly created applications
69230 + that require text relocations and would not work without enabling
69231 + this option. If you are an alpha, ia64 or parisc user, you should
69232 + enable this option and disable it once you have made sure that
69233 + none of your applications need it.
69234 +
69235 +config PAX_EMUPLT
69236 + bool "Automatically emulate ELF PLT"
69237 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
69238 + default y
69239 + help
69240 + Enabling this option will have the kernel automatically detect
69241 + and emulate the Procedure Linkage Table entries in ELF files.
69242 + On some architectures such entries are in writable memory, and
69243 + become non-executable leading to task termination. Therefore
69244 + it is mandatory that you enable this option on alpha, parisc,
69245 + sparc and sparc64, otherwise your system would not even boot.
69246 +
69247 + NOTE: this feature *does* open up a loophole in the protection
69248 + provided by the non-executable pages, therefore the proper
69249 + solution is to modify the toolchain to produce a PLT that does
69250 + not need to be writable.
69251 +
69252 +config PAX_DLRESOLVE
69253 + bool 'Emulate old glibc resolver stub'
69254 + depends on PAX_EMUPLT && SPARC
69255 + default n
69256 + help
69257 + This option is needed if userland has an old glibc (before 2.4)
69258 + that puts a 'save' instruction into the runtime generated resolver
69259 + stub that needs special emulation.
69260 +
69261 +config PAX_KERNEXEC
69262 + bool "Enforce non-executable kernel pages"
69263 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
69264 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
69265 + help
69266 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
69267 + that is, enabling this option will make it harder to inject
69268 + and execute 'foreign' code in kernel memory itself.
69269 +
69270 + Note that on x86_64 kernels there is a known regression when
69271 + this feature and KVM/VMX are both enabled in the host kernel.
69272 +
69273 +config PAX_KERNEXEC_MODULE_TEXT
69274 + int "Minimum amount of memory reserved for module code"
69275 + default "4"
69276 + depends on PAX_KERNEXEC && X86_32 && MODULES
69277 + help
69278 + Due to implementation details the kernel must reserve a fixed
69279 + amount of memory for module code at compile time that cannot be
69280 + changed at runtime. Here you can specify the minimum amount
69281 + in MB that will be reserved. Due to the same implementation
69282 + details this size will always be rounded up to the next 2/4 MB
69283 + boundary (depends on PAE) so the actually available memory for
69284 + module code will usually be more than this minimum.
69285 +
69286 + The default 4 MB should be enough for most users but if you have
69287 + an excessive number of modules (e.g., most distribution configs
69288 + compile many drivers as modules) or use huge modules such as
69289 + nvidia's kernel driver, you will need to adjust this amount.
69290 + A good rule of thumb is to look at your currently loaded kernel
69291 + modules and add up their sizes.
69292 +
69293 +endmenu
69294 +
69295 +menu "Address Space Layout Randomization"
69296 + depends on PAX
69297 +
69298 +config PAX_ASLR
69299 + bool "Address Space Layout Randomization"
69300 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
69301 + help
69302 + Many if not most exploit techniques rely on the knowledge of
69303 + certain addresses in the attacked program. The following options
69304 + will allow the kernel to apply a certain amount of randomization
69305 + to specific parts of the program thereby forcing an attacker to
69306 + guess them in most cases. Any failed guess will most likely crash
69307 + the attacked program which allows the kernel to detect such attempts
69308 + and react on them. PaX itself provides no reaction mechanisms,
69309 + instead it is strongly encouraged that you make use of Nergal's
69310 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
69311 + (http://www.grsecurity.net/) built-in crash detection features or
69312 + develop one yourself.
69313 +
69314 + By saying Y here you can choose to randomize the following areas:
69315 + - top of the task's kernel stack
69316 + - top of the task's userland stack
69317 + - base address for mmap() requests that do not specify one
69318 + (this includes all libraries)
69319 + - base address of the main executable
69320 +
69321 + It is strongly recommended to say Y here as address space layout
69322 + randomization has negligible impact on performance yet it provides
69323 + a very effective protection.
69324 +
69325 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
69326 + this feature on a per file basis.
69327 +
69328 +config PAX_RANDKSTACK
69329 + bool "Randomize kernel stack base"
69330 + depends on PAX_ASLR && X86_TSC && X86
69331 + help
69332 + By saying Y here the kernel will randomize every task's kernel
69333 + stack on every system call. This will not only force an attacker
69334 + to guess it but also prevent him from making use of possible
69335 + leaked information about it.
69336 +
69337 + Since the kernel stack is a rather scarce resource, randomization
69338 + may cause unexpected stack overflows, therefore you should very
69339 + carefully test your system. Note that once enabled in the kernel
69340 + configuration, this feature cannot be disabled on a per file basis.
69341 +
69342 +config PAX_RANDUSTACK
69343 + bool "Randomize user stack base"
69344 + depends on PAX_ASLR
69345 + help
69346 + By saying Y here the kernel will randomize every task's userland
69347 + stack. The randomization is done in two steps where the second
69348 + one may apply a big amount of shift to the top of the stack and
69349 + cause problems for programs that want to use lots of memory (more
69350 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
69351 + For this reason the second step can be controlled by 'chpax' or
69352 + 'paxctl' on a per file basis.
69353 +
69354 +config PAX_RANDMMAP
69355 + bool "Randomize mmap() base"
69356 + depends on PAX_ASLR
69357 + help
69358 + By saying Y here the kernel will use a randomized base address for
69359 + mmap() requests that do not specify one themselves. As a result
69360 + all dynamically loaded libraries will appear at random addresses
69361 + and therefore be harder to exploit by a technique where an attacker
69362 + attempts to execute library code for his purposes (e.g. spawn a
69363 + shell from an exploited program that is running at an elevated
69364 + privilege level).
69365 +
69366 + Furthermore, if a program is relinked as a dynamic ELF file, its
69367 + base address will be randomized as well, completing the full
69368 + randomization of the address space layout. Attacking such programs
69369 + becomes a guess game. You can find an example of doing this at
69370 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
69371 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
69372 +
69373 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
69374 + feature on a per file basis.
69375 +
69376 +endmenu
69377 +
69378 +menu "Miscellaneous hardening features"
69379 +
69380 +config PAX_MEMORY_SANITIZE
69381 + bool "Sanitize all freed memory"
69382 + help
69383 + By saying Y here the kernel will erase memory pages as soon as they
69384 + are freed. This in turn reduces the lifetime of data stored in the
69385 + pages, making it less likely that sensitive information such as
69386 + passwords, cryptographic secrets, etc stay in memory for too long.
69387 +
69388 + This is especially useful for programs whose runtime is short, long
69389 + lived processes and the kernel itself benefit from this as long as
69390 + they operate on whole memory pages and ensure timely freeing of pages
69391 + that may hold sensitive information.
69392 +
69393 + The tradeoff is performance impact, on a single CPU system kernel
69394 + compilation sees a 3% slowdown, other systems and workloads may vary
69395 + and you are advised to test this feature on your expected workload
69396 + before deploying it.
69397 +
69398 + Note that this feature does not protect data stored in live pages,
69399 + e.g., process memory swapped to disk may stay there for a long time.
69400 +
69401 +config PAX_MEMORY_STACKLEAK
69402 + bool "Sanitize kernel stack"
69403 + depends on X86
69404 + help
69405 + By saying Y here the kernel will erase the kernel stack before it
69406 + returns from a system call. This in turn reduces the information
69407 + that a kernel stack leak bug can reveal.
69408 +
69409 + Note that such a bug can still leak information that was put on
69410 + the stack by the current system call (the one eventually triggering
69411 + the bug) but traces of earlier system calls on the kernel stack
69412 + cannot leak anymore.
69413 +
69414 + The tradeoff is performance impact: on a single CPU system kernel
69415 + compilation sees a 1% slowdown, other systems and workloads may vary
69416 + and you are advised to test this feature on your expected workload
69417 + before deploying it.
69418 +
69419 + Note: full support for this feature requires gcc with plugin support
69420 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
69421 + is not supported). Using older gcc versions means that functions
69422 + with large enough stack frames may leave uninitialized memory behind
69423 + that may be exposed to a later syscall leaking the stack.
69424 +
69425 +config PAX_MEMORY_UDEREF
69426 + bool "Prevent invalid userland pointer dereference"
69427 + depends on X86 && !UML_X86 && !XEN
69428 + select PAX_PER_CPU_PGD if X86_64
69429 + help
69430 + By saying Y here the kernel will be prevented from dereferencing
69431 + userland pointers in contexts where the kernel expects only kernel
69432 + pointers. This is both a useful runtime debugging feature and a
69433 + security measure that prevents exploiting a class of kernel bugs.
69434 +
69435 + The tradeoff is that some virtualization solutions may experience
69436 + a huge slowdown and therefore you should not enable this feature
69437 + for kernels meant to run in such environments. Whether a given VM
69438 + solution is affected or not is best determined by simply trying it
69439 + out, the performance impact will be obvious right on boot as this
69440 + mechanism engages from very early on. A good rule of thumb is that
69441 + VMs running on CPUs without hardware virtualization support (i.e.,
69442 + the majority of IA-32 CPUs) will likely experience the slowdown.
69443 +
69444 +config PAX_REFCOUNT
69445 + bool "Prevent various kernel object reference counter overflows"
69446 + depends on GRKERNSEC && (X86 || SPARC64)
69447 + help
69448 + By saying Y here the kernel will detect and prevent overflowing
69449 + various (but not all) kinds of object reference counters. Such
69450 + overflows can normally occur due to bugs only and are often, if
69451 + not always, exploitable.
69452 +
69453 + The tradeoff is that data structures protected by an overflowed
69454 + refcount will never be freed and therefore will leak memory. Note
69455 + that this leak also happens even without this protection but in
69456 + that case the overflow can eventually trigger the freeing of the
69457 + data structure while it is still being used elsewhere, resulting
69458 + in the exploitable situation that this feature prevents.
69459 +
69460 + Since this has a negligible performance impact, you should enable
69461 + this feature.
69462 +
69463 +config PAX_USERCOPY
69464 + bool "Harden heap object copies between kernel and userland"
69465 + depends on X86 || PPC || SPARC || ARM
69466 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
69467 + help
69468 + By saying Y here the kernel will enforce the size of heap objects
69469 + when they are copied in either direction between the kernel and
69470 + userland, even if only a part of the heap object is copied.
69471 +
69472 + Specifically, this checking prevents information leaking from the
69473 + kernel heap during kernel to userland copies (if the kernel heap
69474 + object is otherwise fully initialized) and prevents kernel heap
69475 + overflows during userland to kernel copies.
69476 +
69477 + Note that the current implementation provides the strictest bounds
69478 + checks for the SLUB allocator.
69479 +
69480 + Enabling this option also enables per-slab cache protection against
69481 + data in a given cache being copied into/out of via userland
69482 + accessors. Though the whitelist of regions will be reduced over
69483 + time, it notably protects important data structures like task structs.
69484 +
69485 + If frame pointers are enabled on x86, this option will also restrict
69486 + copies into and out of the kernel stack to local variables within a
69487 + single frame.
69488 +
69489 + Since this has a negligible performance impact, you should enable
69490 + this feature.
69491 +
69492 +endmenu
69493 +
69494 +endmenu
69495 +
69496 config KEYS
69497 bool "Enable access key retention support"
69498 help
69499 @@ -167,7 +715,7 @@ config INTEL_TXT
69500 config LSM_MMAP_MIN_ADDR
69501 int "Low address space for LSM to protect from user allocation"
69502 depends on SECURITY && SECURITY_SELINUX
69503 - default 65536
69504 + default 32768
69505 help
69506 This is the portion of low virtual memory which should be protected
69507 from userspace allocation. Keeping a user from writing to low pages
69508 diff -urNp linux-2.6.39.4/security/keys/keyring.c linux-2.6.39.4/security/keys/keyring.c
69509 --- linux-2.6.39.4/security/keys/keyring.c 2011-05-19 00:06:34.000000000 -0400
69510 +++ linux-2.6.39.4/security/keys/keyring.c 2011-08-05 19:44:37.000000000 -0400
69511 @@ -213,15 +213,15 @@ static long keyring_read(const struct ke
69512 ret = -EFAULT;
69513
69514 for (loop = 0; loop < klist->nkeys; loop++) {
69515 + key_serial_t serial;
69516 key = klist->keys[loop];
69517 + serial = key->serial;
69518
69519 tmp = sizeof(key_serial_t);
69520 if (tmp > buflen)
69521 tmp = buflen;
69522
69523 - if (copy_to_user(buffer,
69524 - &key->serial,
69525 - tmp) != 0)
69526 + if (copy_to_user(buffer, &serial, tmp))
69527 goto error;
69528
69529 buflen -= tmp;
69530 diff -urNp linux-2.6.39.4/security/min_addr.c linux-2.6.39.4/security/min_addr.c
69531 --- linux-2.6.39.4/security/min_addr.c 2011-05-19 00:06:34.000000000 -0400
69532 +++ linux-2.6.39.4/security/min_addr.c 2011-08-05 19:44:37.000000000 -0400
69533 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
69534 */
69535 static void update_mmap_min_addr(void)
69536 {
69537 +#ifndef SPARC
69538 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
69539 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
69540 mmap_min_addr = dac_mmap_min_addr;
69541 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
69542 #else
69543 mmap_min_addr = dac_mmap_min_addr;
69544 #endif
69545 +#endif
69546 }
69547
69548 /*
69549 diff -urNp linux-2.6.39.4/security/security.c linux-2.6.39.4/security/security.c
69550 --- linux-2.6.39.4/security/security.c 2011-05-19 00:06:34.000000000 -0400
69551 +++ linux-2.6.39.4/security/security.c 2011-08-05 19:44:37.000000000 -0400
69552 @@ -25,8 +25,8 @@ static __initdata char chosen_lsm[SECURI
69553 /* things that live in capability.c */
69554 extern void __init security_fixup_ops(struct security_operations *ops);
69555
69556 -static struct security_operations *security_ops;
69557 -static struct security_operations default_security_ops = {
69558 +static struct security_operations *security_ops __read_only;
69559 +static struct security_operations default_security_ops __read_only = {
69560 .name = "default",
69561 };
69562
69563 @@ -67,7 +67,9 @@ int __init security_init(void)
69564
69565 void reset_security_ops(void)
69566 {
69567 + pax_open_kernel();
69568 security_ops = &default_security_ops;
69569 + pax_close_kernel();
69570 }
69571
69572 /* Save user chosen LSM */
69573 diff -urNp linux-2.6.39.4/security/selinux/hooks.c linux-2.6.39.4/security/selinux/hooks.c
69574 --- linux-2.6.39.4/security/selinux/hooks.c 2011-05-19 00:06:34.000000000 -0400
69575 +++ linux-2.6.39.4/security/selinux/hooks.c 2011-08-05 19:44:37.000000000 -0400
69576 @@ -93,7 +93,6 @@
69577 #define NUM_SEL_MNT_OPTS 5
69578
69579 extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
69580 -extern struct security_operations *security_ops;
69581
69582 /* SECMARK reference count */
69583 atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
69584 @@ -5431,7 +5430,7 @@ static int selinux_key_getsecurity(struc
69585
69586 #endif
69587
69588 -static struct security_operations selinux_ops = {
69589 +static struct security_operations selinux_ops __read_only = {
69590 .name = "selinux",
69591
69592 .ptrace_access_check = selinux_ptrace_access_check,
69593 diff -urNp linux-2.6.39.4/security/selinux/include/xfrm.h linux-2.6.39.4/security/selinux/include/xfrm.h
69594 --- linux-2.6.39.4/security/selinux/include/xfrm.h 2011-05-19 00:06:34.000000000 -0400
69595 +++ linux-2.6.39.4/security/selinux/include/xfrm.h 2011-08-05 19:44:37.000000000 -0400
69596 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
69597
69598 static inline void selinux_xfrm_notify_policyload(void)
69599 {
69600 - atomic_inc(&flow_cache_genid);
69601 + atomic_inc_unchecked(&flow_cache_genid);
69602 }
69603 #else
69604 static inline int selinux_xfrm_enabled(void)
69605 diff -urNp linux-2.6.39.4/security/selinux/ss/services.c linux-2.6.39.4/security/selinux/ss/services.c
69606 --- linux-2.6.39.4/security/selinux/ss/services.c 2011-05-19 00:06:34.000000000 -0400
69607 +++ linux-2.6.39.4/security/selinux/ss/services.c 2011-08-05 19:44:37.000000000 -0400
69608 @@ -1806,6 +1806,8 @@ int security_load_policy(void *data, siz
69609 int rc = 0;
69610 struct policy_file file = { data, len }, *fp = &file;
69611
69612 + pax_track_stack();
69613 +
69614 if (!ss_initialized) {
69615 avtab_cache_init();
69616 rc = policydb_read(&policydb, fp);
69617 diff -urNp linux-2.6.39.4/security/smack/smack_lsm.c linux-2.6.39.4/security/smack/smack_lsm.c
69618 --- linux-2.6.39.4/security/smack/smack_lsm.c 2011-05-19 00:06:34.000000000 -0400
69619 +++ linux-2.6.39.4/security/smack/smack_lsm.c 2011-08-05 19:44:37.000000000 -0400
69620 @@ -3386,7 +3386,7 @@ static int smack_inode_getsecctx(struct
69621 return 0;
69622 }
69623
69624 -struct security_operations smack_ops = {
69625 +struct security_operations smack_ops __read_only = {
69626 .name = "smack",
69627
69628 .ptrace_access_check = smack_ptrace_access_check,
69629 diff -urNp linux-2.6.39.4/security/tomoyo/tomoyo.c linux-2.6.39.4/security/tomoyo/tomoyo.c
69630 --- linux-2.6.39.4/security/tomoyo/tomoyo.c 2011-05-19 00:06:34.000000000 -0400
69631 +++ linux-2.6.39.4/security/tomoyo/tomoyo.c 2011-08-05 19:44:37.000000000 -0400
69632 @@ -240,7 +240,7 @@ static int tomoyo_sb_pivotroot(struct pa
69633 * tomoyo_security_ops is a "struct security_operations" which is used for
69634 * registering TOMOYO.
69635 */
69636 -static struct security_operations tomoyo_security_ops = {
69637 +static struct security_operations tomoyo_security_ops __read_only = {
69638 .name = "tomoyo",
69639 .cred_alloc_blank = tomoyo_cred_alloc_blank,
69640 .cred_prepare = tomoyo_cred_prepare,
69641 diff -urNp linux-2.6.39.4/sound/aoa/codecs/onyx.c linux-2.6.39.4/sound/aoa/codecs/onyx.c
69642 --- linux-2.6.39.4/sound/aoa/codecs/onyx.c 2011-05-19 00:06:34.000000000 -0400
69643 +++ linux-2.6.39.4/sound/aoa/codecs/onyx.c 2011-08-05 19:44:37.000000000 -0400
69644 @@ -54,7 +54,7 @@ struct onyx {
69645 spdif_locked:1,
69646 analog_locked:1,
69647 original_mute:2;
69648 - int open_count;
69649 + local_t open_count;
69650 struct codec_info *codec_info;
69651
69652 /* mutex serializes concurrent access to the device
69653 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_i
69654 struct onyx *onyx = cii->codec_data;
69655
69656 mutex_lock(&onyx->mutex);
69657 - onyx->open_count++;
69658 + local_inc(&onyx->open_count);
69659 mutex_unlock(&onyx->mutex);
69660
69661 return 0;
69662 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_
69663 struct onyx *onyx = cii->codec_data;
69664
69665 mutex_lock(&onyx->mutex);
69666 - onyx->open_count--;
69667 - if (!onyx->open_count)
69668 + if (local_dec_and_test(&onyx->open_count))
69669 onyx->spdif_locked = onyx->analog_locked = 0;
69670 mutex_unlock(&onyx->mutex);
69671
69672 diff -urNp linux-2.6.39.4/sound/aoa/codecs/onyx.h linux-2.6.39.4/sound/aoa/codecs/onyx.h
69673 --- linux-2.6.39.4/sound/aoa/codecs/onyx.h 2011-05-19 00:06:34.000000000 -0400
69674 +++ linux-2.6.39.4/sound/aoa/codecs/onyx.h 2011-08-05 19:44:37.000000000 -0400
69675 @@ -11,6 +11,7 @@
69676 #include <linux/i2c.h>
69677 #include <asm/pmac_low_i2c.h>
69678 #include <asm/prom.h>
69679 +#include <asm/local.h>
69680
69681 /* PCM3052 register definitions */
69682
69683 diff -urNp linux-2.6.39.4/sound/core/seq/seq_device.c linux-2.6.39.4/sound/core/seq/seq_device.c
69684 --- linux-2.6.39.4/sound/core/seq/seq_device.c 2011-05-19 00:06:34.000000000 -0400
69685 +++ linux-2.6.39.4/sound/core/seq/seq_device.c 2011-08-05 20:34:06.000000000 -0400
69686 @@ -63,7 +63,7 @@ struct ops_list {
69687 int argsize; /* argument size */
69688
69689 /* operators */
69690 - struct snd_seq_dev_ops ops;
69691 + struct snd_seq_dev_ops *ops;
69692
69693 /* registred devices */
69694 struct list_head dev_list; /* list of devices */
69695 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char
69696
69697 mutex_lock(&ops->reg_mutex);
69698 /* copy driver operators */
69699 - ops->ops = *entry;
69700 + ops->ops = entry;
69701 ops->driver |= DRIVER_LOADED;
69702 ops->argsize = argsize;
69703
69704 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_de
69705 dev->name, ops->id, ops->argsize, dev->argsize);
69706 return -EINVAL;
69707 }
69708 - if (ops->ops.init_device(dev) >= 0) {
69709 + if (ops->ops->init_device(dev) >= 0) {
69710 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
69711 ops->num_init_devices++;
69712 } else {
69713 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_de
69714 dev->name, ops->id, ops->argsize, dev->argsize);
69715 return -EINVAL;
69716 }
69717 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
69718 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
69719 dev->status = SNDRV_SEQ_DEVICE_FREE;
69720 dev->driver_data = NULL;
69721 ops->num_init_devices--;
69722 diff -urNp linux-2.6.39.4/sound/drivers/mts64.c linux-2.6.39.4/sound/drivers/mts64.c
69723 --- linux-2.6.39.4/sound/drivers/mts64.c 2011-05-19 00:06:34.000000000 -0400
69724 +++ linux-2.6.39.4/sound/drivers/mts64.c 2011-08-05 20:34:06.000000000 -0400
69725 @@ -28,6 +28,7 @@
69726 #include <sound/initval.h>
69727 #include <sound/rawmidi.h>
69728 #include <sound/control.h>
69729 +#include <asm/local.h>
69730
69731 #define CARD_NAME "Miditerminal 4140"
69732 #define DRIVER_NAME "MTS64"
69733 @@ -66,7 +67,7 @@ struct mts64 {
69734 struct pardevice *pardev;
69735 int pardev_claimed;
69736
69737 - int open_count;
69738 + local_t open_count;
69739 int current_midi_output_port;
69740 int current_midi_input_port;
69741 u8 mode[MTS64_NUM_INPUT_PORTS];
69742 @@ -696,7 +697,7 @@ static int snd_mts64_rawmidi_open(struct
69743 {
69744 struct mts64 *mts = substream->rmidi->private_data;
69745
69746 - if (mts->open_count == 0) {
69747 + if (local_read(&mts->open_count) == 0) {
69748 /* We don't need a spinlock here, because this is just called
69749 if the device has not been opened before.
69750 So there aren't any IRQs from the device */
69751 @@ -704,7 +705,7 @@ static int snd_mts64_rawmidi_open(struct
69752
69753 msleep(50);
69754 }
69755 - ++(mts->open_count);
69756 + local_inc(&mts->open_count);
69757
69758 return 0;
69759 }
69760 @@ -714,8 +715,7 @@ static int snd_mts64_rawmidi_close(struc
69761 struct mts64 *mts = substream->rmidi->private_data;
69762 unsigned long flags;
69763
69764 - --(mts->open_count);
69765 - if (mts->open_count == 0) {
69766 + if (local_dec_return(&mts->open_count) == 0) {
69767 /* We need the spinlock_irqsave here because we can still
69768 have IRQs at this point */
69769 spin_lock_irqsave(&mts->lock, flags);
69770 @@ -724,8 +724,8 @@ static int snd_mts64_rawmidi_close(struc
69771
69772 msleep(500);
69773
69774 - } else if (mts->open_count < 0)
69775 - mts->open_count = 0;
69776 + } else if (local_read(&mts->open_count) < 0)
69777 + local_set(&mts->open_count, 0);
69778
69779 return 0;
69780 }
69781 diff -urNp linux-2.6.39.4/sound/drivers/opl4/opl4_lib.c linux-2.6.39.4/sound/drivers/opl4/opl4_lib.c
69782 --- linux-2.6.39.4/sound/drivers/opl4/opl4_lib.c 2011-05-19 00:06:34.000000000 -0400
69783 +++ linux-2.6.39.4/sound/drivers/opl4/opl4_lib.c 2011-08-05 20:34:06.000000000 -0400
69784 @@ -28,7 +28,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@
69785 MODULE_DESCRIPTION("OPL4 driver");
69786 MODULE_LICENSE("GPL");
69787
69788 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
69789 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
69790 {
69791 int timeout = 10;
69792 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
69793 diff -urNp linux-2.6.39.4/sound/drivers/portman2x4.c linux-2.6.39.4/sound/drivers/portman2x4.c
69794 --- linux-2.6.39.4/sound/drivers/portman2x4.c 2011-05-19 00:06:34.000000000 -0400
69795 +++ linux-2.6.39.4/sound/drivers/portman2x4.c 2011-08-05 20:34:06.000000000 -0400
69796 @@ -47,6 +47,7 @@
69797 #include <sound/initval.h>
69798 #include <sound/rawmidi.h>
69799 #include <sound/control.h>
69800 +#include <asm/local.h>
69801
69802 #define CARD_NAME "Portman 2x4"
69803 #define DRIVER_NAME "portman"
69804 @@ -84,7 +85,7 @@ struct portman {
69805 struct pardevice *pardev;
69806 int pardev_claimed;
69807
69808 - int open_count;
69809 + local_t open_count;
69810 int mode[PORTMAN_NUM_INPUT_PORTS];
69811 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
69812 };
69813 diff -urNp linux-2.6.39.4/sound/firewire/amdtp.c linux-2.6.39.4/sound/firewire/amdtp.c
69814 --- linux-2.6.39.4/sound/firewire/amdtp.c 2011-05-19 00:06:34.000000000 -0400
69815 +++ linux-2.6.39.4/sound/firewire/amdtp.c 2011-08-05 19:44:37.000000000 -0400
69816 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdt
69817 ptr = s->pcm_buffer_pointer + data_blocks;
69818 if (ptr >= pcm->runtime->buffer_size)
69819 ptr -= pcm->runtime->buffer_size;
69820 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
69821 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
69822
69823 s->pcm_period_pointer += data_blocks;
69824 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
69825 @@ -510,7 +510,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
69826 */
69827 void amdtp_out_stream_update(struct amdtp_out_stream *s)
69828 {
69829 - ACCESS_ONCE(s->source_node_id_field) =
69830 + ACCESS_ONCE_RW(s->source_node_id_field) =
69831 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
69832 }
69833 EXPORT_SYMBOL(amdtp_out_stream_update);
69834 diff -urNp linux-2.6.39.4/sound/firewire/amdtp.h linux-2.6.39.4/sound/firewire/amdtp.h
69835 --- linux-2.6.39.4/sound/firewire/amdtp.h 2011-05-19 00:06:34.000000000 -0400
69836 +++ linux-2.6.39.4/sound/firewire/amdtp.h 2011-08-05 19:44:37.000000000 -0400
69837 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_
69838 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
69839 struct snd_pcm_substream *pcm)
69840 {
69841 - ACCESS_ONCE(s->pcm) = pcm;
69842 + ACCESS_ONCE_RW(s->pcm) = pcm;
69843 }
69844
69845 /**
69846 diff -urNp linux-2.6.39.4/sound/isa/cmi8330.c linux-2.6.39.4/sound/isa/cmi8330.c
69847 --- linux-2.6.39.4/sound/isa/cmi8330.c 2011-05-19 00:06:34.000000000 -0400
69848 +++ linux-2.6.39.4/sound/isa/cmi8330.c 2011-08-05 20:34:06.000000000 -0400
69849 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
69850
69851 struct snd_pcm *pcm;
69852 struct snd_cmi8330_stream {
69853 - struct snd_pcm_ops ops;
69854 + snd_pcm_ops_no_const ops;
69855 snd_pcm_open_callback_t open;
69856 void *private_data; /* sb or wss */
69857 } streams[2];
69858 diff -urNp linux-2.6.39.4/sound/oss/sb_audio.c linux-2.6.39.4/sound/oss/sb_audio.c
69859 --- linux-2.6.39.4/sound/oss/sb_audio.c 2011-05-19 00:06:34.000000000 -0400
69860 +++ linux-2.6.39.4/sound/oss/sb_audio.c 2011-08-05 19:44:37.000000000 -0400
69861 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
69862 buf16 = (signed short *)(localbuf + localoffs);
69863 while (c)
69864 {
69865 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
69866 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
69867 if (copy_from_user(lbuf8,
69868 userbuf+useroffs + p,
69869 locallen))
69870 diff -urNp linux-2.6.39.4/sound/oss/swarm_cs4297a.c linux-2.6.39.4/sound/oss/swarm_cs4297a.c
69871 --- linux-2.6.39.4/sound/oss/swarm_cs4297a.c 2011-05-19 00:06:34.000000000 -0400
69872 +++ linux-2.6.39.4/sound/oss/swarm_cs4297a.c 2011-08-05 19:44:37.000000000 -0400
69873 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
69874 {
69875 struct cs4297a_state *s;
69876 u32 pwr, id;
69877 - mm_segment_t fs;
69878 int rval;
69879 #ifndef CONFIG_BCM_CS4297A_CSWARM
69880 u64 cfg;
69881 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
69882 if (!rval) {
69883 char *sb1250_duart_present;
69884
69885 +#if 0
69886 + mm_segment_t fs;
69887 fs = get_fs();
69888 set_fs(KERNEL_DS);
69889 -#if 0
69890 val = SOUND_MASK_LINE;
69891 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
69892 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
69893 val = initvol[i].vol;
69894 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
69895 }
69896 + set_fs(fs);
69897 // cs4297a_write_ac97(s, 0x18, 0x0808);
69898 #else
69899 // cs4297a_write_ac97(s, 0x5e, 0x180);
69900 cs4297a_write_ac97(s, 0x02, 0x0808);
69901 cs4297a_write_ac97(s, 0x18, 0x0808);
69902 #endif
69903 - set_fs(fs);
69904
69905 list_add(&s->list, &cs4297a_devs);
69906
69907 diff -urNp linux-2.6.39.4/sound/pci/hda/hda_codec.h linux-2.6.39.4/sound/pci/hda/hda_codec.h
69908 --- linux-2.6.39.4/sound/pci/hda/hda_codec.h 2011-05-19 00:06:34.000000000 -0400
69909 +++ linux-2.6.39.4/sound/pci/hda/hda_codec.h 2011-08-05 20:34:06.000000000 -0400
69910 @@ -615,7 +615,7 @@ struct hda_bus_ops {
69911 /* notify power-up/down from codec to controller */
69912 void (*pm_notify)(struct hda_bus *bus);
69913 #endif
69914 -};
69915 +} __no_const;
69916
69917 /* template to pass to the bus constructor */
69918 struct hda_bus_template {
69919 @@ -713,6 +713,7 @@ struct hda_codec_ops {
69920 #endif
69921 void (*reboot_notify)(struct hda_codec *codec);
69922 };
69923 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
69924
69925 /* record for amp information cache */
69926 struct hda_cache_head {
69927 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
69928 struct snd_pcm_substream *substream);
69929 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
69930 struct snd_pcm_substream *substream);
69931 -};
69932 +} __no_const;
69933
69934 /* PCM information for each substream */
69935 struct hda_pcm_stream {
69936 @@ -801,7 +802,7 @@ struct hda_codec {
69937 const char *modelname; /* model name for preset */
69938
69939 /* set by patch */
69940 - struct hda_codec_ops patch_ops;
69941 + hda_codec_ops_no_const patch_ops;
69942
69943 /* PCM to create, set by patch_ops.build_pcms callback */
69944 unsigned int num_pcms;
69945 diff -urNp linux-2.6.39.4/sound/pci/ice1712/ice1712.h linux-2.6.39.4/sound/pci/ice1712/ice1712.h
69946 --- linux-2.6.39.4/sound/pci/ice1712/ice1712.h 2011-05-19 00:06:34.000000000 -0400
69947 +++ linux-2.6.39.4/sound/pci/ice1712/ice1712.h 2011-08-05 20:34:06.000000000 -0400
69948 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
69949 unsigned int mask_flags; /* total mask bits */
69950 struct snd_akm4xxx_ops {
69951 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
69952 - } ops;
69953 + } __no_const ops;
69954 };
69955
69956 struct snd_ice1712_spdif {
69957 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
69958 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69959 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69960 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69961 - } ops;
69962 + } __no_const ops;
69963 };
69964
69965
69966 diff -urNp linux-2.6.39.4/sound/pci/intel8x0m.c linux-2.6.39.4/sound/pci/intel8x0m.c
69967 --- linux-2.6.39.4/sound/pci/intel8x0m.c 2011-05-19 00:06:34.000000000 -0400
69968 +++ linux-2.6.39.4/sound/pci/intel8x0m.c 2011-08-05 20:34:06.000000000 -0400
69969 @@ -1265,7 +1265,7 @@ static struct shortname_table {
69970 { 0x5455, "ALi M5455" },
69971 { 0x746d, "AMD AMD8111" },
69972 #endif
69973 - { 0 },
69974 + { 0, },
69975 };
69976
69977 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
69978 diff -urNp linux-2.6.39.4/sound/pci/ymfpci/ymfpci_main.c linux-2.6.39.4/sound/pci/ymfpci/ymfpci_main.c
69979 --- linux-2.6.39.4/sound/pci/ymfpci/ymfpci_main.c 2011-05-19 00:06:34.000000000 -0400
69980 +++ linux-2.6.39.4/sound/pci/ymfpci/ymfpci_main.c 2011-08-05 20:34:06.000000000 -0400
69981 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
69982 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
69983 break;
69984 }
69985 - if (atomic_read(&chip->interrupt_sleep_count)) {
69986 - atomic_set(&chip->interrupt_sleep_count, 0);
69987 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
69988 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
69989 wake_up(&chip->interrupt_sleep);
69990 }
69991 __end:
69992 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
69993 continue;
69994 init_waitqueue_entry(&wait, current);
69995 add_wait_queue(&chip->interrupt_sleep, &wait);
69996 - atomic_inc(&chip->interrupt_sleep_count);
69997 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
69998 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
69999 remove_wait_queue(&chip->interrupt_sleep, &wait);
70000 }
70001 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
70002 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
70003 spin_unlock(&chip->reg_lock);
70004
70005 - if (atomic_read(&chip->interrupt_sleep_count)) {
70006 - atomic_set(&chip->interrupt_sleep_count, 0);
70007 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
70008 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
70009 wake_up(&chip->interrupt_sleep);
70010 }
70011 }
70012 @@ -2363,7 +2363,7 @@ int __devinit snd_ymfpci_create(struct s
70013 spin_lock_init(&chip->reg_lock);
70014 spin_lock_init(&chip->voice_lock);
70015 init_waitqueue_head(&chip->interrupt_sleep);
70016 - atomic_set(&chip->interrupt_sleep_count, 0);
70017 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
70018 chip->card = card;
70019 chip->pci = pci;
70020 chip->irq = -1;
70021 diff -urNp linux-2.6.39.4/sound/soc/soc-core.c linux-2.6.39.4/sound/soc/soc-core.c
70022 --- linux-2.6.39.4/sound/soc/soc-core.c 2011-05-19 00:06:34.000000000 -0400
70023 +++ linux-2.6.39.4/sound/soc/soc-core.c 2011-08-05 20:34:06.000000000 -0400
70024 @@ -1027,7 +1027,7 @@ static snd_pcm_uframes_t soc_pcm_pointer
70025 }
70026
70027 /* ASoC PCM operations */
70028 -static struct snd_pcm_ops soc_pcm_ops = {
70029 +static snd_pcm_ops_no_const soc_pcm_ops = {
70030 .open = soc_pcm_open,
70031 .close = soc_codec_close,
70032 .hw_params = soc_pcm_hw_params,
70033 @@ -2105,6 +2105,7 @@ static int soc_new_pcm(struct snd_soc_pc
70034
70035 rtd->pcm = pcm;
70036 pcm->private_data = rtd;
70037 + /* this whole logic is broken... */
70038 soc_pcm_ops.mmap = platform->driver->ops->mmap;
70039 soc_pcm_ops.pointer = platform->driver->ops->pointer;
70040 soc_pcm_ops.ioctl = platform->driver->ops->ioctl;
70041 diff -urNp linux-2.6.39.4/sound/usb/card.h linux-2.6.39.4/sound/usb/card.h
70042 --- linux-2.6.39.4/sound/usb/card.h 2011-05-19 00:06:34.000000000 -0400
70043 +++ linux-2.6.39.4/sound/usb/card.h 2011-08-05 20:34:06.000000000 -0400
70044 @@ -44,6 +44,7 @@ struct snd_urb_ops {
70045 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
70046 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
70047 };
70048 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
70049
70050 struct snd_usb_substream {
70051 struct snd_usb_stream *stream;
70052 @@ -93,7 +94,7 @@ struct snd_usb_substream {
70053 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
70054 spinlock_t lock;
70055
70056 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
70057 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
70058 };
70059
70060 struct snd_usb_stream {
70061 diff -urNp linux-2.6.39.4/tools/gcc/constify_plugin.c linux-2.6.39.4/tools/gcc/constify_plugin.c
70062 --- linux-2.6.39.4/tools/gcc/constify_plugin.c 1969-12-31 19:00:00.000000000 -0500
70063 +++ linux-2.6.39.4/tools/gcc/constify_plugin.c 2011-08-05 20:34:06.000000000 -0400
70064 @@ -0,0 +1,189 @@
70065 +/*
70066 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
70067 + * Licensed under the GPL v2, or (at your option) v3
70068 + *
70069 + * This gcc plugin constifies all structures which contain only function pointers and const fields.
70070 + *
70071 + * Usage:
70072 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
70073 + * $ gcc -fplugin=constify_plugin.so test.c -O2
70074 + */
70075 +
70076 +#include "gcc-plugin.h"
70077 +#include "config.h"
70078 +#include "system.h"
70079 +#include "coretypes.h"
70080 +#include "tree.h"
70081 +#include "tree-pass.h"
70082 +#include "intl.h"
70083 +#include "plugin-version.h"
70084 +#include "tm.h"
70085 +#include "toplev.h"
70086 +#include "function.h"
70087 +#include "tree-flow.h"
70088 +#include "plugin.h"
70089 +
70090 +int plugin_is_GPL_compatible;
70091 +
70092 +static struct plugin_info const_plugin_info = {
70093 + .version = "20110721",
70094 + .help = "no-constify\tturn off constification\n",
70095 +};
70096 +
70097 +static bool walk_struct(tree node);
70098 +
70099 +static void deconstify_node(tree node)
70100 +{
70101 + tree field;
70102 +
70103 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
70104 + enum tree_code code = TREE_CODE(TREE_TYPE(field));
70105 + if (code == RECORD_TYPE || code == UNION_TYPE)
70106 + deconstify_node(TREE_TYPE(field));
70107 + TREE_READONLY(field) = 0;
70108 + TREE_READONLY(TREE_TYPE(field)) = 0;
70109 + }
70110 +}
70111 +
70112 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
70113 +{
70114 + if (TREE_CODE(*node) == FUNCTION_DECL) {
70115 + error("%qE attribute does not apply to functions", name);
70116 + *no_add_attrs = true;
70117 + return NULL_TREE;
70118 + }
70119 +
70120 + if (DECL_P(*node) && lookup_attribute("no_const", TYPE_ATTRIBUTES(TREE_TYPE(*node)))) {
70121 + error("%qE attribute is already applied to the type" , name);
70122 + *no_add_attrs = true;
70123 + return NULL_TREE;
70124 + }
70125 +
70126 + if (TREE_CODE(*node) == TYPE_DECL && !TREE_READONLY(TREE_TYPE(*node))) {
70127 + error("%qE attribute used on type that is not constified" , name);
70128 + *no_add_attrs = true;
70129 + return NULL_TREE;
70130 + }
70131 +
70132 + if (TREE_CODE(*node) == TYPE_DECL) {
70133 + tree chain = TREE_CHAIN(TREE_TYPE(*node));
70134 + TREE_TYPE(*node) = copy_node(TREE_TYPE(*node));
70135 + TREE_CHAIN(TREE_TYPE(*node)) = copy_list(chain);
70136 + TREE_READONLY(TREE_TYPE(*node)) = 0;
70137 + deconstify_node(TREE_TYPE(*node));
70138 + return NULL_TREE;
70139 + }
70140 +
70141 + return NULL_TREE;
70142 +}
70143 +
70144 +static struct attribute_spec no_const_attr = {
70145 + .name = "no_const",
70146 + .min_length = 0,
70147 + .max_length = 0,
70148 + .decl_required = false,
70149 + .type_required = false,
70150 + .function_type_required = false,
70151 + .handler = handle_no_const_attribute
70152 +};
70153 +
70154 +static void register_attributes(void *event_data, void *data)
70155 +{
70156 + register_attribute(&no_const_attr);
70157 +}
70158 +
70159 +/*
70160 +static void printnode(char *prefix, tree node)
70161 +{
70162 + enum tree_code code;
70163 + enum tree_code_class tclass;
70164 +
70165 + tclass = TREE_CODE_CLASS(TREE_CODE (node));
70166 +
70167 + code = TREE_CODE(node);
70168 + fprintf(stderr, "\n%s node: %p, code: %d type: %s\n", prefix, node, code, tree_code_name[(int)code]);
70169 + if (DECL_CONTEXT(node) != NULL_TREE && TYPE_NAME(DECL_CONTEXT(node)) != NULL_TREE)
70170 + fprintf(stderr, "struct name: %s\n", IDENTIFIER_POINTER(TYPE_NAME(DECL_CONTEXT(node))));
70171 + if (tclass == tcc_declaration && DECL_NAME(node) != NULL_TREE)
70172 + fprintf(stderr, "field name: %s\n", IDENTIFIER_POINTER(DECL_NAME(node)));
70173 +}
70174 +*/
70175 +
70176 +static void constify_node(tree node)
70177 +{
70178 + TREE_READONLY(node) = 1;
70179 +}
70180 +
70181 +static bool is_fptr(tree field)
70182 +{
70183 + tree ptr = TREE_TYPE(field);
70184 +
70185 + if (TREE_CODE(ptr) != POINTER_TYPE)
70186 + return false;
70187 +
70188 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
70189 +}
70190 +
70191 +static bool walk_struct(tree node)
70192 +{
70193 + tree field;
70194 +
70195 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
70196 + enum tree_code code = TREE_CODE(TREE_TYPE(field));
70197 + if (code == RECORD_TYPE || code == UNION_TYPE) {
70198 + if (!(walk_struct(TREE_TYPE(field))))
70199 + return false;
70200 + } else if (is_fptr(field) == false && !TREE_READONLY(field))
70201 + return false;
70202 + }
70203 + return true;
70204 +}
70205 +
70206 +static void finish_type(void *event_data, void *data)
70207 +{
70208 + tree node = (tree)event_data;
70209 +
70210 + if (node == NULL_TREE)
70211 + return;
70212 +
70213 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
70214 + return;
70215 +
70216 + if (TREE_READONLY(node))
70217 + return;
70218 +
70219 + if (TYPE_FIELDS(node) == NULL_TREE)
70220 + return;
70221 +
70222 + if (walk_struct(node))
70223 + constify_node(node);
70224 +}
70225 +
70226 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
70227 +{
70228 + const char * const plugin_name = plugin_info->base_name;
70229 + const int argc = plugin_info->argc;
70230 + const struct plugin_argument * const argv = plugin_info->argv;
70231 + int i;
70232 + bool constify = true;
70233 +
70234 + if (!plugin_default_version_check(version, &gcc_version)) {
70235 + error(G_("incompatible gcc/plugin versions"));
70236 + return 1;
70237 + }
70238 +
70239 + for (i = 0; i < argc; ++i) {
70240 + if (!(strcmp(argv[i].key, "no-constify"))) {
70241 + constify = false;
70242 + continue;
70243 + }
70244 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
70245 + }
70246 +
70247 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
70248 + if (constify)
70249 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
70250 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
70251 +
70252 + return 0;
70253 +}
70254 diff -urNp linux-2.6.39.4/tools/gcc/Makefile linux-2.6.39.4/tools/gcc/Makefile
70255 --- linux-2.6.39.4/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
70256 +++ linux-2.6.39.4/tools/gcc/Makefile 2011-08-05 20:34:06.000000000 -0400
70257 @@ -0,0 +1,12 @@
70258 +#CC := gcc
70259 +#PLUGIN_SOURCE_FILES := pax_plugin.c
70260 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
70261 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
70262 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
70263 +
70264 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
70265 +
70266 +hostlibs-y := stackleak_plugin.so constify_plugin.so
70267 +always := $(hostlibs-y)
70268 +stackleak_plugin-objs := stackleak_plugin.o
70269 +constify_plugin-objs := constify_plugin.o
70270 diff -urNp linux-2.6.39.4/tools/gcc/stackleak_plugin.c linux-2.6.39.4/tools/gcc/stackleak_plugin.c
70271 --- linux-2.6.39.4/tools/gcc/stackleak_plugin.c 1969-12-31 19:00:00.000000000 -0500
70272 +++ linux-2.6.39.4/tools/gcc/stackleak_plugin.c 2011-08-05 20:34:06.000000000 -0400
70273 @@ -0,0 +1,243 @@
70274 +/*
70275 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
70276 + * Licensed under the GPL v2
70277 + *
70278 + * Note: the choice of the license means that the compilation process is
70279 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
70280 + * but for the kernel it doesn't matter since it doesn't link against
70281 + * any of the gcc libraries
70282 + *
70283 + * gcc plugin to help implement various PaX features
70284 + *
70285 + * - track lowest stack pointer
70286 + *
70287 + * TODO:
70288 + * - initialize all local variables
70289 + *
70290 + * BUGS:
70291 + * - cloned functions are instrumented twice
70292 + */
70293 +#include "gcc-plugin.h"
70294 +#include "plugin-version.h"
70295 +#include "config.h"
70296 +#include "system.h"
70297 +#include "coretypes.h"
70298 +#include "tm.h"
70299 +#include "toplev.h"
70300 +#include "basic-block.h"
70301 +#include "gimple.h"
70302 +//#include "expr.h" where are you...
70303 +#include "diagnostic.h"
70304 +#include "rtl.h"
70305 +#include "emit-rtl.h"
70306 +#include "function.h"
70307 +#include "tree.h"
70308 +#include "tree-pass.h"
70309 +#include "intl.h"
70310 +
70311 +int plugin_is_GPL_compatible;
70312 +
70313 +static int track_frame_size = -1;
70314 +static const char track_function[] = "pax_track_stack";
70315 +static bool init_locals;
70316 +
70317 +static struct plugin_info stackleak_plugin_info = {
70318 + .version = "201106030000",
70319 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
70320 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
70321 +};
70322 +
70323 +static bool gate_stackleak_track_stack(void);
70324 +static unsigned int execute_stackleak_tree_instrument(void);
70325 +static unsigned int execute_stackleak_final(void);
70326 +
70327 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
70328 + .pass = {
70329 + .type = GIMPLE_PASS,
70330 + .name = "stackleak_tree_instrument",
70331 + .gate = gate_stackleak_track_stack,
70332 + .execute = execute_stackleak_tree_instrument,
70333 + .sub = NULL,
70334 + .next = NULL,
70335 + .static_pass_number = 0,
70336 + .tv_id = TV_NONE,
70337 + .properties_required = PROP_gimple_leh | PROP_cfg,
70338 + .properties_provided = 0,
70339 + .properties_destroyed = 0,
70340 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
70341 + .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
70342 + }
70343 +};
70344 +
70345 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
70346 + .pass = {
70347 + .type = RTL_PASS,
70348 + .name = "stackleak_final",
70349 + .gate = gate_stackleak_track_stack,
70350 + .execute = execute_stackleak_final,
70351 + .sub = NULL,
70352 + .next = NULL,
70353 + .static_pass_number = 0,
70354 + .tv_id = TV_NONE,
70355 + .properties_required = 0,
70356 + .properties_provided = 0,
70357 + .properties_destroyed = 0,
70358 + .todo_flags_start = 0,
70359 + .todo_flags_finish = 0
70360 + }
70361 +};
70362 +
70363 +static bool gate_stackleak_track_stack(void)
70364 +{
70365 + return track_frame_size >= 0;
70366 +}
70367 +
70368 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
70369 +{
70370 + gimple call;
70371 + tree decl, type;
70372 +
70373 + // insert call to void pax_track_stack(void)
70374 + type = build_function_type_list(void_type_node, NULL_TREE);
70375 + decl = build_fn_decl(track_function, type);
70376 + DECL_ASSEMBLER_NAME(decl); // for LTO
70377 + call = gimple_build_call(decl, 0);
70378 + if (before)
70379 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
70380 + else
70381 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
70382 +}
70383 +
70384 +static unsigned int execute_stackleak_tree_instrument(void)
70385 +{
70386 + basic_block bb;
70387 + gimple_stmt_iterator gsi;
70388 +
70389 + // 1. loop through BBs and GIMPLE statements
70390 + FOR_EACH_BB(bb) {
70391 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
70392 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
70393 + tree decl;
70394 + gimple stmt = gsi_stmt(gsi);
70395 +
70396 + if (!is_gimple_call(stmt))
70397 + continue;
70398 + decl = gimple_call_fndecl(stmt);
70399 + if (!decl)
70400 + continue;
70401 + if (TREE_CODE(decl) != FUNCTION_DECL)
70402 + continue;
70403 + if (!DECL_BUILT_IN(decl))
70404 + continue;
70405 + if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
70406 + continue;
70407 + if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
70408 + continue;
70409 +
70410 + // 2. insert track call after each __builtin_alloca call
70411 + stackleak_add_instrumentation(&gsi, false);
70412 +// print_node(stderr, "pax", decl, 4);
70413 + }
70414 + }
70415 +
70416 + // 3. insert track call at the beginning
70417 + bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
70418 + gsi = gsi_start_bb(bb);
70419 + stackleak_add_instrumentation(&gsi, true);
70420 +
70421 + return 0;
70422 +}
70423 +
70424 +static unsigned int execute_stackleak_final(void)
70425 +{
70426 + rtx insn;
70427 +
70428 + if (cfun->calls_alloca)
70429 + return 0;
70430 +
70431 + // 1. find pax_track_stack calls
70432 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
70433 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
70434 + rtx body;
70435 +
70436 + if (!CALL_P(insn))
70437 + continue;
70438 + body = PATTERN(insn);
70439 + if (GET_CODE(body) != CALL)
70440 + continue;
70441 + body = XEXP(body, 0);
70442 + if (GET_CODE(body) != MEM)
70443 + continue;
70444 + body = XEXP(body, 0);
70445 + if (GET_CODE(body) != SYMBOL_REF)
70446 + continue;
70447 + if (strcmp(XSTR(body, 0), track_function))
70448 + continue;
70449 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
70450 + // 2. delete call if function frame is not big enough
70451 + if (get_frame_size() >= track_frame_size)
70452 + continue;
70453 + delete_insn_and_edges(insn);
70454 + }
70455 +
70456 +// print_simple_rtl(stderr, get_insns());
70457 +// print_rtl(stderr, get_insns());
70458 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
70459 +
70460 + return 0;
70461 +}
70462 +
70463 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
70464 +{
70465 + const char * const plugin_name = plugin_info->base_name;
70466 + const int argc = plugin_info->argc;
70467 + const struct plugin_argument * const argv = plugin_info->argv;
70468 + int i;
70469 + struct register_pass_info stackleak_tree_instrument_pass_info = {
70470 + .pass = &stackleak_tree_instrument_pass.pass,
70471 +// .reference_pass_name = "tree_profile",
70472 + .reference_pass_name = "optimized",
70473 + .ref_pass_instance_number = 0,
70474 + .pos_op = PASS_POS_INSERT_AFTER
70475 + };
70476 + struct register_pass_info stackleak_final_pass_info = {
70477 + .pass = &stackleak_final_rtl_opt_pass.pass,
70478 + .reference_pass_name = "final",
70479 + .ref_pass_instance_number = 0,
70480 + .pos_op = PASS_POS_INSERT_BEFORE
70481 + };
70482 +
70483 + if (!plugin_default_version_check(version, &gcc_version)) {
70484 + error(G_("incompatible gcc/plugin versions"));
70485 + return 1;
70486 + }
70487 +
70488 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
70489 +
70490 + for (i = 0; i < argc; ++i) {
70491 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
70492 + if (!argv[i].value) {
70493 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
70494 + continue;
70495 + }
70496 + track_frame_size = atoi(argv[i].value);
70497 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
70498 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
70499 + continue;
70500 + }
70501 + if (!strcmp(argv[i].key, "initialize-locals")) {
70502 + if (argv[i].value) {
70503 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
70504 + continue;
70505 + }
70506 + init_locals = true;
70507 + continue;
70508 + }
70509 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
70510 + }
70511 +
70512 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
70513 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
70514 +
70515 + return 0;
70516 +}
70517 diff -urNp linux-2.6.39.4/usr/gen_init_cpio.c linux-2.6.39.4/usr/gen_init_cpio.c
70518 --- linux-2.6.39.4/usr/gen_init_cpio.c 2011-05-19 00:06:34.000000000 -0400
70519 +++ linux-2.6.39.4/usr/gen_init_cpio.c 2011-08-05 19:44:38.000000000 -0400
70520 @@ -305,7 +305,7 @@ static int cpio_mkfile(const char *name,
70521 int retval;
70522 int rc = -1;
70523 int namesize;
70524 - int i;
70525 + unsigned int i;
70526
70527 mode |= S_IFREG;
70528
70529 @@ -394,9 +394,10 @@ static char *cpio_replace_env(char *new_
70530 *env_var = *expanded = '\0';
70531 strncat(env_var, start + 2, end - start - 2);
70532 strncat(expanded, new_location, start - new_location);
70533 - strncat(expanded, getenv(env_var), PATH_MAX);
70534 - strncat(expanded, end + 1, PATH_MAX);
70535 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
70536 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
70537 strncpy(new_location, expanded, PATH_MAX);
70538 + new_location[PATH_MAX] = 0;
70539 } else
70540 break;
70541 }
70542 diff -urNp linux-2.6.39.4/virt/kvm/kvm_main.c linux-2.6.39.4/virt/kvm/kvm_main.c
70543 --- linux-2.6.39.4/virt/kvm/kvm_main.c 2011-05-19 00:06:34.000000000 -0400
70544 +++ linux-2.6.39.4/virt/kvm/kvm_main.c 2011-08-05 20:34:06.000000000 -0400
70545 @@ -73,7 +73,7 @@ LIST_HEAD(vm_list);
70546
70547 static cpumask_var_t cpus_hardware_enabled;
70548 static int kvm_usage_count = 0;
70549 -static atomic_t hardware_enable_failed;
70550 +static atomic_unchecked_t hardware_enable_failed;
70551
70552 struct kmem_cache *kvm_vcpu_cache;
70553 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
70554 @@ -2187,7 +2187,7 @@ static void hardware_enable_nolock(void
70555
70556 if (r) {
70557 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
70558 - atomic_inc(&hardware_enable_failed);
70559 + atomic_inc_unchecked(&hardware_enable_failed);
70560 printk(KERN_INFO "kvm: enabling virtualization on "
70561 "CPU%d failed\n", cpu);
70562 }
70563 @@ -2241,10 +2241,10 @@ static int hardware_enable_all(void)
70564
70565 kvm_usage_count++;
70566 if (kvm_usage_count == 1) {
70567 - atomic_set(&hardware_enable_failed, 0);
70568 + atomic_set_unchecked(&hardware_enable_failed, 0);
70569 on_each_cpu(hardware_enable_nolock, NULL, 1);
70570
70571 - if (atomic_read(&hardware_enable_failed)) {
70572 + if (atomic_read_unchecked(&hardware_enable_failed)) {
70573 hardware_disable_all_nolock();
70574 r = -EBUSY;
70575 }
70576 @@ -2509,7 +2509,7 @@ static void kvm_sched_out(struct preempt
70577 kvm_arch_vcpu_put(vcpu);
70578 }
70579
70580 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
70581 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
70582 struct module *module)
70583 {
70584 int r;
70585 @@ -2572,7 +2572,7 @@ int kvm_init(void *opaque, unsigned vcpu
70586 if (!vcpu_align)
70587 vcpu_align = __alignof__(struct kvm_vcpu);
70588 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
70589 - 0, NULL);
70590 + SLAB_USERCOPY, NULL);
70591 if (!kvm_vcpu_cache) {
70592 r = -ENOMEM;
70593 goto out_free_3;
70594 @@ -2582,9 +2582,11 @@ int kvm_init(void *opaque, unsigned vcpu
70595 if (r)
70596 goto out_free;
70597
70598 - kvm_chardev_ops.owner = module;
70599 - kvm_vm_fops.owner = module;
70600 - kvm_vcpu_fops.owner = module;
70601 + pax_open_kernel();
70602 + *(void **)&kvm_chardev_ops.owner = module;
70603 + *(void **)&kvm_vm_fops.owner = module;
70604 + *(void **)&kvm_vcpu_fops.owner = module;
70605 + pax_close_kernel();
70606
70607 r = misc_register(&kvm_dev);
70608 if (r) {