]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blob - test/grsecurity-2.2.2-2.6.39.4-201108182325.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-2.6.39.4-201108182325.patch
1 diff -urNp linux-2.6.39.4/arch/alpha/include/asm/elf.h linux-2.6.39.4/arch/alpha/include/asm/elf.h
2 --- linux-2.6.39.4/arch/alpha/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
3 +++ linux-2.6.39.4/arch/alpha/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
4 @@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8 +#ifdef CONFIG_PAX_ASLR
9 +#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10 +
11 +#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12 +#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13 +#endif
14 +
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18 diff -urNp linux-2.6.39.4/arch/alpha/include/asm/pgtable.h linux-2.6.39.4/arch/alpha/include/asm/pgtable.h
19 --- linux-2.6.39.4/arch/alpha/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
20 +++ linux-2.6.39.4/arch/alpha/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
21 @@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25 +
26 +#ifdef CONFIG_PAX_PAGEEXEC
27 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30 +#else
31 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
32 +# define PAGE_COPY_NOEXEC PAGE_COPY
33 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
34 +#endif
35 +
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39 diff -urNp linux-2.6.39.4/arch/alpha/kernel/module.c linux-2.6.39.4/arch/alpha/kernel/module.c
40 --- linux-2.6.39.4/arch/alpha/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
41 +++ linux-2.6.39.4/arch/alpha/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
42 @@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46 - gp = (u64)me->module_core + me->core_size - 0x8000;
47 + gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51 diff -urNp linux-2.6.39.4/arch/alpha/kernel/osf_sys.c linux-2.6.39.4/arch/alpha/kernel/osf_sys.c
52 --- linux-2.6.39.4/arch/alpha/kernel/osf_sys.c 2011-08-05 21:11:51.000000000 -0400
53 +++ linux-2.6.39.4/arch/alpha/kernel/osf_sys.c 2011-08-05 19:44:33.000000000 -0400
54 @@ -1145,7 +1145,7 @@ arch_get_unmapped_area_1(unsigned long a
55 /* At this point: (!vma || addr < vma->vm_end). */
56 if (limit - len < addr)
57 return -ENOMEM;
58 - if (!vma || addr + len <= vma->vm_start)
59 + if (check_heap_stack_gap(vma, addr, len))
60 return addr;
61 addr = vma->vm_end;
62 vma = vma->vm_next;
63 @@ -1181,6 +1181,10 @@ arch_get_unmapped_area(struct file *filp
64 merely specific addresses, but regions of memory -- perhaps
65 this feature should be incorporated into all ports? */
66
67 +#ifdef CONFIG_PAX_RANDMMAP
68 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
69 +#endif
70 +
71 if (addr) {
72 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
73 if (addr != (unsigned long) -ENOMEM)
74 @@ -1188,8 +1192,8 @@ arch_get_unmapped_area(struct file *filp
75 }
76
77 /* Next, try allocating at TASK_UNMAPPED_BASE. */
78 - addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
79 - len, limit);
80 + addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
81 +
82 if (addr != (unsigned long) -ENOMEM)
83 return addr;
84
85 diff -urNp linux-2.6.39.4/arch/alpha/mm/fault.c linux-2.6.39.4/arch/alpha/mm/fault.c
86 --- linux-2.6.39.4/arch/alpha/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
87 +++ linux-2.6.39.4/arch/alpha/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
88 @@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
89 __reload_thread(pcb);
90 }
91
92 +#ifdef CONFIG_PAX_PAGEEXEC
93 +/*
94 + * PaX: decide what to do with offenders (regs->pc = fault address)
95 + *
96 + * returns 1 when task should be killed
97 + * 2 when patched PLT trampoline was detected
98 + * 3 when unpatched PLT trampoline was detected
99 + */
100 +static int pax_handle_fetch_fault(struct pt_regs *regs)
101 +{
102 +
103 +#ifdef CONFIG_PAX_EMUPLT
104 + int err;
105 +
106 + do { /* PaX: patched PLT emulation #1 */
107 + unsigned int ldah, ldq, jmp;
108 +
109 + err = get_user(ldah, (unsigned int *)regs->pc);
110 + err |= get_user(ldq, (unsigned int *)(regs->pc+4));
111 + err |= get_user(jmp, (unsigned int *)(regs->pc+8));
112 +
113 + if (err)
114 + break;
115 +
116 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
117 + (ldq & 0xFFFF0000U) == 0xA77B0000U &&
118 + jmp == 0x6BFB0000U)
119 + {
120 + unsigned long r27, addr;
121 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
122 + unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
123 +
124 + addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
125 + err = get_user(r27, (unsigned long *)addr);
126 + if (err)
127 + break;
128 +
129 + regs->r27 = r27;
130 + regs->pc = r27;
131 + return 2;
132 + }
133 + } while (0);
134 +
135 + do { /* PaX: patched PLT emulation #2 */
136 + unsigned int ldah, lda, br;
137 +
138 + err = get_user(ldah, (unsigned int *)regs->pc);
139 + err |= get_user(lda, (unsigned int *)(regs->pc+4));
140 + err |= get_user(br, (unsigned int *)(regs->pc+8));
141 +
142 + if (err)
143 + break;
144 +
145 + if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
146 + (lda & 0xFFFF0000U) == 0xA77B0000U &&
147 + (br & 0xFFE00000U) == 0xC3E00000U)
148 + {
149 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
150 + unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
151 + unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
152 +
153 + regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
154 + regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
155 + return 2;
156 + }
157 + } while (0);
158 +
159 + do { /* PaX: unpatched PLT emulation */
160 + unsigned int br;
161 +
162 + err = get_user(br, (unsigned int *)regs->pc);
163 +
164 + if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
165 + unsigned int br2, ldq, nop, jmp;
166 + unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
167 +
168 + addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
169 + err = get_user(br2, (unsigned int *)addr);
170 + err |= get_user(ldq, (unsigned int *)(addr+4));
171 + err |= get_user(nop, (unsigned int *)(addr+8));
172 + err |= get_user(jmp, (unsigned int *)(addr+12));
173 + err |= get_user(resolver, (unsigned long *)(addr+16));
174 +
175 + if (err)
176 + break;
177 +
178 + if (br2 == 0xC3600000U &&
179 + ldq == 0xA77B000CU &&
180 + nop == 0x47FF041FU &&
181 + jmp == 0x6B7B0000U)
182 + {
183 + regs->r28 = regs->pc+4;
184 + regs->r27 = addr+16;
185 + regs->pc = resolver;
186 + return 3;
187 + }
188 + }
189 + } while (0);
190 +#endif
191 +
192 + return 1;
193 +}
194 +
195 +void pax_report_insns(void *pc, void *sp)
196 +{
197 + unsigned long i;
198 +
199 + printk(KERN_ERR "PAX: bytes at PC: ");
200 + for (i = 0; i < 5; i++) {
201 + unsigned int c;
202 + if (get_user(c, (unsigned int *)pc+i))
203 + printk(KERN_CONT "???????? ");
204 + else
205 + printk(KERN_CONT "%08x ", c);
206 + }
207 + printk("\n");
208 +}
209 +#endif
210
211 /*
212 * This routine handles page faults. It determines the address,
213 @@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
214 good_area:
215 si_code = SEGV_ACCERR;
216 if (cause < 0) {
217 - if (!(vma->vm_flags & VM_EXEC))
218 + if (!(vma->vm_flags & VM_EXEC)) {
219 +
220 +#ifdef CONFIG_PAX_PAGEEXEC
221 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
222 + goto bad_area;
223 +
224 + up_read(&mm->mmap_sem);
225 + switch (pax_handle_fetch_fault(regs)) {
226 +
227 +#ifdef CONFIG_PAX_EMUPLT
228 + case 2:
229 + case 3:
230 + return;
231 +#endif
232 +
233 + }
234 + pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
235 + do_group_exit(SIGKILL);
236 +#else
237 goto bad_area;
238 +#endif
239 +
240 + }
241 } else if (!cause) {
242 /* Allow reads even for write-only mappings */
243 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
244 diff -urNp linux-2.6.39.4/arch/arm/include/asm/elf.h linux-2.6.39.4/arch/arm/include/asm/elf.h
245 --- linux-2.6.39.4/arch/arm/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
246 +++ linux-2.6.39.4/arch/arm/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
247 @@ -115,7 +115,14 @@ int dump_task_regs(struct task_struct *t
248 the loader. We need to make sure that it is out of the way of the program
249 that it will "exec", and that there is sufficient room for the brk. */
250
251 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
252 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
253 +
254 +#ifdef CONFIG_PAX_ASLR
255 +#define PAX_ELF_ET_DYN_BASE 0x00008000UL
256 +
257 +#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
258 +#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
259 +#endif
260
261 /* When the program starts, a1 contains a pointer to a function to be
262 registered with atexit, as per the SVR4 ABI. A value of 0 means we
263 @@ -125,10 +132,6 @@ int dump_task_regs(struct task_struct *t
264 extern void elf_set_personality(const struct elf32_hdr *);
265 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
266
267 -struct mm_struct;
268 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
269 -#define arch_randomize_brk arch_randomize_brk
270 -
271 extern int vectors_user_mapping(void);
272 #define arch_setup_additional_pages(bprm, uses_interp) vectors_user_mapping()
273 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES
274 diff -urNp linux-2.6.39.4/arch/arm/include/asm/kmap_types.h linux-2.6.39.4/arch/arm/include/asm/kmap_types.h
275 --- linux-2.6.39.4/arch/arm/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
276 +++ linux-2.6.39.4/arch/arm/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
277 @@ -21,6 +21,7 @@ enum km_type {
278 KM_L1_CACHE,
279 KM_L2_CACHE,
280 KM_KDB,
281 + KM_CLEARPAGE,
282 KM_TYPE_NR
283 };
284
285 diff -urNp linux-2.6.39.4/arch/arm/include/asm/uaccess.h linux-2.6.39.4/arch/arm/include/asm/uaccess.h
286 --- linux-2.6.39.4/arch/arm/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
287 +++ linux-2.6.39.4/arch/arm/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
288 @@ -22,6 +22,8 @@
289 #define VERIFY_READ 0
290 #define VERIFY_WRITE 1
291
292 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
293 +
294 /*
295 * The exception table consists of pairs of addresses: the first is the
296 * address of an instruction that is allowed to fault, and the second is
297 @@ -387,8 +389,23 @@ do { \
298
299
300 #ifdef CONFIG_MMU
301 -extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
302 -extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
303 +extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
304 +extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
305 +
306 +static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
307 +{
308 + if (!__builtin_constant_p(n))
309 + check_object_size(to, n, false);
310 + return ___copy_from_user(to, from, n);
311 +}
312 +
313 +static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
314 +{
315 + if (!__builtin_constant_p(n))
316 + check_object_size(from, n, true);
317 + return ___copy_to_user(to, from, n);
318 +}
319 +
320 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
321 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
322 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
323 @@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
324
325 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
326 {
327 + if ((long)n < 0)
328 + return n;
329 +
330 if (access_ok(VERIFY_READ, from, n))
331 n = __copy_from_user(to, from, n);
332 else /* security hole - plug it */
333 @@ -412,6 +432,9 @@ static inline unsigned long __must_check
334
335 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
336 {
337 + if ((long)n < 0)
338 + return n;
339 +
340 if (access_ok(VERIFY_WRITE, to, n))
341 n = __copy_to_user(to, from, n);
342 return n;
343 diff -urNp linux-2.6.39.4/arch/arm/kernel/armksyms.c linux-2.6.39.4/arch/arm/kernel/armksyms.c
344 --- linux-2.6.39.4/arch/arm/kernel/armksyms.c 2011-05-19 00:06:34.000000000 -0400
345 +++ linux-2.6.39.4/arch/arm/kernel/armksyms.c 2011-08-05 19:44:33.000000000 -0400
346 @@ -98,8 +98,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
347 #ifdef CONFIG_MMU
348 EXPORT_SYMBOL(copy_page);
349
350 -EXPORT_SYMBOL(__copy_from_user);
351 -EXPORT_SYMBOL(__copy_to_user);
352 +EXPORT_SYMBOL(___copy_from_user);
353 +EXPORT_SYMBOL(___copy_to_user);
354 EXPORT_SYMBOL(__clear_user);
355
356 EXPORT_SYMBOL(__get_user_1);
357 diff -urNp linux-2.6.39.4/arch/arm/kernel/process.c linux-2.6.39.4/arch/arm/kernel/process.c
358 --- linux-2.6.39.4/arch/arm/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
359 +++ linux-2.6.39.4/arch/arm/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
360 @@ -28,7 +28,6 @@
361 #include <linux/tick.h>
362 #include <linux/utsname.h>
363 #include <linux/uaccess.h>
364 -#include <linux/random.h>
365 #include <linux/hw_breakpoint.h>
366
367 #include <asm/cacheflush.h>
368 @@ -479,12 +478,6 @@ unsigned long get_wchan(struct task_stru
369 return 0;
370 }
371
372 -unsigned long arch_randomize_brk(struct mm_struct *mm)
373 -{
374 - unsigned long range_end = mm->brk + 0x02000000;
375 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
376 -}
377 -
378 #ifdef CONFIG_MMU
379 /*
380 * The vectors page is always readable from user space for the
381 diff -urNp linux-2.6.39.4/arch/arm/kernel/traps.c linux-2.6.39.4/arch/arm/kernel/traps.c
382 --- linux-2.6.39.4/arch/arm/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
383 +++ linux-2.6.39.4/arch/arm/kernel/traps.c 2011-08-05 19:44:33.000000000 -0400
384 @@ -258,6 +258,8 @@ static int __die(const char *str, int er
385
386 static DEFINE_SPINLOCK(die_lock);
387
388 +extern void gr_handle_kernel_exploit(void);
389 +
390 /*
391 * This function is protected against re-entrancy.
392 */
393 @@ -285,6 +287,9 @@ void die(const char *str, struct pt_regs
394 panic("Fatal exception in interrupt");
395 if (panic_on_oops)
396 panic("Fatal exception");
397 +
398 + gr_handle_kernel_exploit();
399 +
400 if (ret != NOTIFY_STOP)
401 do_exit(SIGSEGV);
402 }
403 diff -urNp linux-2.6.39.4/arch/arm/lib/copy_from_user.S linux-2.6.39.4/arch/arm/lib/copy_from_user.S
404 --- linux-2.6.39.4/arch/arm/lib/copy_from_user.S 2011-05-19 00:06:34.000000000 -0400
405 +++ linux-2.6.39.4/arch/arm/lib/copy_from_user.S 2011-08-05 19:44:33.000000000 -0400
406 @@ -16,7 +16,7 @@
407 /*
408 * Prototype:
409 *
410 - * size_t __copy_from_user(void *to, const void *from, size_t n)
411 + * size_t ___copy_from_user(void *to, const void *from, size_t n)
412 *
413 * Purpose:
414 *
415 @@ -84,11 +84,11 @@
416
417 .text
418
419 -ENTRY(__copy_from_user)
420 +ENTRY(___copy_from_user)
421
422 #include "copy_template.S"
423
424 -ENDPROC(__copy_from_user)
425 +ENDPROC(___copy_from_user)
426
427 .pushsection .fixup,"ax"
428 .align 0
429 diff -urNp linux-2.6.39.4/arch/arm/lib/copy_to_user.S linux-2.6.39.4/arch/arm/lib/copy_to_user.S
430 --- linux-2.6.39.4/arch/arm/lib/copy_to_user.S 2011-05-19 00:06:34.000000000 -0400
431 +++ linux-2.6.39.4/arch/arm/lib/copy_to_user.S 2011-08-05 19:44:33.000000000 -0400
432 @@ -16,7 +16,7 @@
433 /*
434 * Prototype:
435 *
436 - * size_t __copy_to_user(void *to, const void *from, size_t n)
437 + * size_t ___copy_to_user(void *to, const void *from, size_t n)
438 *
439 * Purpose:
440 *
441 @@ -88,11 +88,11 @@
442 .text
443
444 ENTRY(__copy_to_user_std)
445 -WEAK(__copy_to_user)
446 +WEAK(___copy_to_user)
447
448 #include "copy_template.S"
449
450 -ENDPROC(__copy_to_user)
451 +ENDPROC(___copy_to_user)
452 ENDPROC(__copy_to_user_std)
453
454 .pushsection .fixup,"ax"
455 diff -urNp linux-2.6.39.4/arch/arm/lib/uaccess.S linux-2.6.39.4/arch/arm/lib/uaccess.S
456 --- linux-2.6.39.4/arch/arm/lib/uaccess.S 2011-05-19 00:06:34.000000000 -0400
457 +++ linux-2.6.39.4/arch/arm/lib/uaccess.S 2011-08-05 19:44:33.000000000 -0400
458 @@ -20,7 +20,7 @@
459
460 #define PAGE_SHIFT 12
461
462 -/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
463 +/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
464 * Purpose : copy a block to user memory from kernel memory
465 * Params : to - user memory
466 * : from - kernel memory
467 @@ -40,7 +40,7 @@ USER( T(strgtb) r3, [r0], #1) @ May f
468 sub r2, r2, ip
469 b .Lc2u_dest_aligned
470
471 -ENTRY(__copy_to_user)
472 +ENTRY(___copy_to_user)
473 stmfd sp!, {r2, r4 - r7, lr}
474 cmp r2, #4
475 blt .Lc2u_not_enough
476 @@ -278,14 +278,14 @@ USER( T(strgeb) r3, [r0], #1) @ May f
477 ldrgtb r3, [r1], #0
478 USER( T(strgtb) r3, [r0], #1) @ May fault
479 b .Lc2u_finished
480 -ENDPROC(__copy_to_user)
481 +ENDPROC(___copy_to_user)
482
483 .pushsection .fixup,"ax"
484 .align 0
485 9001: ldmfd sp!, {r0, r4 - r7, pc}
486 .popsection
487
488 -/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
489 +/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
490 * Purpose : copy a block from user memory to kernel memory
491 * Params : to - kernel memory
492 * : from - user memory
493 @@ -304,7 +304,7 @@ USER( T(ldrgtb) r3, [r1], #1) @ May f
494 sub r2, r2, ip
495 b .Lcfu_dest_aligned
496
497 -ENTRY(__copy_from_user)
498 +ENTRY(___copy_from_user)
499 stmfd sp!, {r0, r2, r4 - r7, lr}
500 cmp r2, #4
501 blt .Lcfu_not_enough
502 @@ -544,7 +544,7 @@ USER( T(ldrgeb) r3, [r1], #1) @ May f
503 USER( T(ldrgtb) r3, [r1], #1) @ May fault
504 strgtb r3, [r0], #1
505 b .Lcfu_finished
506 -ENDPROC(__copy_from_user)
507 +ENDPROC(___copy_from_user)
508
509 .pushsection .fixup,"ax"
510 .align 0
511 diff -urNp linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c
512 --- linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c 2011-05-19 00:06:34.000000000 -0400
513 +++ linux-2.6.39.4/arch/arm/lib/uaccess_with_memcpy.c 2011-08-05 19:44:33.000000000 -0400
514 @@ -103,7 +103,7 @@ out:
515 }
516
517 unsigned long
518 -__copy_to_user(void __user *to, const void *from, unsigned long n)
519 +___copy_to_user(void __user *to, const void *from, unsigned long n)
520 {
521 /*
522 * This test is stubbed out of the main function above to keep
523 diff -urNp linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c
524 --- linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c 2011-05-19 00:06:34.000000000 -0400
525 +++ linux-2.6.39.4/arch/arm/mach-ux500/mbox-db5500.c 2011-08-05 19:44:33.000000000 -0400
526 @@ -168,7 +168,7 @@ static ssize_t mbox_read_fifo(struct dev
527 return sprintf(buf, "0x%X\n", mbox_value);
528 }
529
530 -static DEVICE_ATTR(fifo, S_IWUGO | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
531 +static DEVICE_ATTR(fifo, S_IWUSR | S_IRUGO, mbox_read_fifo, mbox_write_fifo);
532
533 static int mbox_show(struct seq_file *s, void *data)
534 {
535 diff -urNp linux-2.6.39.4/arch/arm/mm/fault.c linux-2.6.39.4/arch/arm/mm/fault.c
536 --- linux-2.6.39.4/arch/arm/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
537 +++ linux-2.6.39.4/arch/arm/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
538 @@ -182,6 +182,13 @@ __do_user_fault(struct task_struct *tsk,
539 }
540 #endif
541
542 +#ifdef CONFIG_PAX_PAGEEXEC
543 + if (fsr & FSR_LNX_PF) {
544 + pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
545 + do_group_exit(SIGKILL);
546 + }
547 +#endif
548 +
549 tsk->thread.address = addr;
550 tsk->thread.error_code = fsr;
551 tsk->thread.trap_no = 14;
552 @@ -379,6 +386,33 @@ do_page_fault(unsigned long addr, unsign
553 }
554 #endif /* CONFIG_MMU */
555
556 +#ifdef CONFIG_PAX_PAGEEXEC
557 +void pax_report_insns(void *pc, void *sp)
558 +{
559 + long i;
560 +
561 + printk(KERN_ERR "PAX: bytes at PC: ");
562 + for (i = 0; i < 20; i++) {
563 + unsigned char c;
564 + if (get_user(c, (__force unsigned char __user *)pc+i))
565 + printk(KERN_CONT "?? ");
566 + else
567 + printk(KERN_CONT "%02x ", c);
568 + }
569 + printk("\n");
570 +
571 + printk(KERN_ERR "PAX: bytes at SP-4: ");
572 + for (i = -1; i < 20; i++) {
573 + unsigned long c;
574 + if (get_user(c, (__force unsigned long __user *)sp+i))
575 + printk(KERN_CONT "???????? ");
576 + else
577 + printk(KERN_CONT "%08lx ", c);
578 + }
579 + printk("\n");
580 +}
581 +#endif
582 +
583 /*
584 * First Level Translation Fault Handler
585 *
586 diff -urNp linux-2.6.39.4/arch/arm/mm/mmap.c linux-2.6.39.4/arch/arm/mm/mmap.c
587 --- linux-2.6.39.4/arch/arm/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
588 +++ linux-2.6.39.4/arch/arm/mm/mmap.c 2011-08-05 19:44:33.000000000 -0400
589 @@ -65,6 +65,10 @@ arch_get_unmapped_area(struct file *filp
590 if (len > TASK_SIZE)
591 return -ENOMEM;
592
593 +#ifdef CONFIG_PAX_RANDMMAP
594 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
595 +#endif
596 +
597 if (addr) {
598 if (do_align)
599 addr = COLOUR_ALIGN(addr, pgoff);
600 @@ -72,15 +76,14 @@ arch_get_unmapped_area(struct file *filp
601 addr = PAGE_ALIGN(addr);
602
603 vma = find_vma(mm, addr);
604 - if (TASK_SIZE - len >= addr &&
605 - (!vma || addr + len <= vma->vm_start))
606 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
607 return addr;
608 }
609 if (len > mm->cached_hole_size) {
610 - start_addr = addr = mm->free_area_cache;
611 + start_addr = addr = mm->free_area_cache;
612 } else {
613 - start_addr = addr = TASK_UNMAPPED_BASE;
614 - mm->cached_hole_size = 0;
615 + start_addr = addr = mm->mmap_base;
616 + mm->cached_hole_size = 0;
617 }
618 /* 8 bits of randomness in 20 address space bits */
619 if ((current->flags & PF_RANDOMIZE) &&
620 @@ -100,14 +103,14 @@ full_search:
621 * Start a new search - just in case we missed
622 * some holes.
623 */
624 - if (start_addr != TASK_UNMAPPED_BASE) {
625 - start_addr = addr = TASK_UNMAPPED_BASE;
626 + if (start_addr != mm->mmap_base) {
627 + start_addr = addr = mm->mmap_base;
628 mm->cached_hole_size = 0;
629 goto full_search;
630 }
631 return -ENOMEM;
632 }
633 - if (!vma || addr + len <= vma->vm_start) {
634 + if (check_heap_stack_gap(vma, addr, len)) {
635 /*
636 * Remember the place where we stopped the search:
637 */
638 diff -urNp linux-2.6.39.4/arch/avr32/include/asm/elf.h linux-2.6.39.4/arch/avr32/include/asm/elf.h
639 --- linux-2.6.39.4/arch/avr32/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
640 +++ linux-2.6.39.4/arch/avr32/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
641 @@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg
642 the loader. We need to make sure that it is out of the way of the program
643 that it will "exec", and that there is sufficient room for the brk. */
644
645 -#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
646 +#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
647
648 +#ifdef CONFIG_PAX_ASLR
649 +#define PAX_ELF_ET_DYN_BASE 0x00001000UL
650 +
651 +#define PAX_DELTA_MMAP_LEN 15
652 +#define PAX_DELTA_STACK_LEN 15
653 +#endif
654
655 /* This yields a mask that user programs can use to figure out what
656 instruction set this CPU supports. This could be done in user space,
657 diff -urNp linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h
658 --- linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
659 +++ linux-2.6.39.4/arch/avr32/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
660 @@ -22,7 +22,8 @@ D(10) KM_IRQ0,
661 D(11) KM_IRQ1,
662 D(12) KM_SOFTIRQ0,
663 D(13) KM_SOFTIRQ1,
664 -D(14) KM_TYPE_NR
665 +D(14) KM_CLEARPAGE,
666 +D(15) KM_TYPE_NR
667 };
668
669 #undef D
670 diff -urNp linux-2.6.39.4/arch/avr32/mm/fault.c linux-2.6.39.4/arch/avr32/mm/fault.c
671 --- linux-2.6.39.4/arch/avr32/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
672 +++ linux-2.6.39.4/arch/avr32/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
673 @@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
674
675 int exception_trace = 1;
676
677 +#ifdef CONFIG_PAX_PAGEEXEC
678 +void pax_report_insns(void *pc, void *sp)
679 +{
680 + unsigned long i;
681 +
682 + printk(KERN_ERR "PAX: bytes at PC: ");
683 + for (i = 0; i < 20; i++) {
684 + unsigned char c;
685 + if (get_user(c, (unsigned char *)pc+i))
686 + printk(KERN_CONT "???????? ");
687 + else
688 + printk(KERN_CONT "%02x ", c);
689 + }
690 + printk("\n");
691 +}
692 +#endif
693 +
694 /*
695 * This routine handles page faults. It determines the address and the
696 * problem, and then passes it off to one of the appropriate routines.
697 @@ -156,6 +173,16 @@ bad_area:
698 up_read(&mm->mmap_sem);
699
700 if (user_mode(regs)) {
701 +
702 +#ifdef CONFIG_PAX_PAGEEXEC
703 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
704 + if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
705 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
706 + do_group_exit(SIGKILL);
707 + }
708 + }
709 +#endif
710 +
711 if (exception_trace && printk_ratelimit())
712 printk("%s%s[%d]: segfault at %08lx pc %08lx "
713 "sp %08lx ecr %lu\n",
714 diff -urNp linux-2.6.39.4/arch/frv/include/asm/kmap_types.h linux-2.6.39.4/arch/frv/include/asm/kmap_types.h
715 --- linux-2.6.39.4/arch/frv/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
716 +++ linux-2.6.39.4/arch/frv/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
717 @@ -23,6 +23,7 @@ enum km_type {
718 KM_IRQ1,
719 KM_SOFTIRQ0,
720 KM_SOFTIRQ1,
721 + KM_CLEARPAGE,
722 KM_TYPE_NR
723 };
724
725 diff -urNp linux-2.6.39.4/arch/frv/mm/elf-fdpic.c linux-2.6.39.4/arch/frv/mm/elf-fdpic.c
726 --- linux-2.6.39.4/arch/frv/mm/elf-fdpic.c 2011-05-19 00:06:34.000000000 -0400
727 +++ linux-2.6.39.4/arch/frv/mm/elf-fdpic.c 2011-08-05 19:44:33.000000000 -0400
728 @@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
729 if (addr) {
730 addr = PAGE_ALIGN(addr);
731 vma = find_vma(current->mm, addr);
732 - if (TASK_SIZE - len >= addr &&
733 - (!vma || addr + len <= vma->vm_start))
734 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
735 goto success;
736 }
737
738 @@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
739 for (; vma; vma = vma->vm_next) {
740 if (addr > limit)
741 break;
742 - if (addr + len <= vma->vm_start)
743 + if (check_heap_stack_gap(vma, addr, len))
744 goto success;
745 addr = vma->vm_end;
746 }
747 @@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
748 for (; vma; vma = vma->vm_next) {
749 if (addr > limit)
750 break;
751 - if (addr + len <= vma->vm_start)
752 + if (check_heap_stack_gap(vma, addr, len))
753 goto success;
754 addr = vma->vm_end;
755 }
756 diff -urNp linux-2.6.39.4/arch/ia64/include/asm/elf.h linux-2.6.39.4/arch/ia64/include/asm/elf.h
757 --- linux-2.6.39.4/arch/ia64/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
758 +++ linux-2.6.39.4/arch/ia64/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
759 @@ -42,6 +42,13 @@
760 */
761 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
762
763 +#ifdef CONFIG_PAX_ASLR
764 +#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
765 +
766 +#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
767 +#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
768 +#endif
769 +
770 #define PT_IA_64_UNWIND 0x70000001
771
772 /* IA-64 relocations: */
773 diff -urNp linux-2.6.39.4/arch/ia64/include/asm/pgtable.h linux-2.6.39.4/arch/ia64/include/asm/pgtable.h
774 --- linux-2.6.39.4/arch/ia64/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
775 +++ linux-2.6.39.4/arch/ia64/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
776 @@ -12,7 +12,7 @@
777 * David Mosberger-Tang <davidm@hpl.hp.com>
778 */
779
780 -
781 +#include <linux/const.h>
782 #include <asm/mman.h>
783 #include <asm/page.h>
784 #include <asm/processor.h>
785 @@ -143,6 +143,17 @@
786 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
787 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
788 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
789 +
790 +#ifdef CONFIG_PAX_PAGEEXEC
791 +# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
792 +# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
793 +# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
794 +#else
795 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
796 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
797 +# define PAGE_COPY_NOEXEC PAGE_COPY
798 +#endif
799 +
800 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
801 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
802 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
803 diff -urNp linux-2.6.39.4/arch/ia64/include/asm/spinlock.h linux-2.6.39.4/arch/ia64/include/asm/spinlock.h
804 --- linux-2.6.39.4/arch/ia64/include/asm/spinlock.h 2011-05-19 00:06:34.000000000 -0400
805 +++ linux-2.6.39.4/arch/ia64/include/asm/spinlock.h 2011-08-05 19:44:33.000000000 -0400
806 @@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
807 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
808
809 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
810 - ACCESS_ONCE(*p) = (tmp + 2) & ~1;
811 + ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
812 }
813
814 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
815 diff -urNp linux-2.6.39.4/arch/ia64/include/asm/uaccess.h linux-2.6.39.4/arch/ia64/include/asm/uaccess.h
816 --- linux-2.6.39.4/arch/ia64/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
817 +++ linux-2.6.39.4/arch/ia64/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
818 @@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
819 const void *__cu_from = (from); \
820 long __cu_len = (n); \
821 \
822 - if (__access_ok(__cu_to, __cu_len, get_fs())) \
823 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
825 __cu_len; \
826 })
827 @@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
828 long __cu_len = (n); \
829 \
830 __chk_user_ptr(__cu_from); \
831 - if (__access_ok(__cu_from, __cu_len, get_fs())) \
832 + if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
833 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
834 __cu_len; \
835 })
836 diff -urNp linux-2.6.39.4/arch/ia64/kernel/module.c linux-2.6.39.4/arch/ia64/kernel/module.c
837 --- linux-2.6.39.4/arch/ia64/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
838 +++ linux-2.6.39.4/arch/ia64/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
839 @@ -315,8 +315,7 @@ module_alloc (unsigned long size)
840 void
841 module_free (struct module *mod, void *module_region)
842 {
843 - if (mod && mod->arch.init_unw_table &&
844 - module_region == mod->module_init) {
845 + if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
846 unw_remove_unwind_table(mod->arch.init_unw_table);
847 mod->arch.init_unw_table = NULL;
848 }
849 @@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
850 }
851
852 static inline int
853 +in_init_rx (const struct module *mod, uint64_t addr)
854 +{
855 + return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
856 +}
857 +
858 +static inline int
859 +in_init_rw (const struct module *mod, uint64_t addr)
860 +{
861 + return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
862 +}
863 +
864 +static inline int
865 in_init (const struct module *mod, uint64_t addr)
866 {
867 - return addr - (uint64_t) mod->module_init < mod->init_size;
868 + return in_init_rx(mod, addr) || in_init_rw(mod, addr);
869 +}
870 +
871 +static inline int
872 +in_core_rx (const struct module *mod, uint64_t addr)
873 +{
874 + return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
875 +}
876 +
877 +static inline int
878 +in_core_rw (const struct module *mod, uint64_t addr)
879 +{
880 + return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
881 }
882
883 static inline int
884 in_core (const struct module *mod, uint64_t addr)
885 {
886 - return addr - (uint64_t) mod->module_core < mod->core_size;
887 + return in_core_rx(mod, addr) || in_core_rw(mod, addr);
888 }
889
890 static inline int
891 @@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
892 break;
893
894 case RV_BDREL:
895 - val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
896 + if (in_init_rx(mod, val))
897 + val -= (uint64_t) mod->module_init_rx;
898 + else if (in_init_rw(mod, val))
899 + val -= (uint64_t) mod->module_init_rw;
900 + else if (in_core_rx(mod, val))
901 + val -= (uint64_t) mod->module_core_rx;
902 + else if (in_core_rw(mod, val))
903 + val -= (uint64_t) mod->module_core_rw;
904 break;
905
906 case RV_LTV:
907 @@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
908 * addresses have been selected...
909 */
910 uint64_t gp;
911 - if (mod->core_size > MAX_LTOFF)
912 + if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
913 /*
914 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
915 * at the end of the module.
916 */
917 - gp = mod->core_size - MAX_LTOFF / 2;
918 + gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
919 else
920 - gp = mod->core_size / 2;
921 - gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
922 + gp = (mod->core_size_rx + mod->core_size_rw) / 2;
923 + gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
924 mod->arch.gp = gp;
925 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
926 }
927 diff -urNp linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c
928 --- linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c 2011-05-19 00:06:34.000000000 -0400
929 +++ linux-2.6.39.4/arch/ia64/kernel/sys_ia64.c 2011-08-05 19:44:33.000000000 -0400
930 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
931 if (REGION_NUMBER(addr) == RGN_HPAGE)
932 addr = 0;
933 #endif
934 +
935 +#ifdef CONFIG_PAX_RANDMMAP
936 + if (mm->pax_flags & MF_PAX_RANDMMAP)
937 + addr = mm->free_area_cache;
938 + else
939 +#endif
940 +
941 if (!addr)
942 addr = mm->free_area_cache;
943
944 @@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
945 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
946 /* At this point: (!vma || addr < vma->vm_end). */
947 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
948 - if (start_addr != TASK_UNMAPPED_BASE) {
949 + if (start_addr != mm->mmap_base) {
950 /* Start a new search --- just in case we missed some holes. */
951 - addr = TASK_UNMAPPED_BASE;
952 + addr = mm->mmap_base;
953 goto full_search;
954 }
955 return -ENOMEM;
956 }
957 - if (!vma || addr + len <= vma->vm_start) {
958 + if (check_heap_stack_gap(vma, addr, len)) {
959 /* Remember the address where we stopped this search: */
960 mm->free_area_cache = addr + len;
961 return addr;
962 diff -urNp linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S
963 --- linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S 2011-05-19 00:06:34.000000000 -0400
964 +++ linux-2.6.39.4/arch/ia64/kernel/vmlinux.lds.S 2011-08-05 19:44:33.000000000 -0400
965 @@ -199,7 +199,7 @@ SECTIONS {
966 /* Per-cpu data: */
967 . = ALIGN(PERCPU_PAGE_SIZE);
968 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
969 - __phys_per_cpu_start = __per_cpu_load;
970 + __phys_per_cpu_start = per_cpu_load;
971 /*
972 * ensure percpu data fits
973 * into percpu page size
974 diff -urNp linux-2.6.39.4/arch/ia64/mm/fault.c linux-2.6.39.4/arch/ia64/mm/fault.c
975 --- linux-2.6.39.4/arch/ia64/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
976 +++ linux-2.6.39.4/arch/ia64/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
977 @@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
978 return pte_present(pte);
979 }
980
981 +#ifdef CONFIG_PAX_PAGEEXEC
982 +void pax_report_insns(void *pc, void *sp)
983 +{
984 + unsigned long i;
985 +
986 + printk(KERN_ERR "PAX: bytes at PC: ");
987 + for (i = 0; i < 8; i++) {
988 + unsigned int c;
989 + if (get_user(c, (unsigned int *)pc+i))
990 + printk(KERN_CONT "???????? ");
991 + else
992 + printk(KERN_CONT "%08x ", c);
993 + }
994 + printk("\n");
995 +}
996 +#endif
997 +
998 void __kprobes
999 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1000 {
1001 @@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1002 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1003 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1004
1005 - if ((vma->vm_flags & mask) != mask)
1006 + if ((vma->vm_flags & mask) != mask) {
1007 +
1008 +#ifdef CONFIG_PAX_PAGEEXEC
1009 + if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1010 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1011 + goto bad_area;
1012 +
1013 + up_read(&mm->mmap_sem);
1014 + pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1015 + do_group_exit(SIGKILL);
1016 + }
1017 +#endif
1018 +
1019 goto bad_area;
1020
1021 + }
1022 +
1023 /*
1024 * If for any reason at all we couldn't handle the fault, make
1025 * sure we exit gracefully rather than endlessly redo the
1026 diff -urNp linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c
1027 --- linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400
1028 +++ linux-2.6.39.4/arch/ia64/mm/hugetlbpage.c 2011-08-05 19:44:33.000000000 -0400
1029 @@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area(
1030 /* At this point: (!vmm || addr < vmm->vm_end). */
1031 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1032 return -ENOMEM;
1033 - if (!vmm || (addr + len) <= vmm->vm_start)
1034 + if (check_heap_stack_gap(vmm, addr, len))
1035 return addr;
1036 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1037 }
1038 diff -urNp linux-2.6.39.4/arch/ia64/mm/init.c linux-2.6.39.4/arch/ia64/mm/init.c
1039 --- linux-2.6.39.4/arch/ia64/mm/init.c 2011-05-19 00:06:34.000000000 -0400
1040 +++ linux-2.6.39.4/arch/ia64/mm/init.c 2011-08-05 19:44:33.000000000 -0400
1041 @@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1042 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1043 vma->vm_end = vma->vm_start + PAGE_SIZE;
1044 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1045 +
1046 +#ifdef CONFIG_PAX_PAGEEXEC
1047 + if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1048 + vma->vm_flags &= ~VM_EXEC;
1049 +
1050 +#ifdef CONFIG_PAX_MPROTECT
1051 + if (current->mm->pax_flags & MF_PAX_MPROTECT)
1052 + vma->vm_flags &= ~VM_MAYEXEC;
1053 +#endif
1054 +
1055 + }
1056 +#endif
1057 +
1058 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1059 down_write(&current->mm->mmap_sem);
1060 if (insert_vm_struct(current->mm, vma)) {
1061 diff -urNp linux-2.6.39.4/arch/m32r/lib/usercopy.c linux-2.6.39.4/arch/m32r/lib/usercopy.c
1062 --- linux-2.6.39.4/arch/m32r/lib/usercopy.c 2011-05-19 00:06:34.000000000 -0400
1063 +++ linux-2.6.39.4/arch/m32r/lib/usercopy.c 2011-08-05 19:44:33.000000000 -0400
1064 @@ -14,6 +14,9 @@
1065 unsigned long
1066 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1067 {
1068 + if ((long)n < 0)
1069 + return n;
1070 +
1071 prefetch(from);
1072 if (access_ok(VERIFY_WRITE, to, n))
1073 __copy_user(to,from,n);
1074 @@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1075 unsigned long
1076 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1077 {
1078 + if ((long)n < 0)
1079 + return n;
1080 +
1081 prefetchw(to);
1082 if (access_ok(VERIFY_READ, from, n))
1083 __copy_user_zeroing(to,from,n);
1084 diff -urNp linux-2.6.39.4/arch/mips/include/asm/elf.h linux-2.6.39.4/arch/mips/include/asm/elf.h
1085 --- linux-2.6.39.4/arch/mips/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
1086 +++ linux-2.6.39.4/arch/mips/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
1087 @@ -372,13 +372,16 @@ extern const char *__elf_platform;
1088 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1089 #endif
1090
1091 +#ifdef CONFIG_PAX_ASLR
1092 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1093 +
1094 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1095 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1096 +#endif
1097 +
1098 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
1099 struct linux_binprm;
1100 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
1101 int uses_interp);
1102
1103 -struct mm_struct;
1104 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1105 -#define arch_randomize_brk arch_randomize_brk
1106 -
1107 #endif /* _ASM_ELF_H */
1108 diff -urNp linux-2.6.39.4/arch/mips/include/asm/page.h linux-2.6.39.4/arch/mips/include/asm/page.h
1109 --- linux-2.6.39.4/arch/mips/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
1110 +++ linux-2.6.39.4/arch/mips/include/asm/page.h 2011-08-05 19:44:33.000000000 -0400
1111 @@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1112 #ifdef CONFIG_CPU_MIPS32
1113 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1114 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1115 - #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1116 + #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1117 #else
1118 typedef struct { unsigned long long pte; } pte_t;
1119 #define pte_val(x) ((x).pte)
1120 diff -urNp linux-2.6.39.4/arch/mips/include/asm/system.h linux-2.6.39.4/arch/mips/include/asm/system.h
1121 --- linux-2.6.39.4/arch/mips/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
1122 +++ linux-2.6.39.4/arch/mips/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
1123 @@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1124 */
1125 #define __ARCH_WANT_UNLOCKED_CTXSW
1126
1127 -extern unsigned long arch_align_stack(unsigned long sp);
1128 +#define arch_align_stack(x) ((x) & ~0xfUL)
1129
1130 #endif /* _ASM_SYSTEM_H */
1131 diff -urNp linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c
1132 --- linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c 2011-05-19 00:06:34.000000000 -0400
1133 +++ linux-2.6.39.4/arch/mips/kernel/binfmt_elfn32.c 2011-08-05 19:44:33.000000000 -0400
1134 @@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1135 #undef ELF_ET_DYN_BASE
1136 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1137
1138 +#ifdef CONFIG_PAX_ASLR
1139 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1140 +
1141 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1142 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1143 +#endif
1144 +
1145 #include <asm/processor.h>
1146 #include <linux/module.h>
1147 #include <linux/elfcore.h>
1148 diff -urNp linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c
1149 --- linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c 2011-05-19 00:06:34.000000000 -0400
1150 +++ linux-2.6.39.4/arch/mips/kernel/binfmt_elfo32.c 2011-08-05 19:44:33.000000000 -0400
1151 @@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1152 #undef ELF_ET_DYN_BASE
1153 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1154
1155 +#ifdef CONFIG_PAX_ASLR
1156 +#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
1157 +
1158 +#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1159 +#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1160 +#endif
1161 +
1162 #include <asm/processor.h>
1163
1164 /*
1165 diff -urNp linux-2.6.39.4/arch/mips/kernel/process.c linux-2.6.39.4/arch/mips/kernel/process.c
1166 --- linux-2.6.39.4/arch/mips/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
1167 +++ linux-2.6.39.4/arch/mips/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
1168 @@ -473,15 +473,3 @@ unsigned long get_wchan(struct task_stru
1169 out:
1170 return pc;
1171 }
1172 -
1173 -/*
1174 - * Don't forget that the stack pointer must be aligned on a 8 bytes
1175 - * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1176 - */
1177 -unsigned long arch_align_stack(unsigned long sp)
1178 -{
1179 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1180 - sp -= get_random_int() & ~PAGE_MASK;
1181 -
1182 - return sp & ALMASK;
1183 -}
1184 diff -urNp linux-2.6.39.4/arch/mips/kernel/syscall.c linux-2.6.39.4/arch/mips/kernel/syscall.c
1185 --- linux-2.6.39.4/arch/mips/kernel/syscall.c 2011-05-19 00:06:34.000000000 -0400
1186 +++ linux-2.6.39.4/arch/mips/kernel/syscall.c 2011-08-05 19:44:33.000000000 -0400
1187 @@ -108,14 +108,18 @@ unsigned long arch_get_unmapped_area(str
1188 do_color_align = 0;
1189 if (filp || (flags & MAP_SHARED))
1190 do_color_align = 1;
1191 +
1192 +#ifdef CONFIG_PAX_RANDMMAP
1193 + if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1194 +#endif
1195 +
1196 if (addr) {
1197 if (do_color_align)
1198 addr = COLOUR_ALIGN(addr, pgoff);
1199 else
1200 addr = PAGE_ALIGN(addr);
1201 vmm = find_vma(current->mm, addr);
1202 - if (task_size - len >= addr &&
1203 - (!vmm || addr + len <= vmm->vm_start))
1204 + if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1205 return addr;
1206 }
1207 addr = current->mm->mmap_base;
1208 @@ -128,7 +132,7 @@ unsigned long arch_get_unmapped_area(str
1209 /* At this point: (!vmm || addr < vmm->vm_end). */
1210 if (task_size - len < addr)
1211 return -ENOMEM;
1212 - if (!vmm || addr + len <= vmm->vm_start)
1213 + if (check_heap_stack_gap(vmm, addr, len))
1214 return addr;
1215 addr = vmm->vm_end;
1216 if (do_color_align)
1217 @@ -154,33 +158,6 @@ void arch_pick_mmap_layout(struct mm_str
1218 mm->unmap_area = arch_unmap_area;
1219 }
1220
1221 -static inline unsigned long brk_rnd(void)
1222 -{
1223 - unsigned long rnd = get_random_int();
1224 -
1225 - rnd = rnd << PAGE_SHIFT;
1226 - /* 8MB for 32bit, 256MB for 64bit */
1227 - if (TASK_IS_32BIT_ADDR)
1228 - rnd = rnd & 0x7ffffful;
1229 - else
1230 - rnd = rnd & 0xffffffful;
1231 -
1232 - return rnd;
1233 -}
1234 -
1235 -unsigned long arch_randomize_brk(struct mm_struct *mm)
1236 -{
1237 - unsigned long base = mm->brk;
1238 - unsigned long ret;
1239 -
1240 - ret = PAGE_ALIGN(base + brk_rnd());
1241 -
1242 - if (ret < mm->brk)
1243 - return mm->brk;
1244 -
1245 - return ret;
1246 -}
1247 -
1248 SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
1249 unsigned long, prot, unsigned long, flags, unsigned long,
1250 fd, off_t, offset)
1251 diff -urNp linux-2.6.39.4/arch/mips/mm/fault.c linux-2.6.39.4/arch/mips/mm/fault.c
1252 --- linux-2.6.39.4/arch/mips/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
1253 +++ linux-2.6.39.4/arch/mips/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
1254 @@ -28,6 +28,23 @@
1255 #include <asm/highmem.h> /* For VMALLOC_END */
1256 #include <linux/kdebug.h>
1257
1258 +#ifdef CONFIG_PAX_PAGEEXEC
1259 +void pax_report_insns(void *pc, void *sp)
1260 +{
1261 + unsigned long i;
1262 +
1263 + printk(KERN_ERR "PAX: bytes at PC: ");
1264 + for (i = 0; i < 5; i++) {
1265 + unsigned int c;
1266 + if (get_user(c, (unsigned int *)pc+i))
1267 + printk(KERN_CONT "???????? ");
1268 + else
1269 + printk(KERN_CONT "%08x ", c);
1270 + }
1271 + printk("\n");
1272 +}
1273 +#endif
1274 +
1275 /*
1276 * This routine handles page faults. It determines the address,
1277 * and the problem, and then passes it off to one of the appropriate
1278 diff -urNp linux-2.6.39.4/arch/parisc/include/asm/elf.h linux-2.6.39.4/arch/parisc/include/asm/elf.h
1279 --- linux-2.6.39.4/arch/parisc/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
1280 +++ linux-2.6.39.4/arch/parisc/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
1281 @@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration..
1282
1283 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1284
1285 +#ifdef CONFIG_PAX_ASLR
1286 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
1287 +
1288 +#define PAX_DELTA_MMAP_LEN 16
1289 +#define PAX_DELTA_STACK_LEN 16
1290 +#endif
1291 +
1292 /* This yields a mask that user programs can use to figure out what
1293 instruction set this CPU supports. This could be done in user space,
1294 but it's not easy, and we've already done it here. */
1295 diff -urNp linux-2.6.39.4/arch/parisc/include/asm/pgtable.h linux-2.6.39.4/arch/parisc/include/asm/pgtable.h
1296 --- linux-2.6.39.4/arch/parisc/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
1297 +++ linux-2.6.39.4/arch/parisc/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
1298 @@ -207,6 +207,17 @@ struct vm_area_struct;
1299 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1300 #define PAGE_COPY PAGE_EXECREAD
1301 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1302 +
1303 +#ifdef CONFIG_PAX_PAGEEXEC
1304 +# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1305 +# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1306 +# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1307 +#else
1308 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
1309 +# define PAGE_COPY_NOEXEC PAGE_COPY
1310 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
1311 +#endif
1312 +
1313 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1314 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1315 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1316 diff -urNp linux-2.6.39.4/arch/parisc/kernel/module.c linux-2.6.39.4/arch/parisc/kernel/module.c
1317 --- linux-2.6.39.4/arch/parisc/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
1318 +++ linux-2.6.39.4/arch/parisc/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
1319 @@ -96,16 +96,38 @@
1320
1321 /* three functions to determine where in the module core
1322 * or init pieces the location is */
1323 +static inline int in_init_rx(struct module *me, void *loc)
1324 +{
1325 + return (loc >= me->module_init_rx &&
1326 + loc < (me->module_init_rx + me->init_size_rx));
1327 +}
1328 +
1329 +static inline int in_init_rw(struct module *me, void *loc)
1330 +{
1331 + return (loc >= me->module_init_rw &&
1332 + loc < (me->module_init_rw + me->init_size_rw));
1333 +}
1334 +
1335 static inline int in_init(struct module *me, void *loc)
1336 {
1337 - return (loc >= me->module_init &&
1338 - loc <= (me->module_init + me->init_size));
1339 + return in_init_rx(me, loc) || in_init_rw(me, loc);
1340 +}
1341 +
1342 +static inline int in_core_rx(struct module *me, void *loc)
1343 +{
1344 + return (loc >= me->module_core_rx &&
1345 + loc < (me->module_core_rx + me->core_size_rx));
1346 +}
1347 +
1348 +static inline int in_core_rw(struct module *me, void *loc)
1349 +{
1350 + return (loc >= me->module_core_rw &&
1351 + loc < (me->module_core_rw + me->core_size_rw));
1352 }
1353
1354 static inline int in_core(struct module *me, void *loc)
1355 {
1356 - return (loc >= me->module_core &&
1357 - loc <= (me->module_core + me->core_size));
1358 + return in_core_rx(me, loc) || in_core_rw(me, loc);
1359 }
1360
1361 static inline int in_local(struct module *me, void *loc)
1362 @@ -365,13 +387,13 @@ int module_frob_arch_sections(CONST Elf_
1363 }
1364
1365 /* align things a bit */
1366 - me->core_size = ALIGN(me->core_size, 16);
1367 - me->arch.got_offset = me->core_size;
1368 - me->core_size += gots * sizeof(struct got_entry);
1369 -
1370 - me->core_size = ALIGN(me->core_size, 16);
1371 - me->arch.fdesc_offset = me->core_size;
1372 - me->core_size += fdescs * sizeof(Elf_Fdesc);
1373 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1374 + me->arch.got_offset = me->core_size_rw;
1375 + me->core_size_rw += gots * sizeof(struct got_entry);
1376 +
1377 + me->core_size_rw = ALIGN(me->core_size_rw, 16);
1378 + me->arch.fdesc_offset = me->core_size_rw;
1379 + me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1380
1381 me->arch.got_max = gots;
1382 me->arch.fdesc_max = fdescs;
1383 @@ -389,7 +411,7 @@ static Elf64_Word get_got(struct module
1384
1385 BUG_ON(value == 0);
1386
1387 - got = me->module_core + me->arch.got_offset;
1388 + got = me->module_core_rw + me->arch.got_offset;
1389 for (i = 0; got[i].addr; i++)
1390 if (got[i].addr == value)
1391 goto out;
1392 @@ -407,7 +429,7 @@ static Elf64_Word get_got(struct module
1393 #ifdef CONFIG_64BIT
1394 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1395 {
1396 - Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1397 + Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1398
1399 if (!value) {
1400 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1401 @@ -425,7 +447,7 @@ static Elf_Addr get_fdesc(struct module
1402
1403 /* Create new one */
1404 fdesc->addr = value;
1405 - fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1406 + fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1407 return (Elf_Addr)fdesc;
1408 }
1409 #endif /* CONFIG_64BIT */
1410 @@ -849,7 +871,7 @@ register_unwind_table(struct module *me,
1411
1412 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1413 end = table + sechdrs[me->arch.unwind_section].sh_size;
1414 - gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1415 + gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1416
1417 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1418 me->arch.unwind_section, table, end, gp);
1419 diff -urNp linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c
1420 --- linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c 2011-05-19 00:06:34.000000000 -0400
1421 +++ linux-2.6.39.4/arch/parisc/kernel/sys_parisc.c 2011-08-05 19:44:33.000000000 -0400
1422 @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1423 /* At this point: (!vma || addr < vma->vm_end). */
1424 if (TASK_SIZE - len < addr)
1425 return -ENOMEM;
1426 - if (!vma || addr + len <= vma->vm_start)
1427 + if (check_heap_stack_gap(vma, addr, len))
1428 return addr;
1429 addr = vma->vm_end;
1430 }
1431 @@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1432 /* At this point: (!vma || addr < vma->vm_end). */
1433 if (TASK_SIZE - len < addr)
1434 return -ENOMEM;
1435 - if (!vma || addr + len <= vma->vm_start)
1436 + if (check_heap_stack_gap(vma, addr, len))
1437 return addr;
1438 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1439 if (addr < vma->vm_end) /* handle wraparound */
1440 @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1441 if (flags & MAP_FIXED)
1442 return addr;
1443 if (!addr)
1444 - addr = TASK_UNMAPPED_BASE;
1445 + addr = current->mm->mmap_base;
1446
1447 if (filp) {
1448 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1449 diff -urNp linux-2.6.39.4/arch/parisc/kernel/traps.c linux-2.6.39.4/arch/parisc/kernel/traps.c
1450 --- linux-2.6.39.4/arch/parisc/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
1451 +++ linux-2.6.39.4/arch/parisc/kernel/traps.c 2011-08-05 19:44:33.000000000 -0400
1452 @@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1453
1454 down_read(&current->mm->mmap_sem);
1455 vma = find_vma(current->mm,regs->iaoq[0]);
1456 - if (vma && (regs->iaoq[0] >= vma->vm_start)
1457 - && (vma->vm_flags & VM_EXEC)) {
1458 -
1459 + if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1460 fault_address = regs->iaoq[0];
1461 fault_space = regs->iasq[0];
1462
1463 diff -urNp linux-2.6.39.4/arch/parisc/mm/fault.c linux-2.6.39.4/arch/parisc/mm/fault.c
1464 --- linux-2.6.39.4/arch/parisc/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
1465 +++ linux-2.6.39.4/arch/parisc/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
1466 @@ -15,6 +15,7 @@
1467 #include <linux/sched.h>
1468 #include <linux/interrupt.h>
1469 #include <linux/module.h>
1470 +#include <linux/unistd.h>
1471
1472 #include <asm/uaccess.h>
1473 #include <asm/traps.h>
1474 @@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1475 static unsigned long
1476 parisc_acctyp(unsigned long code, unsigned int inst)
1477 {
1478 - if (code == 6 || code == 16)
1479 + if (code == 6 || code == 7 || code == 16)
1480 return VM_EXEC;
1481
1482 switch (inst & 0xf0000000) {
1483 @@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1484 }
1485 #endif
1486
1487 +#ifdef CONFIG_PAX_PAGEEXEC
1488 +/*
1489 + * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1490 + *
1491 + * returns 1 when task should be killed
1492 + * 2 when rt_sigreturn trampoline was detected
1493 + * 3 when unpatched PLT trampoline was detected
1494 + */
1495 +static int pax_handle_fetch_fault(struct pt_regs *regs)
1496 +{
1497 +
1498 +#ifdef CONFIG_PAX_EMUPLT
1499 + int err;
1500 +
1501 + do { /* PaX: unpatched PLT emulation */
1502 + unsigned int bl, depwi;
1503 +
1504 + err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1505 + err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1506 +
1507 + if (err)
1508 + break;
1509 +
1510 + if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1511 + unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1512 +
1513 + err = get_user(ldw, (unsigned int *)addr);
1514 + err |= get_user(bv, (unsigned int *)(addr+4));
1515 + err |= get_user(ldw2, (unsigned int *)(addr+8));
1516 +
1517 + if (err)
1518 + break;
1519 +
1520 + if (ldw == 0x0E801096U &&
1521 + bv == 0xEAC0C000U &&
1522 + ldw2 == 0x0E881095U)
1523 + {
1524 + unsigned int resolver, map;
1525 +
1526 + err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1527 + err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1528 + if (err)
1529 + break;
1530 +
1531 + regs->gr[20] = instruction_pointer(regs)+8;
1532 + regs->gr[21] = map;
1533 + regs->gr[22] = resolver;
1534 + regs->iaoq[0] = resolver | 3UL;
1535 + regs->iaoq[1] = regs->iaoq[0] + 4;
1536 + return 3;
1537 + }
1538 + }
1539 + } while (0);
1540 +#endif
1541 +
1542 +#ifdef CONFIG_PAX_EMUTRAMP
1543 +
1544 +#ifndef CONFIG_PAX_EMUSIGRT
1545 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1546 + return 1;
1547 +#endif
1548 +
1549 + do { /* PaX: rt_sigreturn emulation */
1550 + unsigned int ldi1, ldi2, bel, nop;
1551 +
1552 + err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1553 + err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1554 + err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1555 + err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1556 +
1557 + if (err)
1558 + break;
1559 +
1560 + if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1561 + ldi2 == 0x3414015AU &&
1562 + bel == 0xE4008200U &&
1563 + nop == 0x08000240U)
1564 + {
1565 + regs->gr[25] = (ldi1 & 2) >> 1;
1566 + regs->gr[20] = __NR_rt_sigreturn;
1567 + regs->gr[31] = regs->iaoq[1] + 16;
1568 + regs->sr[0] = regs->iasq[1];
1569 + regs->iaoq[0] = 0x100UL;
1570 + regs->iaoq[1] = regs->iaoq[0] + 4;
1571 + regs->iasq[0] = regs->sr[2];
1572 + regs->iasq[1] = regs->sr[2];
1573 + return 2;
1574 + }
1575 + } while (0);
1576 +#endif
1577 +
1578 + return 1;
1579 +}
1580 +
1581 +void pax_report_insns(void *pc, void *sp)
1582 +{
1583 + unsigned long i;
1584 +
1585 + printk(KERN_ERR "PAX: bytes at PC: ");
1586 + for (i = 0; i < 5; i++) {
1587 + unsigned int c;
1588 + if (get_user(c, (unsigned int *)pc+i))
1589 + printk(KERN_CONT "???????? ");
1590 + else
1591 + printk(KERN_CONT "%08x ", c);
1592 + }
1593 + printk("\n");
1594 +}
1595 +#endif
1596 +
1597 int fixup_exception(struct pt_regs *regs)
1598 {
1599 const struct exception_table_entry *fix;
1600 @@ -192,8 +303,33 @@ good_area:
1601
1602 acc_type = parisc_acctyp(code,regs->iir);
1603
1604 - if ((vma->vm_flags & acc_type) != acc_type)
1605 + if ((vma->vm_flags & acc_type) != acc_type) {
1606 +
1607 +#ifdef CONFIG_PAX_PAGEEXEC
1608 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
1609 + (address & ~3UL) == instruction_pointer(regs))
1610 + {
1611 + up_read(&mm->mmap_sem);
1612 + switch (pax_handle_fetch_fault(regs)) {
1613 +
1614 +#ifdef CONFIG_PAX_EMUPLT
1615 + case 3:
1616 + return;
1617 +#endif
1618 +
1619 +#ifdef CONFIG_PAX_EMUTRAMP
1620 + case 2:
1621 + return;
1622 +#endif
1623 +
1624 + }
1625 + pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
1626 + do_group_exit(SIGKILL);
1627 + }
1628 +#endif
1629 +
1630 goto bad_area;
1631 + }
1632
1633 /*
1634 * If for any reason at all we couldn't handle the fault, make
1635 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/elf.h linux-2.6.39.4/arch/powerpc/include/asm/elf.h
1636 --- linux-2.6.39.4/arch/powerpc/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
1637 +++ linux-2.6.39.4/arch/powerpc/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
1638 @@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
1639 the loader. We need to make sure that it is out of the way of the program
1640 that it will "exec", and that there is sufficient room for the brk. */
1641
1642 -extern unsigned long randomize_et_dyn(unsigned long base);
1643 -#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
1644 +#define ELF_ET_DYN_BASE (0x20000000)
1645 +
1646 +#ifdef CONFIG_PAX_ASLR
1647 +#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
1648 +
1649 +#ifdef __powerpc64__
1650 +#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
1651 +#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
1652 +#else
1653 +#define PAX_DELTA_MMAP_LEN 15
1654 +#define PAX_DELTA_STACK_LEN 15
1655 +#endif
1656 +#endif
1657
1658 /*
1659 * Our registers are always unsigned longs, whether we're a 32 bit
1660 @@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s
1661 (0x7ff >> (PAGE_SHIFT - 12)) : \
1662 (0x3ffff >> (PAGE_SHIFT - 12)))
1663
1664 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1665 -#define arch_randomize_brk arch_randomize_brk
1666 -
1667 #endif /* __KERNEL__ */
1668
1669 /*
1670 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h
1671 --- linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
1672 +++ linux-2.6.39.4/arch/powerpc/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
1673 @@ -27,6 +27,7 @@ enum km_type {
1674 KM_PPC_SYNC_PAGE,
1675 KM_PPC_SYNC_ICACHE,
1676 KM_KDB,
1677 + KM_CLEARPAGE,
1678 KM_TYPE_NR
1679 };
1680
1681 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/page_64.h linux-2.6.39.4/arch/powerpc/include/asm/page_64.h
1682 --- linux-2.6.39.4/arch/powerpc/include/asm/page_64.h 2011-05-19 00:06:34.000000000 -0400
1683 +++ linux-2.6.39.4/arch/powerpc/include/asm/page_64.h 2011-08-05 19:44:33.000000000 -0400
1684 @@ -172,15 +172,18 @@ do { \
1685 * stack by default, so in the absence of a PT_GNU_STACK program header
1686 * we turn execute permission off.
1687 */
1688 -#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1689 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1690 +#define VM_STACK_DEFAULT_FLAGS32 \
1691 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1692 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1693
1694 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1695 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1696
1697 +#ifndef CONFIG_PAX_PAGEEXEC
1698 #define VM_STACK_DEFAULT_FLAGS \
1699 (is_32bit_task() ? \
1700 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
1701 +#endif
1702
1703 #include <asm-generic/getorder.h>
1704
1705 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/page.h linux-2.6.39.4/arch/powerpc/include/asm/page.h
1706 --- linux-2.6.39.4/arch/powerpc/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
1707 +++ linux-2.6.39.4/arch/powerpc/include/asm/page.h 2011-08-05 19:44:33.000000000 -0400
1708 @@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr;
1709 * and needs to be executable. This means the whole heap ends
1710 * up being executable.
1711 */
1712 -#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
1713 - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1714 +#define VM_DATA_DEFAULT_FLAGS32 \
1715 + (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
1716 + VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1717
1718 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
1719 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
1720 @@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr;
1721 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
1722 #endif
1723
1724 +#define ktla_ktva(addr) (addr)
1725 +#define ktva_ktla(addr) (addr)
1726 +
1727 #ifndef __ASSEMBLY__
1728
1729 #undef STRICT_MM_TYPECHECKS
1730 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h
1731 --- linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
1732 +++ linux-2.6.39.4/arch/powerpc/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
1733 @@ -2,6 +2,7 @@
1734 #define _ASM_POWERPC_PGTABLE_H
1735 #ifdef __KERNEL__
1736
1737 +#include <linux/const.h>
1738 #ifndef __ASSEMBLY__
1739 #include <asm/processor.h> /* For TASK_SIZE */
1740 #include <asm/mmu.h>
1741 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h
1742 --- linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h 2011-05-19 00:06:34.000000000 -0400
1743 +++ linux-2.6.39.4/arch/powerpc/include/asm/pte-hash32.h 2011-08-05 19:44:33.000000000 -0400
1744 @@ -21,6 +21,7 @@
1745 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
1746 #define _PAGE_USER 0x004 /* usermode access allowed */
1747 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
1748 +#define _PAGE_EXEC _PAGE_GUARDED
1749 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
1750 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
1751 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
1752 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/reg.h linux-2.6.39.4/arch/powerpc/include/asm/reg.h
1753 --- linux-2.6.39.4/arch/powerpc/include/asm/reg.h 2011-05-19 00:06:34.000000000 -0400
1754 +++ linux-2.6.39.4/arch/powerpc/include/asm/reg.h 2011-08-05 19:44:33.000000000 -0400
1755 @@ -201,6 +201,7 @@
1756 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
1757 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
1758 #define DSISR_NOHPTE 0x40000000 /* no translation found */
1759 +#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
1760 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
1761 #define DSISR_ISSTORE 0x02000000 /* access was a store */
1762 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
1763 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/system.h linux-2.6.39.4/arch/powerpc/include/asm/system.h
1764 --- linux-2.6.39.4/arch/powerpc/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
1765 +++ linux-2.6.39.4/arch/powerpc/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
1766 @@ -533,7 +533,7 @@ __cmpxchg_local(volatile void *ptr, unsi
1767 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
1768 #endif
1769
1770 -extern unsigned long arch_align_stack(unsigned long sp);
1771 +#define arch_align_stack(x) ((x) & ~0xfUL)
1772
1773 /* Used in very early kernel initialization. */
1774 extern unsigned long reloc_offset(void);
1775 diff -urNp linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h
1776 --- linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
1777 +++ linux-2.6.39.4/arch/powerpc/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
1778 @@ -13,6 +13,8 @@
1779 #define VERIFY_READ 0
1780 #define VERIFY_WRITE 1
1781
1782 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
1783 +
1784 /*
1785 * The fs value determines whether argument validity checking should be
1786 * performed or not. If get_fs() == USER_DS, checking is performed, with
1787 @@ -327,52 +329,6 @@ do { \
1788 extern unsigned long __copy_tofrom_user(void __user *to,
1789 const void __user *from, unsigned long size);
1790
1791 -#ifndef __powerpc64__
1792 -
1793 -static inline unsigned long copy_from_user(void *to,
1794 - const void __user *from, unsigned long n)
1795 -{
1796 - unsigned long over;
1797 -
1798 - if (access_ok(VERIFY_READ, from, n))
1799 - return __copy_tofrom_user((__force void __user *)to, from, n);
1800 - if ((unsigned long)from < TASK_SIZE) {
1801 - over = (unsigned long)from + n - TASK_SIZE;
1802 - return __copy_tofrom_user((__force void __user *)to, from,
1803 - n - over) + over;
1804 - }
1805 - return n;
1806 -}
1807 -
1808 -static inline unsigned long copy_to_user(void __user *to,
1809 - const void *from, unsigned long n)
1810 -{
1811 - unsigned long over;
1812 -
1813 - if (access_ok(VERIFY_WRITE, to, n))
1814 - return __copy_tofrom_user(to, (__force void __user *)from, n);
1815 - if ((unsigned long)to < TASK_SIZE) {
1816 - over = (unsigned long)to + n - TASK_SIZE;
1817 - return __copy_tofrom_user(to, (__force void __user *)from,
1818 - n - over) + over;
1819 - }
1820 - return n;
1821 -}
1822 -
1823 -#else /* __powerpc64__ */
1824 -
1825 -#define __copy_in_user(to, from, size) \
1826 - __copy_tofrom_user((to), (from), (size))
1827 -
1828 -extern unsigned long copy_from_user(void *to, const void __user *from,
1829 - unsigned long n);
1830 -extern unsigned long copy_to_user(void __user *to, const void *from,
1831 - unsigned long n);
1832 -extern unsigned long copy_in_user(void __user *to, const void __user *from,
1833 - unsigned long n);
1834 -
1835 -#endif /* __powerpc64__ */
1836 -
1837 static inline unsigned long __copy_from_user_inatomic(void *to,
1838 const void __user *from, unsigned long n)
1839 {
1840 @@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
1841 if (ret == 0)
1842 return 0;
1843 }
1844 +
1845 + if (!__builtin_constant_p(n))
1846 + check_object_size(to, n, false);
1847 +
1848 return __copy_tofrom_user((__force void __user *)to, from, n);
1849 }
1850
1851 @@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
1852 if (ret == 0)
1853 return 0;
1854 }
1855 +
1856 + if (!__builtin_constant_p(n))
1857 + check_object_size(from, n, true);
1858 +
1859 return __copy_tofrom_user(to, (__force const void __user *)from, n);
1860 }
1861
1862 @@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
1863 return __copy_to_user_inatomic(to, from, size);
1864 }
1865
1866 +#ifndef __powerpc64__
1867 +
1868 +static inline unsigned long __must_check copy_from_user(void *to,
1869 + const void __user *from, unsigned long n)
1870 +{
1871 + unsigned long over;
1872 +
1873 + if ((long)n < 0)
1874 + return n;
1875 +
1876 + if (access_ok(VERIFY_READ, from, n)) {
1877 + if (!__builtin_constant_p(n))
1878 + check_object_size(to, n, false);
1879 + return __copy_tofrom_user((__force void __user *)to, from, n);
1880 + }
1881 + if ((unsigned long)from < TASK_SIZE) {
1882 + over = (unsigned long)from + n - TASK_SIZE;
1883 + if (!__builtin_constant_p(n - over))
1884 + check_object_size(to, n - over, false);
1885 + return __copy_tofrom_user((__force void __user *)to, from,
1886 + n - over) + over;
1887 + }
1888 + return n;
1889 +}
1890 +
1891 +static inline unsigned long __must_check copy_to_user(void __user *to,
1892 + const void *from, unsigned long n)
1893 +{
1894 + unsigned long over;
1895 +
1896 + if ((long)n < 0)
1897 + return n;
1898 +
1899 + if (access_ok(VERIFY_WRITE, to, n)) {
1900 + if (!__builtin_constant_p(n))
1901 + check_object_size(from, n, true);
1902 + return __copy_tofrom_user(to, (__force void __user *)from, n);
1903 + }
1904 + if ((unsigned long)to < TASK_SIZE) {
1905 + over = (unsigned long)to + n - TASK_SIZE;
1906 + if (!__builtin_constant_p(n))
1907 + check_object_size(from, n - over, true);
1908 + return __copy_tofrom_user(to, (__force void __user *)from,
1909 + n - over) + over;
1910 + }
1911 + return n;
1912 +}
1913 +
1914 +#else /* __powerpc64__ */
1915 +
1916 +#define __copy_in_user(to, from, size) \
1917 + __copy_tofrom_user((to), (from), (size))
1918 +
1919 +static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
1920 +{
1921 + if ((long)n < 0 || n > INT_MAX)
1922 + return n;
1923 +
1924 + if (!__builtin_constant_p(n))
1925 + check_object_size(to, n, false);
1926 +
1927 + if (likely(access_ok(VERIFY_READ, from, n)))
1928 + n = __copy_from_user(to, from, n);
1929 + else
1930 + memset(to, 0, n);
1931 + return n;
1932 +}
1933 +
1934 +static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
1935 +{
1936 + if ((long)n < 0 || n > INT_MAX)
1937 + return n;
1938 +
1939 + if (likely(access_ok(VERIFY_WRITE, to, n))) {
1940 + if (!__builtin_constant_p(n))
1941 + check_object_size(from, n, true);
1942 + n = __copy_to_user(to, from, n);
1943 + }
1944 + return n;
1945 +}
1946 +
1947 +extern unsigned long copy_in_user(void __user *to, const void __user *from,
1948 + unsigned long n);
1949 +
1950 +#endif /* __powerpc64__ */
1951 +
1952 extern unsigned long __clear_user(void __user *addr, unsigned long size);
1953
1954 static inline unsigned long clear_user(void __user *addr, unsigned long size)
1955 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S
1956 --- linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S 2011-05-19 00:06:34.000000000 -0400
1957 +++ linux-2.6.39.4/arch/powerpc/kernel/exceptions-64e.S 2011-08-05 19:44:33.000000000 -0400
1958 @@ -495,6 +495,7 @@ storage_fault_common:
1959 std r14,_DAR(r1)
1960 std r15,_DSISR(r1)
1961 addi r3,r1,STACK_FRAME_OVERHEAD
1962 + bl .save_nvgprs
1963 mr r4,r14
1964 mr r5,r15
1965 ld r14,PACA_EXGEN+EX_R14(r13)
1966 @@ -504,8 +505,7 @@ storage_fault_common:
1967 cmpdi r3,0
1968 bne- 1f
1969 b .ret_from_except_lite
1970 -1: bl .save_nvgprs
1971 - mr r5,r3
1972 +1: mr r5,r3
1973 addi r3,r1,STACK_FRAME_OVERHEAD
1974 ld r4,_DAR(r1)
1975 bl .bad_page_fault
1976 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S
1977 --- linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S 2011-05-19 00:06:34.000000000 -0400
1978 +++ linux-2.6.39.4/arch/powerpc/kernel/exceptions-64s.S 2011-08-05 19:44:33.000000000 -0400
1979 @@ -848,10 +848,10 @@ handle_page_fault:
1980 11: ld r4,_DAR(r1)
1981 ld r5,_DSISR(r1)
1982 addi r3,r1,STACK_FRAME_OVERHEAD
1983 + bl .save_nvgprs
1984 bl .do_page_fault
1985 cmpdi r3,0
1986 beq+ 13f
1987 - bl .save_nvgprs
1988 mr r5,r3
1989 addi r3,r1,STACK_FRAME_OVERHEAD
1990 lwz r4,_DAR(r1)
1991 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/module_32.c linux-2.6.39.4/arch/powerpc/kernel/module_32.c
1992 --- linux-2.6.39.4/arch/powerpc/kernel/module_32.c 2011-05-19 00:06:34.000000000 -0400
1993 +++ linux-2.6.39.4/arch/powerpc/kernel/module_32.c 2011-08-05 19:44:33.000000000 -0400
1994 @@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
1995 me->arch.core_plt_section = i;
1996 }
1997 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
1998 - printk("Module doesn't contain .plt or .init.plt sections.\n");
1999 + printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2000 return -ENOEXEC;
2001 }
2002
2003 @@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2004
2005 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2006 /* Init, or core PLT? */
2007 - if (location >= mod->module_core
2008 - && location < mod->module_core + mod->core_size)
2009 + if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2010 + (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2011 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2012 - else
2013 + else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2014 + (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2015 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2016 + else {
2017 + printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2018 + return ~0UL;
2019 + }
2020
2021 /* Find this entry, or if that fails, the next avail. entry */
2022 while (entry->jump[0]) {
2023 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/module.c linux-2.6.39.4/arch/powerpc/kernel/module.c
2024 --- linux-2.6.39.4/arch/powerpc/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
2025 +++ linux-2.6.39.4/arch/powerpc/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
2026 @@ -31,11 +31,24 @@
2027
2028 LIST_HEAD(module_bug_list);
2029
2030 +#ifdef CONFIG_PAX_KERNEXEC
2031 void *module_alloc(unsigned long size)
2032 {
2033 if (size == 0)
2034 return NULL;
2035
2036 + return vmalloc(size);
2037 +}
2038 +
2039 +void *module_alloc_exec(unsigned long size)
2040 +#else
2041 +void *module_alloc(unsigned long size)
2042 +#endif
2043 +
2044 +{
2045 + if (size == 0)
2046 + return NULL;
2047 +
2048 return vmalloc_exec(size);
2049 }
2050
2051 @@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2052 vfree(module_region);
2053 }
2054
2055 +#ifdef CONFIG_PAX_KERNEXEC
2056 +void module_free_exec(struct module *mod, void *module_region)
2057 +{
2058 + module_free(mod, module_region);
2059 +}
2060 +#endif
2061 +
2062 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2063 const Elf_Shdr *sechdrs,
2064 const char *name)
2065 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/process.c linux-2.6.39.4/arch/powerpc/kernel/process.c
2066 --- linux-2.6.39.4/arch/powerpc/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
2067 +++ linux-2.6.39.4/arch/powerpc/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
2068 @@ -655,8 +655,8 @@ void show_regs(struct pt_regs * regs)
2069 * Lookup NIP late so we have the best change of getting the
2070 * above info out without failing
2071 */
2072 - printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2073 - printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2074 + printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2075 + printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2076 #endif
2077 show_stack(current, (unsigned long *) regs->gpr[1]);
2078 if (!user_mode(regs))
2079 @@ -1146,10 +1146,10 @@ void show_stack(struct task_struct *tsk,
2080 newsp = stack[0];
2081 ip = stack[STACK_FRAME_LR_SAVE];
2082 if (!firstframe || ip != lr) {
2083 - printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2084 + printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2085 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2086 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2087 - printk(" (%pS)",
2088 + printk(" (%pA)",
2089 (void *)current->ret_stack[curr_frame].ret);
2090 curr_frame--;
2091 }
2092 @@ -1169,7 +1169,7 @@ void show_stack(struct task_struct *tsk,
2093 struct pt_regs *regs = (struct pt_regs *)
2094 (sp + STACK_FRAME_OVERHEAD);
2095 lr = regs->link;
2096 - printk("--- Exception: %lx at %pS\n LR = %pS\n",
2097 + printk("--- Exception: %lx at %pA\n LR = %pA\n",
2098 regs->trap, (void *)regs->nip, (void *)lr);
2099 firstframe = 1;
2100 }
2101 @@ -1244,58 +1244,3 @@ void thread_info_cache_init(void)
2102 }
2103
2104 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2105 -
2106 -unsigned long arch_align_stack(unsigned long sp)
2107 -{
2108 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2109 - sp -= get_random_int() & ~PAGE_MASK;
2110 - return sp & ~0xf;
2111 -}
2112 -
2113 -static inline unsigned long brk_rnd(void)
2114 -{
2115 - unsigned long rnd = 0;
2116 -
2117 - /* 8MB for 32bit, 1GB for 64bit */
2118 - if (is_32bit_task())
2119 - rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2120 - else
2121 - rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2122 -
2123 - return rnd << PAGE_SHIFT;
2124 -}
2125 -
2126 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2127 -{
2128 - unsigned long base = mm->brk;
2129 - unsigned long ret;
2130 -
2131 -#ifdef CONFIG_PPC_STD_MMU_64
2132 - /*
2133 - * If we are using 1TB segments and we are allowed to randomise
2134 - * the heap, we can put it above 1TB so it is backed by a 1TB
2135 - * segment. Otherwise the heap will be in the bottom 1TB
2136 - * which always uses 256MB segments and this may result in a
2137 - * performance penalty.
2138 - */
2139 - if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2140 - base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2141 -#endif
2142 -
2143 - ret = PAGE_ALIGN(base + brk_rnd());
2144 -
2145 - if (ret < mm->brk)
2146 - return mm->brk;
2147 -
2148 - return ret;
2149 -}
2150 -
2151 -unsigned long randomize_et_dyn(unsigned long base)
2152 -{
2153 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2154 -
2155 - if (ret < base)
2156 - return base;
2157 -
2158 - return ret;
2159 -}
2160 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/signal_32.c linux-2.6.39.4/arch/powerpc/kernel/signal_32.c
2161 --- linux-2.6.39.4/arch/powerpc/kernel/signal_32.c 2011-05-19 00:06:34.000000000 -0400
2162 +++ linux-2.6.39.4/arch/powerpc/kernel/signal_32.c 2011-08-05 19:44:33.000000000 -0400
2163 @@ -858,7 +858,7 @@ int handle_rt_signal32(unsigned long sig
2164 /* Save user registers on the stack */
2165 frame = &rt_sf->uc.uc_mcontext;
2166 addr = frame;
2167 - if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2168 + if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2169 if (save_user_regs(regs, frame, 0, 1))
2170 goto badframe;
2171 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2172 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/signal_64.c linux-2.6.39.4/arch/powerpc/kernel/signal_64.c
2173 --- linux-2.6.39.4/arch/powerpc/kernel/signal_64.c 2011-05-19 00:06:34.000000000 -0400
2174 +++ linux-2.6.39.4/arch/powerpc/kernel/signal_64.c 2011-08-05 19:44:33.000000000 -0400
2175 @@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2176 current->thread.fpscr.val = 0;
2177
2178 /* Set up to return from userspace. */
2179 - if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2180 + if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2181 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2182 } else {
2183 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2184 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/traps.c linux-2.6.39.4/arch/powerpc/kernel/traps.c
2185 --- linux-2.6.39.4/arch/powerpc/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
2186 +++ linux-2.6.39.4/arch/powerpc/kernel/traps.c 2011-08-05 19:44:33.000000000 -0400
2187 @@ -96,6 +96,8 @@ static void pmac_backlight_unblank(void)
2188 static inline void pmac_backlight_unblank(void) { }
2189 #endif
2190
2191 +extern void gr_handle_kernel_exploit(void);
2192 +
2193 int die(const char *str, struct pt_regs *regs, long err)
2194 {
2195 static struct {
2196 @@ -170,6 +172,8 @@ int die(const char *str, struct pt_regs
2197 if (panic_on_oops)
2198 panic("Fatal exception");
2199
2200 + gr_handle_kernel_exploit();
2201 +
2202 oops_exit();
2203 do_exit(err);
2204
2205 diff -urNp linux-2.6.39.4/arch/powerpc/kernel/vdso.c linux-2.6.39.4/arch/powerpc/kernel/vdso.c
2206 --- linux-2.6.39.4/arch/powerpc/kernel/vdso.c 2011-05-19 00:06:34.000000000 -0400
2207 +++ linux-2.6.39.4/arch/powerpc/kernel/vdso.c 2011-08-05 19:44:33.000000000 -0400
2208 @@ -36,6 +36,7 @@
2209 #include <asm/firmware.h>
2210 #include <asm/vdso.h>
2211 #include <asm/vdso_datapage.h>
2212 +#include <asm/mman.h>
2213
2214 #include "setup.h"
2215
2216 @@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2217 vdso_base = VDSO32_MBASE;
2218 #endif
2219
2220 - current->mm->context.vdso_base = 0;
2221 + current->mm->context.vdso_base = ~0UL;
2222
2223 /* vDSO has a problem and was disabled, just don't "enable" it for the
2224 * process
2225 @@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2226 vdso_base = get_unmapped_area(NULL, vdso_base,
2227 (vdso_pages << PAGE_SHIFT) +
2228 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2229 - 0, 0);
2230 + 0, MAP_PRIVATE | MAP_EXECUTABLE);
2231 if (IS_ERR_VALUE(vdso_base)) {
2232 rc = vdso_base;
2233 goto fail_mmapsem;
2234 diff -urNp linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c
2235 --- linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c 2011-05-19 00:06:34.000000000 -0400
2236 +++ linux-2.6.39.4/arch/powerpc/lib/usercopy_64.c 2011-08-05 19:44:33.000000000 -0400
2237 @@ -9,22 +9,6 @@
2238 #include <linux/module.h>
2239 #include <asm/uaccess.h>
2240
2241 -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2242 -{
2243 - if (likely(access_ok(VERIFY_READ, from, n)))
2244 - n = __copy_from_user(to, from, n);
2245 - else
2246 - memset(to, 0, n);
2247 - return n;
2248 -}
2249 -
2250 -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2251 -{
2252 - if (likely(access_ok(VERIFY_WRITE, to, n)))
2253 - n = __copy_to_user(to, from, n);
2254 - return n;
2255 -}
2256 -
2257 unsigned long copy_in_user(void __user *to, const void __user *from,
2258 unsigned long n)
2259 {
2260 @@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2261 return n;
2262 }
2263
2264 -EXPORT_SYMBOL(copy_from_user);
2265 -EXPORT_SYMBOL(copy_to_user);
2266 EXPORT_SYMBOL(copy_in_user);
2267
2268 diff -urNp linux-2.6.39.4/arch/powerpc/mm/fault.c linux-2.6.39.4/arch/powerpc/mm/fault.c
2269 --- linux-2.6.39.4/arch/powerpc/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
2270 +++ linux-2.6.39.4/arch/powerpc/mm/fault.c 2011-08-05 19:44:33.000000000 -0400
2271 @@ -31,6 +31,10 @@
2272 #include <linux/kdebug.h>
2273 #include <linux/perf_event.h>
2274 #include <linux/magic.h>
2275 +#include <linux/slab.h>
2276 +#include <linux/pagemap.h>
2277 +#include <linux/compiler.h>
2278 +#include <linux/unistd.h>
2279
2280 #include <asm/firmware.h>
2281 #include <asm/page.h>
2282 @@ -42,6 +46,7 @@
2283 #include <asm/tlbflush.h>
2284 #include <asm/siginfo.h>
2285 #include <mm/mmu_decl.h>
2286 +#include <asm/ptrace.h>
2287
2288 #ifdef CONFIG_KPROBES
2289 static inline int notify_page_fault(struct pt_regs *regs)
2290 @@ -65,6 +70,33 @@ static inline int notify_page_fault(stru
2291 }
2292 #endif
2293
2294 +#ifdef CONFIG_PAX_PAGEEXEC
2295 +/*
2296 + * PaX: decide what to do with offenders (regs->nip = fault address)
2297 + *
2298 + * returns 1 when task should be killed
2299 + */
2300 +static int pax_handle_fetch_fault(struct pt_regs *regs)
2301 +{
2302 + return 1;
2303 +}
2304 +
2305 +void pax_report_insns(void *pc, void *sp)
2306 +{
2307 + unsigned long i;
2308 +
2309 + printk(KERN_ERR "PAX: bytes at PC: ");
2310 + for (i = 0; i < 5; i++) {
2311 + unsigned int c;
2312 + if (get_user(c, (unsigned int __user *)pc+i))
2313 + printk(KERN_CONT "???????? ");
2314 + else
2315 + printk(KERN_CONT "%08x ", c);
2316 + }
2317 + printk("\n");
2318 +}
2319 +#endif
2320 +
2321 /*
2322 * Check whether the instruction at regs->nip is a store using
2323 * an update addressing form which will update r1.
2324 @@ -135,7 +167,7 @@ int __kprobes do_page_fault(struct pt_re
2325 * indicate errors in DSISR but can validly be set in SRR1.
2326 */
2327 if (trap == 0x400)
2328 - error_code &= 0x48200000;
2329 + error_code &= 0x58200000;
2330 else
2331 is_write = error_code & DSISR_ISSTORE;
2332 #else
2333 @@ -258,7 +290,7 @@ good_area:
2334 * "undefined". Of those that can be set, this is the only
2335 * one which seems bad.
2336 */
2337 - if (error_code & 0x10000000)
2338 + if (error_code & DSISR_GUARDED)
2339 /* Guarded storage error. */
2340 goto bad_area;
2341 #endif /* CONFIG_8xx */
2342 @@ -273,7 +305,7 @@ good_area:
2343 * processors use the same I/D cache coherency mechanism
2344 * as embedded.
2345 */
2346 - if (error_code & DSISR_PROTFAULT)
2347 + if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
2348 goto bad_area;
2349 #endif /* CONFIG_PPC_STD_MMU */
2350
2351 @@ -342,6 +374,23 @@ bad_area:
2352 bad_area_nosemaphore:
2353 /* User mode accesses cause a SIGSEGV */
2354 if (user_mode(regs)) {
2355 +
2356 +#ifdef CONFIG_PAX_PAGEEXEC
2357 + if (mm->pax_flags & MF_PAX_PAGEEXEC) {
2358 +#ifdef CONFIG_PPC_STD_MMU
2359 + if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
2360 +#else
2361 + if (is_exec && regs->nip == address) {
2362 +#endif
2363 + switch (pax_handle_fetch_fault(regs)) {
2364 + }
2365 +
2366 + pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
2367 + do_group_exit(SIGKILL);
2368 + }
2369 + }
2370 +#endif
2371 +
2372 _exception(SIGSEGV, regs, code, address);
2373 return 0;
2374 }
2375 diff -urNp linux-2.6.39.4/arch/powerpc/mm/mmap_64.c linux-2.6.39.4/arch/powerpc/mm/mmap_64.c
2376 --- linux-2.6.39.4/arch/powerpc/mm/mmap_64.c 2011-05-19 00:06:34.000000000 -0400
2377 +++ linux-2.6.39.4/arch/powerpc/mm/mmap_64.c 2011-08-05 19:44:33.000000000 -0400
2378 @@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
2379 */
2380 if (mmap_is_legacy()) {
2381 mm->mmap_base = TASK_UNMAPPED_BASE;
2382 +
2383 +#ifdef CONFIG_PAX_RANDMMAP
2384 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2385 + mm->mmap_base += mm->delta_mmap;
2386 +#endif
2387 +
2388 mm->get_unmapped_area = arch_get_unmapped_area;
2389 mm->unmap_area = arch_unmap_area;
2390 } else {
2391 mm->mmap_base = mmap_base();
2392 +
2393 +#ifdef CONFIG_PAX_RANDMMAP
2394 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2395 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2396 +#endif
2397 +
2398 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2399 mm->unmap_area = arch_unmap_area_topdown;
2400 }
2401 diff -urNp linux-2.6.39.4/arch/powerpc/mm/slice.c linux-2.6.39.4/arch/powerpc/mm/slice.c
2402 --- linux-2.6.39.4/arch/powerpc/mm/slice.c 2011-05-19 00:06:34.000000000 -0400
2403 +++ linux-2.6.39.4/arch/powerpc/mm/slice.c 2011-08-05 19:44:33.000000000 -0400
2404 @@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
2405 if ((mm->task_size - len) < addr)
2406 return 0;
2407 vma = find_vma(mm, addr);
2408 - return (!vma || (addr + len) <= vma->vm_start);
2409 + return check_heap_stack_gap(vma, addr, len);
2410 }
2411
2412 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
2413 @@ -256,7 +256,7 @@ full_search:
2414 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
2415 continue;
2416 }
2417 - if (!vma || addr + len <= vma->vm_start) {
2418 + if (check_heap_stack_gap(vma, addr, len)) {
2419 /*
2420 * Remember the place where we stopped the search:
2421 */
2422 @@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
2423 }
2424 }
2425
2426 - addr = mm->mmap_base;
2427 - while (addr > len) {
2428 + if (mm->mmap_base < len)
2429 + addr = -ENOMEM;
2430 + else
2431 + addr = mm->mmap_base - len;
2432 +
2433 + while (!IS_ERR_VALUE(addr)) {
2434 /* Go down by chunk size */
2435 - addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
2436 + addr = _ALIGN_DOWN(addr, 1ul << pshift);
2437
2438 /* Check for hit with different page size */
2439 mask = slice_range_to_mask(addr, len);
2440 @@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
2441 * return with success:
2442 */
2443 vma = find_vma(mm, addr);
2444 - if (!vma || (addr + len) <= vma->vm_start) {
2445 + if (check_heap_stack_gap(vma, addr, len)) {
2446 /* remember the address as a hint for next time */
2447 if (use_cache)
2448 mm->free_area_cache = addr;
2449 @@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
2450 mm->cached_hole_size = vma->vm_start - addr;
2451
2452 /* try just below the current vma->vm_start */
2453 - addr = vma->vm_start;
2454 + addr = skip_heap_stack_gap(vma, len);
2455 }
2456
2457 /*
2458 @@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
2459 if (fixed && addr > (mm->task_size - len))
2460 return -EINVAL;
2461
2462 +#ifdef CONFIG_PAX_RANDMMAP
2463 + if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
2464 + addr = 0;
2465 +#endif
2466 +
2467 /* If hint, make sure it matches our alignment restrictions */
2468 if (!fixed && addr) {
2469 addr = _ALIGN_UP(addr, 1ul << pshift);
2470 diff -urNp linux-2.6.39.4/arch/s390/include/asm/elf.h linux-2.6.39.4/arch/s390/include/asm/elf.h
2471 --- linux-2.6.39.4/arch/s390/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
2472 +++ linux-2.6.39.4/arch/s390/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
2473 @@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
2474 the loader. We need to make sure that it is out of the way of the program
2475 that it will "exec", and that there is sufficient room for the brk. */
2476
2477 -extern unsigned long randomize_et_dyn(unsigned long base);
2478 -#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
2479 +#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
2480 +
2481 +#ifdef CONFIG_PAX_ASLR
2482 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
2483 +
2484 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2485 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
2486 +#endif
2487
2488 /* This yields a mask that user programs can use to figure out what
2489 instruction set this CPU supports. */
2490 @@ -222,7 +228,4 @@ struct linux_binprm;
2491 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
2492 int arch_setup_additional_pages(struct linux_binprm *, int);
2493
2494 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2495 -#define arch_randomize_brk arch_randomize_brk
2496 -
2497 #endif
2498 diff -urNp linux-2.6.39.4/arch/s390/include/asm/system.h linux-2.6.39.4/arch/s390/include/asm/system.h
2499 --- linux-2.6.39.4/arch/s390/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
2500 +++ linux-2.6.39.4/arch/s390/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
2501 @@ -255,7 +255,7 @@ extern void (*_machine_restart)(char *co
2502 extern void (*_machine_halt)(void);
2503 extern void (*_machine_power_off)(void);
2504
2505 -extern unsigned long arch_align_stack(unsigned long sp);
2506 +#define arch_align_stack(x) ((x) & ~0xfUL)
2507
2508 static inline int tprot(unsigned long addr)
2509 {
2510 diff -urNp linux-2.6.39.4/arch/s390/include/asm/uaccess.h linux-2.6.39.4/arch/s390/include/asm/uaccess.h
2511 --- linux-2.6.39.4/arch/s390/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
2512 +++ linux-2.6.39.4/arch/s390/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
2513 @@ -234,6 +234,10 @@ static inline unsigned long __must_check
2514 copy_to_user(void __user *to, const void *from, unsigned long n)
2515 {
2516 might_fault();
2517 +
2518 + if ((long)n < 0)
2519 + return n;
2520 +
2521 if (access_ok(VERIFY_WRITE, to, n))
2522 n = __copy_to_user(to, from, n);
2523 return n;
2524 @@ -259,6 +263,9 @@ copy_to_user(void __user *to, const void
2525 static inline unsigned long __must_check
2526 __copy_from_user(void *to, const void __user *from, unsigned long n)
2527 {
2528 + if ((long)n < 0)
2529 + return n;
2530 +
2531 if (__builtin_constant_p(n) && (n <= 256))
2532 return uaccess.copy_from_user_small(n, from, to);
2533 else
2534 @@ -293,6 +300,10 @@ copy_from_user(void *to, const void __us
2535 unsigned int sz = __compiletime_object_size(to);
2536
2537 might_fault();
2538 +
2539 + if ((long)n < 0)
2540 + return n;
2541 +
2542 if (unlikely(sz != -1 && sz < n)) {
2543 copy_from_user_overflow();
2544 return n;
2545 diff -urNp linux-2.6.39.4/arch/s390/Kconfig linux-2.6.39.4/arch/s390/Kconfig
2546 --- linux-2.6.39.4/arch/s390/Kconfig 2011-05-19 00:06:34.000000000 -0400
2547 +++ linux-2.6.39.4/arch/s390/Kconfig 2011-08-05 19:44:33.000000000 -0400
2548 @@ -234,11 +234,9 @@ config S390_EXEC_PROTECT
2549 prompt "Data execute protection"
2550 help
2551 This option allows to enable a buffer overflow protection for user
2552 - space programs and it also selects the addressing mode option above.
2553 - The kernel parameter noexec=on will enable this feature and also
2554 - switch the addressing modes, default is disabled. Enabling this (via
2555 - kernel parameter) on machines earlier than IBM System z9 this will
2556 - reduce system performance.
2557 + space programs.
2558 + Enabling this (via kernel parameter) on machines earlier than IBM
2559 + System z9 this will reduce system performance.
2560
2561 comment "Code generation options"
2562
2563 diff -urNp linux-2.6.39.4/arch/s390/kernel/module.c linux-2.6.39.4/arch/s390/kernel/module.c
2564 --- linux-2.6.39.4/arch/s390/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
2565 +++ linux-2.6.39.4/arch/s390/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
2566 @@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
2567
2568 /* Increase core size by size of got & plt and set start
2569 offsets for got and plt. */
2570 - me->core_size = ALIGN(me->core_size, 4);
2571 - me->arch.got_offset = me->core_size;
2572 - me->core_size += me->arch.got_size;
2573 - me->arch.plt_offset = me->core_size;
2574 - me->core_size += me->arch.plt_size;
2575 + me->core_size_rw = ALIGN(me->core_size_rw, 4);
2576 + me->arch.got_offset = me->core_size_rw;
2577 + me->core_size_rw += me->arch.got_size;
2578 + me->arch.plt_offset = me->core_size_rx;
2579 + me->core_size_rx += me->arch.plt_size;
2580 return 0;
2581 }
2582
2583 @@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2584 if (info->got_initialized == 0) {
2585 Elf_Addr *gotent;
2586
2587 - gotent = me->module_core + me->arch.got_offset +
2588 + gotent = me->module_core_rw + me->arch.got_offset +
2589 info->got_offset;
2590 *gotent = val;
2591 info->got_initialized = 1;
2592 @@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2593 else if (r_type == R_390_GOTENT ||
2594 r_type == R_390_GOTPLTENT)
2595 *(unsigned int *) loc =
2596 - (val + (Elf_Addr) me->module_core - loc) >> 1;
2597 + (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
2598 else if (r_type == R_390_GOT64 ||
2599 r_type == R_390_GOTPLT64)
2600 *(unsigned long *) loc = val;
2601 @@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2602 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
2603 if (info->plt_initialized == 0) {
2604 unsigned int *ip;
2605 - ip = me->module_core + me->arch.plt_offset +
2606 + ip = me->module_core_rx + me->arch.plt_offset +
2607 info->plt_offset;
2608 #ifndef CONFIG_64BIT
2609 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
2610 @@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2611 val - loc + 0xffffUL < 0x1ffffeUL) ||
2612 (r_type == R_390_PLT32DBL &&
2613 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
2614 - val = (Elf_Addr) me->module_core +
2615 + val = (Elf_Addr) me->module_core_rx +
2616 me->arch.plt_offset +
2617 info->plt_offset;
2618 val += rela->r_addend - loc;
2619 @@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2620 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
2621 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
2622 val = val + rela->r_addend -
2623 - ((Elf_Addr) me->module_core + me->arch.got_offset);
2624 + ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
2625 if (r_type == R_390_GOTOFF16)
2626 *(unsigned short *) loc = val;
2627 else if (r_type == R_390_GOTOFF32)
2628 @@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
2629 break;
2630 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
2631 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
2632 - val = (Elf_Addr) me->module_core + me->arch.got_offset +
2633 + val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
2634 rela->r_addend - loc;
2635 if (r_type == R_390_GOTPC)
2636 *(unsigned int *) loc = val;
2637 diff -urNp linux-2.6.39.4/arch/s390/kernel/process.c linux-2.6.39.4/arch/s390/kernel/process.c
2638 --- linux-2.6.39.4/arch/s390/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
2639 +++ linux-2.6.39.4/arch/s390/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
2640 @@ -334,39 +334,3 @@ unsigned long get_wchan(struct task_stru
2641 }
2642 return 0;
2643 }
2644 -
2645 -unsigned long arch_align_stack(unsigned long sp)
2646 -{
2647 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2648 - sp -= get_random_int() & ~PAGE_MASK;
2649 - return sp & ~0xf;
2650 -}
2651 -
2652 -static inline unsigned long brk_rnd(void)
2653 -{
2654 - /* 8MB for 32bit, 1GB for 64bit */
2655 - if (is_32bit_task())
2656 - return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
2657 - else
2658 - return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
2659 -}
2660 -
2661 -unsigned long arch_randomize_brk(struct mm_struct *mm)
2662 -{
2663 - unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
2664 -
2665 - if (ret < mm->brk)
2666 - return mm->brk;
2667 - return ret;
2668 -}
2669 -
2670 -unsigned long randomize_et_dyn(unsigned long base)
2671 -{
2672 - unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2673 -
2674 - if (!(current->flags & PF_RANDOMIZE))
2675 - return base;
2676 - if (ret < base)
2677 - return base;
2678 - return ret;
2679 -}
2680 diff -urNp linux-2.6.39.4/arch/s390/kernel/setup.c linux-2.6.39.4/arch/s390/kernel/setup.c
2681 --- linux-2.6.39.4/arch/s390/kernel/setup.c 2011-05-19 00:06:34.000000000 -0400
2682 +++ linux-2.6.39.4/arch/s390/kernel/setup.c 2011-08-05 19:44:33.000000000 -0400
2683 @@ -271,7 +271,7 @@ static int __init early_parse_mem(char *
2684 }
2685 early_param("mem", early_parse_mem);
2686
2687 -unsigned int user_mode = HOME_SPACE_MODE;
2688 +unsigned int user_mode = SECONDARY_SPACE_MODE;
2689 EXPORT_SYMBOL_GPL(user_mode);
2690
2691 static int set_amode_and_uaccess(unsigned long user_amode,
2692 @@ -300,17 +300,6 @@ static int set_amode_and_uaccess(unsigne
2693 }
2694 }
2695
2696 -/*
2697 - * Switch kernel/user addressing modes?
2698 - */
2699 -static int __init early_parse_switch_amode(char *p)
2700 -{
2701 - if (user_mode != SECONDARY_SPACE_MODE)
2702 - user_mode = PRIMARY_SPACE_MODE;
2703 - return 0;
2704 -}
2705 -early_param("switch_amode", early_parse_switch_amode);
2706 -
2707 static int __init early_parse_user_mode(char *p)
2708 {
2709 if (p && strcmp(p, "primary") == 0)
2710 @@ -327,20 +316,6 @@ static int __init early_parse_user_mode(
2711 }
2712 early_param("user_mode", early_parse_user_mode);
2713
2714 -#ifdef CONFIG_S390_EXEC_PROTECT
2715 -/*
2716 - * Enable execute protection?
2717 - */
2718 -static int __init early_parse_noexec(char *p)
2719 -{
2720 - if (!strncmp(p, "off", 3))
2721 - return 0;
2722 - user_mode = SECONDARY_SPACE_MODE;
2723 - return 0;
2724 -}
2725 -early_param("noexec", early_parse_noexec);
2726 -#endif /* CONFIG_S390_EXEC_PROTECT */
2727 -
2728 static void setup_addressing_mode(void)
2729 {
2730 if (user_mode == SECONDARY_SPACE_MODE) {
2731 diff -urNp linux-2.6.39.4/arch/s390/mm/mmap.c linux-2.6.39.4/arch/s390/mm/mmap.c
2732 --- linux-2.6.39.4/arch/s390/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
2733 +++ linux-2.6.39.4/arch/s390/mm/mmap.c 2011-08-05 19:44:33.000000000 -0400
2734 @@ -91,10 +91,22 @@ void arch_pick_mmap_layout(struct mm_str
2735 */
2736 if (mmap_is_legacy()) {
2737 mm->mmap_base = TASK_UNMAPPED_BASE;
2738 +
2739 +#ifdef CONFIG_PAX_RANDMMAP
2740 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2741 + mm->mmap_base += mm->delta_mmap;
2742 +#endif
2743 +
2744 mm->get_unmapped_area = arch_get_unmapped_area;
2745 mm->unmap_area = arch_unmap_area;
2746 } else {
2747 mm->mmap_base = mmap_base();
2748 +
2749 +#ifdef CONFIG_PAX_RANDMMAP
2750 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2751 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2752 +#endif
2753 +
2754 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
2755 mm->unmap_area = arch_unmap_area_topdown;
2756 }
2757 @@ -166,10 +178,22 @@ void arch_pick_mmap_layout(struct mm_str
2758 */
2759 if (mmap_is_legacy()) {
2760 mm->mmap_base = TASK_UNMAPPED_BASE;
2761 +
2762 +#ifdef CONFIG_PAX_RANDMMAP
2763 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2764 + mm->mmap_base += mm->delta_mmap;
2765 +#endif
2766 +
2767 mm->get_unmapped_area = s390_get_unmapped_area;
2768 mm->unmap_area = arch_unmap_area;
2769 } else {
2770 mm->mmap_base = mmap_base();
2771 +
2772 +#ifdef CONFIG_PAX_RANDMMAP
2773 + if (mm->pax_flags & MF_PAX_RANDMMAP)
2774 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
2775 +#endif
2776 +
2777 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
2778 mm->unmap_area = arch_unmap_area_topdown;
2779 }
2780 diff -urNp linux-2.6.39.4/arch/score/include/asm/system.h linux-2.6.39.4/arch/score/include/asm/system.h
2781 --- linux-2.6.39.4/arch/score/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
2782 +++ linux-2.6.39.4/arch/score/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
2783 @@ -17,7 +17,7 @@ do { \
2784 #define finish_arch_switch(prev) do {} while (0)
2785
2786 typedef void (*vi_handler_t)(void);
2787 -extern unsigned long arch_align_stack(unsigned long sp);
2788 +#define arch_align_stack(x) (x)
2789
2790 #define mb() barrier()
2791 #define rmb() barrier()
2792 diff -urNp linux-2.6.39.4/arch/score/kernel/process.c linux-2.6.39.4/arch/score/kernel/process.c
2793 --- linux-2.6.39.4/arch/score/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
2794 +++ linux-2.6.39.4/arch/score/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
2795 @@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
2796
2797 return task_pt_regs(task)->cp0_epc;
2798 }
2799 -
2800 -unsigned long arch_align_stack(unsigned long sp)
2801 -{
2802 - return sp;
2803 -}
2804 diff -urNp linux-2.6.39.4/arch/sh/mm/mmap.c linux-2.6.39.4/arch/sh/mm/mmap.c
2805 --- linux-2.6.39.4/arch/sh/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
2806 +++ linux-2.6.39.4/arch/sh/mm/mmap.c 2011-08-05 19:44:33.000000000 -0400
2807 @@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
2808 addr = PAGE_ALIGN(addr);
2809
2810 vma = find_vma(mm, addr);
2811 - if (TASK_SIZE - len >= addr &&
2812 - (!vma || addr + len <= vma->vm_start))
2813 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2814 return addr;
2815 }
2816
2817 @@ -106,7 +105,7 @@ full_search:
2818 }
2819 return -ENOMEM;
2820 }
2821 - if (likely(!vma || addr + len <= vma->vm_start)) {
2822 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2823 /*
2824 * Remember the place where we stopped the search:
2825 */
2826 @@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
2827 addr = PAGE_ALIGN(addr);
2828
2829 vma = find_vma(mm, addr);
2830 - if (TASK_SIZE - len >= addr &&
2831 - (!vma || addr + len <= vma->vm_start))
2832 + if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
2833 return addr;
2834 }
2835
2836 @@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
2837 /* make sure it can fit in the remaining address space */
2838 if (likely(addr > len)) {
2839 vma = find_vma(mm, addr-len);
2840 - if (!vma || addr <= vma->vm_start) {
2841 + if (check_heap_stack_gap(vma, addr - len, len)) {
2842 /* remember the address as a hint for next time */
2843 return (mm->free_area_cache = addr-len);
2844 }
2845 @@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
2846 if (unlikely(mm->mmap_base < len))
2847 goto bottomup;
2848
2849 - addr = mm->mmap_base-len;
2850 - if (do_colour_align)
2851 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2852 + addr = mm->mmap_base - len;
2853
2854 do {
2855 + if (do_colour_align)
2856 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2857 /*
2858 * Lookup failure means no vma is above this address,
2859 * else if new region fits below vma->vm_start,
2860 * return with success:
2861 */
2862 vma = find_vma(mm, addr);
2863 - if (likely(!vma || addr+len <= vma->vm_start)) {
2864 + if (likely(check_heap_stack_gap(vma, addr, len))) {
2865 /* remember the address as a hint for next time */
2866 return (mm->free_area_cache = addr);
2867 }
2868 @@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
2869 mm->cached_hole_size = vma->vm_start - addr;
2870
2871 /* try just below the current vma->vm_start */
2872 - addr = vma->vm_start-len;
2873 - if (do_colour_align)
2874 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
2875 - } while (likely(len < vma->vm_start));
2876 + addr = skip_heap_stack_gap(vma, len);
2877 + } while (!IS_ERR_VALUE(addr));
2878
2879 bottomup:
2880 /*
2881 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h
2882 --- linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h 2011-05-19 00:06:34.000000000 -0400
2883 +++ linux-2.6.39.4/arch/sparc/include/asm/atomic_64.h 2011-08-18 23:17:16.000000000 -0400
2884 @@ -14,18 +14,40 @@
2885 #define ATOMIC64_INIT(i) { (i) }
2886
2887 #define atomic_read(v) (*(volatile int *)&(v)->counter)
2888 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
2889 +{
2890 + return v->counter;
2891 +}
2892 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
2893 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
2894 +{
2895 + return v->counter;
2896 +}
2897
2898 #define atomic_set(v, i) (((v)->counter) = i)
2899 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
2900 +{
2901 + v->counter = i;
2902 +}
2903 #define atomic64_set(v, i) (((v)->counter) = i)
2904 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
2905 +{
2906 + v->counter = i;
2907 +}
2908
2909 extern void atomic_add(int, atomic_t *);
2910 +extern void atomic_add_unchecked(int, atomic_unchecked_t *);
2911 extern void atomic64_add(long, atomic64_t *);
2912 +extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
2913 extern void atomic_sub(int, atomic_t *);
2914 +extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
2915 extern void atomic64_sub(long, atomic64_t *);
2916 +extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
2917
2918 extern int atomic_add_ret(int, atomic_t *);
2919 +extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
2920 extern long atomic64_add_ret(long, atomic64_t *);
2921 +extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
2922 extern int atomic_sub_ret(int, atomic_t *);
2923 extern long atomic64_sub_ret(long, atomic64_t *);
2924
2925 @@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
2926 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
2927
2928 #define atomic_inc_return(v) atomic_add_ret(1, v)
2929 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
2930 +{
2931 + return atomic_add_ret_unchecked(1, v);
2932 +}
2933 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
2934 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
2935 +{
2936 + return atomic64_add_ret_unchecked(1, v);
2937 +}
2938
2939 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
2940 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
2941
2942 #define atomic_add_return(i, v) atomic_add_ret(i, v)
2943 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
2944 +{
2945 + return atomic_add_ret_unchecked(i, v);
2946 +}
2947 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
2948 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
2949 +{
2950 + return atomic64_add_ret_unchecked(i, v);
2951 +}
2952
2953 /*
2954 * atomic_inc_and_test - increment and test
2955 @@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomi
2956 * other cases.
2957 */
2958 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
2959 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
2960 +{
2961 + return atomic_inc_return_unchecked(v) == 0;
2962 +}
2963 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
2964
2965 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
2966 @@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomi
2967 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
2968
2969 #define atomic_inc(v) atomic_add(1, v)
2970 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
2971 +{
2972 + atomic_add_unchecked(1, v);
2973 +}
2974 #define atomic64_inc(v) atomic64_add(1, v)
2975 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
2976 +{
2977 + atomic64_add_unchecked(1, v);
2978 +}
2979
2980 #define atomic_dec(v) atomic_sub(1, v)
2981 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
2982 +{
2983 + atomic_sub_unchecked(1, v);
2984 +}
2985 #define atomic64_dec(v) atomic64_sub(1, v)
2986 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
2987 +{
2988 + atomic64_sub_unchecked(1, v);
2989 +}
2990
2991 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
2992 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
2993
2994 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
2995 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
2996 +{
2997 + return cmpxchg(&v->counter, old, new);
2998 +}
2999 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3000 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
3001 +{
3002 + return xchg(&v->counter, new);
3003 +}
3004
3005 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3006 {
3007 - int c, old;
3008 + int c, old, new;
3009 c = atomic_read(v);
3010 for (;;) {
3011 - if (unlikely(c == (u)))
3012 + if (unlikely(c == u))
3013 break;
3014 - old = atomic_cmpxchg((v), c, c + (a));
3015 +
3016 + asm volatile("addcc %2, %0, %0\n"
3017 +
3018 +#ifdef CONFIG_PAX_REFCOUNT
3019 + "tvs %%icc, 6\n"
3020 +#endif
3021 +
3022 + : "=r" (new)
3023 + : "0" (c), "ir" (a)
3024 + : "cc");
3025 +
3026 + old = atomic_cmpxchg(v, c, new);
3027 if (likely(old == c))
3028 break;
3029 c = old;
3030 }
3031 - return c != (u);
3032 + return c != u;
3033 }
3034
3035 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3036 @@ -90,20 +167,35 @@ static inline int atomic_add_unless(atom
3037 #define atomic64_cmpxchg(v, o, n) \
3038 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
3039 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
3040 +static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
3041 +{
3042 + return xchg(&v->counter, new);
3043 +}
3044
3045 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3046 {
3047 - long c, old;
3048 + long c, old, new;
3049 c = atomic64_read(v);
3050 for (;;) {
3051 - if (unlikely(c == (u)))
3052 + if (unlikely(c == u))
3053 break;
3054 - old = atomic64_cmpxchg((v), c, c + (a));
3055 +
3056 + asm volatile("addcc %2, %0, %0\n"
3057 +
3058 +#ifdef CONFIG_PAX_REFCOUNT
3059 + "tvs %%xcc, 6\n"
3060 +#endif
3061 +
3062 + : "=r" (new)
3063 + : "0" (c), "ir" (a)
3064 + : "cc");
3065 +
3066 + old = atomic64_cmpxchg(v, c, new);
3067 if (likely(old == c))
3068 break;
3069 c = old;
3070 }
3071 - return c != (u);
3072 + return c != u;
3073 }
3074
3075 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3076 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/cache.h linux-2.6.39.4/arch/sparc/include/asm/cache.h
3077 --- linux-2.6.39.4/arch/sparc/include/asm/cache.h 2011-05-19 00:06:34.000000000 -0400
3078 +++ linux-2.6.39.4/arch/sparc/include/asm/cache.h 2011-08-05 19:44:33.000000000 -0400
3079 @@ -10,7 +10,7 @@
3080 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
3081
3082 #define L1_CACHE_SHIFT 5
3083 -#define L1_CACHE_BYTES 32
3084 +#define L1_CACHE_BYTES 32UL
3085
3086 #ifdef CONFIG_SPARC32
3087 #define SMP_CACHE_BYTES_SHIFT 5
3088 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/elf_32.h linux-2.6.39.4/arch/sparc/include/asm/elf_32.h
3089 --- linux-2.6.39.4/arch/sparc/include/asm/elf_32.h 2011-05-19 00:06:34.000000000 -0400
3090 +++ linux-2.6.39.4/arch/sparc/include/asm/elf_32.h 2011-08-05 19:44:33.000000000 -0400
3091 @@ -114,6 +114,13 @@ typedef struct {
3092
3093 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3094
3095 +#ifdef CONFIG_PAX_ASLR
3096 +#define PAX_ELF_ET_DYN_BASE 0x10000UL
3097 +
3098 +#define PAX_DELTA_MMAP_LEN 16
3099 +#define PAX_DELTA_STACK_LEN 16
3100 +#endif
3101 +
3102 /* This yields a mask that user programs can use to figure out what
3103 instruction set this cpu supports. This can NOT be done in userspace
3104 on Sparc. */
3105 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/elf_64.h linux-2.6.39.4/arch/sparc/include/asm/elf_64.h
3106 --- linux-2.6.39.4/arch/sparc/include/asm/elf_64.h 2011-05-19 00:06:34.000000000 -0400
3107 +++ linux-2.6.39.4/arch/sparc/include/asm/elf_64.h 2011-08-05 19:44:33.000000000 -0400
3108 @@ -162,6 +162,12 @@ typedef struct {
3109 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3110 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3111
3112 +#ifdef CONFIG_PAX_ASLR
3113 +#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3114 +
3115 +#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3116 +#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3117 +#endif
3118
3119 /* This yields a mask that user programs can use to figure out what
3120 instruction set this cpu supports. */
3121 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h
3122 --- linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h 2011-05-19 00:06:34.000000000 -0400
3123 +++ linux-2.6.39.4/arch/sparc/include/asm/pgtable_32.h 2011-08-05 19:44:33.000000000 -0400
3124 @@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3125 BTFIXUPDEF_INT(page_none)
3126 BTFIXUPDEF_INT(page_copy)
3127 BTFIXUPDEF_INT(page_readonly)
3128 +
3129 +#ifdef CONFIG_PAX_PAGEEXEC
3130 +BTFIXUPDEF_INT(page_shared_noexec)
3131 +BTFIXUPDEF_INT(page_copy_noexec)
3132 +BTFIXUPDEF_INT(page_readonly_noexec)
3133 +#endif
3134 +
3135 BTFIXUPDEF_INT(page_kernel)
3136
3137 #define PMD_SHIFT SUN4C_PMD_SHIFT
3138 @@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3139 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3140 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3141
3142 +#ifdef CONFIG_PAX_PAGEEXEC
3143 +extern pgprot_t PAGE_SHARED_NOEXEC;
3144 +# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3145 +# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3146 +#else
3147 +# define PAGE_SHARED_NOEXEC PAGE_SHARED
3148 +# define PAGE_COPY_NOEXEC PAGE_COPY
3149 +# define PAGE_READONLY_NOEXEC PAGE_READONLY
3150 +#endif
3151 +
3152 extern unsigned long page_kernel;
3153
3154 #ifdef MODULE
3155 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h
3156 --- linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h 2011-05-19 00:06:34.000000000 -0400
3157 +++ linux-2.6.39.4/arch/sparc/include/asm/pgtsrmmu.h 2011-08-05 19:44:33.000000000 -0400
3158 @@ -115,6 +115,13 @@
3159 SRMMU_EXEC | SRMMU_REF)
3160 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3161 SRMMU_EXEC | SRMMU_REF)
3162 +
3163 +#ifdef CONFIG_PAX_PAGEEXEC
3164 +#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3165 +#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3166 +#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3167 +#endif
3168 +
3169 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3170 SRMMU_DIRTY | SRMMU_REF)
3171
3172 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h
3173 --- linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h 2011-05-19 00:06:34.000000000 -0400
3174 +++ linux-2.6.39.4/arch/sparc/include/asm/spinlock_64.h 2011-08-18 23:17:16.000000000 -0400
3175 @@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(
3176
3177 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3178
3179 -static void inline arch_read_lock(arch_rwlock_t *lock)
3180 +static inline void arch_read_lock(arch_rwlock_t *lock)
3181 {
3182 unsigned long tmp1, tmp2;
3183
3184 __asm__ __volatile__ (
3185 "1: ldsw [%2], %0\n"
3186 " brlz,pn %0, 2f\n"
3187 -"4: add %0, 1, %1\n"
3188 +"4: addcc %0, 1, %1\n"
3189 +
3190 +#ifdef CONFIG_PAX_REFCOUNT
3191 +" tvs %%icc, 6\n"
3192 +#endif
3193 +
3194 " cas [%2], %0, %1\n"
3195 " cmp %0, %1\n"
3196 " bne,pn %%icc, 1b\n"
3197 @@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_r
3198 " .previous"
3199 : "=&r" (tmp1), "=&r" (tmp2)
3200 : "r" (lock)
3201 - : "memory");
3202 + : "memory", "cc");
3203 }
3204
3205 -static int inline arch_read_trylock(arch_rwlock_t *lock)
3206 +static inline int arch_read_trylock(arch_rwlock_t *lock)
3207 {
3208 int tmp1, tmp2;
3209
3210 @@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch
3211 "1: ldsw [%2], %0\n"
3212 " brlz,a,pn %0, 2f\n"
3213 " mov 0, %0\n"
3214 -" add %0, 1, %1\n"
3215 +" addcc %0, 1, %1\n"
3216 +
3217 +#ifdef CONFIG_PAX_REFCOUNT
3218 +" tvs %%icc, 6\n"
3219 +#endif
3220 +
3221 " cas [%2], %0, %1\n"
3222 " cmp %0, %1\n"
3223 " bne,pn %%icc, 1b\n"
3224 @@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch
3225 return tmp1;
3226 }
3227
3228 -static void inline arch_read_unlock(arch_rwlock_t *lock)
3229 +static inline void arch_read_unlock(arch_rwlock_t *lock)
3230 {
3231 unsigned long tmp1, tmp2;
3232
3233 __asm__ __volatile__(
3234 "1: lduw [%2], %0\n"
3235 -" sub %0, 1, %1\n"
3236 +" subcc %0, 1, %1\n"
3237 +
3238 +#ifdef CONFIG_PAX_REFCOUNT
3239 +" tvs %%icc, 6\n"
3240 +#endif
3241 +
3242 " cas [%2], %0, %1\n"
3243 " cmp %0, %1\n"
3244 " bne,pn %%xcc, 1b\n"
3245 @@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch
3246 : "memory");
3247 }
3248
3249 -static void inline arch_write_lock(arch_rwlock_t *lock)
3250 +static inline void arch_write_lock(arch_rwlock_t *lock)
3251 {
3252 unsigned long mask, tmp1, tmp2;
3253
3254 @@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_
3255 : "memory");
3256 }
3257
3258 -static void inline arch_write_unlock(arch_rwlock_t *lock)
3259 +static inline void arch_write_unlock(arch_rwlock_t *lock)
3260 {
3261 __asm__ __volatile__(
3262 " stw %%g0, [%0]"
3263 @@ -186,7 +201,7 @@ static void inline arch_write_unlock(arc
3264 : "memory");
3265 }
3266
3267 -static int inline arch_write_trylock(arch_rwlock_t *lock)
3268 +static inline int arch_write_trylock(arch_rwlock_t *lock)
3269 {
3270 unsigned long mask, tmp1, tmp2, result;
3271
3272 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h
3273 --- linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h 2011-05-19 00:06:34.000000000 -0400
3274 +++ linux-2.6.39.4/arch/sparc/include/asm/thread_info_32.h 2011-08-05 19:44:33.000000000 -0400
3275 @@ -50,6 +50,8 @@ struct thread_info {
3276 unsigned long w_saved;
3277
3278 struct restart_block restart_block;
3279 +
3280 + unsigned long lowest_stack;
3281 };
3282
3283 /*
3284 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h
3285 --- linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h 2011-05-19 00:06:34.000000000 -0400
3286 +++ linux-2.6.39.4/arch/sparc/include/asm/thread_info_64.h 2011-08-05 19:44:33.000000000 -0400
3287 @@ -63,6 +63,8 @@ struct thread_info {
3288 struct pt_regs *kern_una_regs;
3289 unsigned int kern_una_insn;
3290
3291 + unsigned long lowest_stack;
3292 +
3293 unsigned long fpregs[0] __attribute__ ((aligned(64)));
3294 };
3295
3296 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h
3297 --- linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h 2011-05-19 00:06:34.000000000 -0400
3298 +++ linux-2.6.39.4/arch/sparc/include/asm/uaccess_32.h 2011-08-05 19:44:33.000000000 -0400
3299 @@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
3300
3301 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3302 {
3303 - if (n && __access_ok((unsigned long) to, n))
3304 + if ((long)n < 0)
3305 + return n;
3306 +
3307 + if (n && __access_ok((unsigned long) to, n)) {
3308 + if (!__builtin_constant_p(n))
3309 + check_object_size(from, n, true);
3310 return __copy_user(to, (__force void __user *) from, n);
3311 - else
3312 + } else
3313 return n;
3314 }
3315
3316 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
3317 {
3318 + if ((long)n < 0)
3319 + return n;
3320 +
3321 + if (!__builtin_constant_p(n))
3322 + check_object_size(from, n, true);
3323 +
3324 return __copy_user(to, (__force void __user *) from, n);
3325 }
3326
3327 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3328 {
3329 - if (n && __access_ok((unsigned long) from, n))
3330 + if ((long)n < 0)
3331 + return n;
3332 +
3333 + if (n && __access_ok((unsigned long) from, n)) {
3334 + if (!__builtin_constant_p(n))
3335 + check_object_size(to, n, false);
3336 return __copy_user((__force void __user *) to, from, n);
3337 - else
3338 + } else
3339 return n;
3340 }
3341
3342 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
3343 {
3344 + if ((long)n < 0)
3345 + return n;
3346 +
3347 return __copy_user((__force void __user *) to, from, n);
3348 }
3349
3350 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h
3351 --- linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h 2011-05-19 00:06:34.000000000 -0400
3352 +++ linux-2.6.39.4/arch/sparc/include/asm/uaccess_64.h 2011-08-05 19:44:33.000000000 -0400
3353 @@ -10,6 +10,7 @@
3354 #include <linux/compiler.h>
3355 #include <linux/string.h>
3356 #include <linux/thread_info.h>
3357 +#include <linux/kernel.h>
3358 #include <asm/asi.h>
3359 #include <asm/system.h>
3360 #include <asm/spitfire.h>
3361 @@ -213,8 +214,15 @@ extern unsigned long copy_from_user_fixu
3362 static inline unsigned long __must_check
3363 copy_from_user(void *to, const void __user *from, unsigned long size)
3364 {
3365 - unsigned long ret = ___copy_from_user(to, from, size);
3366 + unsigned long ret;
3367
3368 + if ((long)size < 0 || size > INT_MAX)
3369 + return size;
3370 +
3371 + if (!__builtin_constant_p(size))
3372 + check_object_size(to, size, false);
3373 +
3374 + ret = ___copy_from_user(to, from, size);
3375 if (unlikely(ret))
3376 ret = copy_from_user_fixup(to, from, size);
3377
3378 @@ -230,8 +238,15 @@ extern unsigned long copy_to_user_fixup(
3379 static inline unsigned long __must_check
3380 copy_to_user(void __user *to, const void *from, unsigned long size)
3381 {
3382 - unsigned long ret = ___copy_to_user(to, from, size);
3383 + unsigned long ret;
3384 +
3385 + if ((long)size < 0 || size > INT_MAX)
3386 + return size;
3387 +
3388 + if (!__builtin_constant_p(size))
3389 + check_object_size(from, size, true);
3390
3391 + ret = ___copy_to_user(to, from, size);
3392 if (unlikely(ret))
3393 ret = copy_to_user_fixup(to, from, size);
3394 return ret;
3395 diff -urNp linux-2.6.39.4/arch/sparc/include/asm/uaccess.h linux-2.6.39.4/arch/sparc/include/asm/uaccess.h
3396 --- linux-2.6.39.4/arch/sparc/include/asm/uaccess.h 2011-05-19 00:06:34.000000000 -0400
3397 +++ linux-2.6.39.4/arch/sparc/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
3398 @@ -1,5 +1,13 @@
3399 #ifndef ___ASM_SPARC_UACCESS_H
3400 #define ___ASM_SPARC_UACCESS_H
3401 +
3402 +#ifdef __KERNEL__
3403 +#ifndef __ASSEMBLY__
3404 +#include <linux/types.h>
3405 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
3406 +#endif
3407 +#endif
3408 +
3409 #if defined(__sparc__) && defined(__arch64__)
3410 #include <asm/uaccess_64.h>
3411 #else
3412 diff -urNp linux-2.6.39.4/arch/sparc/kernel/Makefile linux-2.6.39.4/arch/sparc/kernel/Makefile
3413 --- linux-2.6.39.4/arch/sparc/kernel/Makefile 2011-05-19 00:06:34.000000000 -0400
3414 +++ linux-2.6.39.4/arch/sparc/kernel/Makefile 2011-08-05 19:44:33.000000000 -0400
3415 @@ -3,7 +3,7 @@
3416 #
3417
3418 asflags-y := -ansi
3419 -ccflags-y := -Werror
3420 +#ccflags-y := -Werror
3421
3422 extra-y := head_$(BITS).o
3423 extra-y += init_task.o
3424 diff -urNp linux-2.6.39.4/arch/sparc/kernel/process_32.c linux-2.6.39.4/arch/sparc/kernel/process_32.c
3425 --- linux-2.6.39.4/arch/sparc/kernel/process_32.c 2011-05-19 00:06:34.000000000 -0400
3426 +++ linux-2.6.39.4/arch/sparc/kernel/process_32.c 2011-08-05 19:44:33.000000000 -0400
3427 @@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
3428 rw->ins[4], rw->ins[5],
3429 rw->ins[6],
3430 rw->ins[7]);
3431 - printk("%pS\n", (void *) rw->ins[7]);
3432 + printk("%pA\n", (void *) rw->ins[7]);
3433 rw = (struct reg_window32 *) rw->ins[6];
3434 }
3435 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
3436 @@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
3437
3438 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
3439 r->psr, r->pc, r->npc, r->y, print_tainted());
3440 - printk("PC: <%pS>\n", (void *) r->pc);
3441 + printk("PC: <%pA>\n", (void *) r->pc);
3442 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3443 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
3444 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
3445 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3446 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
3447 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
3448 - printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
3449 + printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
3450
3451 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
3452 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
3453 @@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
3454 rw = (struct reg_window32 *) fp;
3455 pc = rw->ins[7];
3456 printk("[%08lx : ", pc);
3457 - printk("%pS ] ", (void *) pc);
3458 + printk("%pA ] ", (void *) pc);
3459 fp = rw->ins[6];
3460 } while (++count < 16);
3461 printk("\n");
3462 diff -urNp linux-2.6.39.4/arch/sparc/kernel/process_64.c linux-2.6.39.4/arch/sparc/kernel/process_64.c
3463 --- linux-2.6.39.4/arch/sparc/kernel/process_64.c 2011-05-19 00:06:34.000000000 -0400
3464 +++ linux-2.6.39.4/arch/sparc/kernel/process_64.c 2011-08-05 19:44:33.000000000 -0400
3465 @@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
3466 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
3467 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
3468 if (regs->tstate & TSTATE_PRIV)
3469 - printk("I7: <%pS>\n", (void *) rwk->ins[7]);
3470 + printk("I7: <%pA>\n", (void *) rwk->ins[7]);
3471 }
3472
3473 void show_regs(struct pt_regs *regs)
3474 {
3475 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
3476 regs->tpc, regs->tnpc, regs->y, print_tainted());
3477 - printk("TPC: <%pS>\n", (void *) regs->tpc);
3478 + printk("TPC: <%pA>\n", (void *) regs->tpc);
3479 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
3480 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
3481 regs->u_regs[3]);
3482 @@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
3483 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
3484 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
3485 regs->u_regs[15]);
3486 - printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
3487 + printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
3488 show_regwindow(regs);
3489 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
3490 }
3491 @@ -285,7 +285,7 @@ void arch_trigger_all_cpu_backtrace(void
3492 ((tp && tp->task) ? tp->task->pid : -1));
3493
3494 if (gp->tstate & TSTATE_PRIV) {
3495 - printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
3496 + printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
3497 (void *) gp->tpc,
3498 (void *) gp->o7,
3499 (void *) gp->i7,
3500 diff -urNp linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c
3501 --- linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c 2011-05-19 00:06:34.000000000 -0400
3502 +++ linux-2.6.39.4/arch/sparc/kernel/sys_sparc_32.c 2011-08-05 19:44:33.000000000 -0400
3503 @@ -56,7 +56,7 @@ unsigned long arch_get_unmapped_area(str
3504 if (ARCH_SUN4C && len > 0x20000000)
3505 return -ENOMEM;
3506 if (!addr)
3507 - addr = TASK_UNMAPPED_BASE;
3508 + addr = current->mm->mmap_base;
3509
3510 if (flags & MAP_SHARED)
3511 addr = COLOUR_ALIGN(addr);
3512 @@ -71,7 +71,7 @@ unsigned long arch_get_unmapped_area(str
3513 }
3514 if (TASK_SIZE - PAGE_SIZE - len < addr)
3515 return -ENOMEM;
3516 - if (!vmm || addr + len <= vmm->vm_start)
3517 + if (check_heap_stack_gap(vmm, addr, len))
3518 return addr;
3519 addr = vmm->vm_end;
3520 if (flags & MAP_SHARED)
3521 diff -urNp linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c
3522 --- linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c 2011-05-19 00:06:34.000000000 -0400
3523 +++ linux-2.6.39.4/arch/sparc/kernel/sys_sparc_64.c 2011-08-05 19:44:33.000000000 -0400
3524 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str
3525 /* We do not accept a shared mapping if it would violate
3526 * cache aliasing constraints.
3527 */
3528 - if ((flags & MAP_SHARED) &&
3529 + if ((filp || (flags & MAP_SHARED)) &&
3530 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3531 return -EINVAL;
3532 return addr;
3533 @@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str
3534 if (filp || (flags & MAP_SHARED))
3535 do_color_align = 1;
3536
3537 +#ifdef CONFIG_PAX_RANDMMAP
3538 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3539 +#endif
3540 +
3541 if (addr) {
3542 if (do_color_align)
3543 addr = COLOUR_ALIGN(addr, pgoff);
3544 @@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str
3545 addr = PAGE_ALIGN(addr);
3546
3547 vma = find_vma(mm, addr);
3548 - if (task_size - len >= addr &&
3549 - (!vma || addr + len <= vma->vm_start))
3550 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3551 return addr;
3552 }
3553
3554 if (len > mm->cached_hole_size) {
3555 - start_addr = addr = mm->free_area_cache;
3556 + start_addr = addr = mm->free_area_cache;
3557 } else {
3558 - start_addr = addr = TASK_UNMAPPED_BASE;
3559 + start_addr = addr = mm->mmap_base;
3560 mm->cached_hole_size = 0;
3561 }
3562
3563 @@ -174,14 +177,14 @@ full_search:
3564 vma = find_vma(mm, VA_EXCLUDE_END);
3565 }
3566 if (unlikely(task_size < addr)) {
3567 - if (start_addr != TASK_UNMAPPED_BASE) {
3568 - start_addr = addr = TASK_UNMAPPED_BASE;
3569 + if (start_addr != mm->mmap_base) {
3570 + start_addr = addr = mm->mmap_base;
3571 mm->cached_hole_size = 0;
3572 goto full_search;
3573 }
3574 return -ENOMEM;
3575 }
3576 - if (likely(!vma || addr + len <= vma->vm_start)) {
3577 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3578 /*
3579 * Remember the place where we stopped the search:
3580 */
3581 @@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi
3582 /* We do not accept a shared mapping if it would violate
3583 * cache aliasing constraints.
3584 */
3585 - if ((flags & MAP_SHARED) &&
3586 + if ((filp || (flags & MAP_SHARED)) &&
3587 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
3588 return -EINVAL;
3589 return addr;
3590 @@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi
3591 addr = PAGE_ALIGN(addr);
3592
3593 vma = find_vma(mm, addr);
3594 - if (task_size - len >= addr &&
3595 - (!vma || addr + len <= vma->vm_start))
3596 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
3597 return addr;
3598 }
3599
3600 @@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi
3601 /* make sure it can fit in the remaining address space */
3602 if (likely(addr > len)) {
3603 vma = find_vma(mm, addr-len);
3604 - if (!vma || addr <= vma->vm_start) {
3605 + if (check_heap_stack_gap(vma, addr - len, len)) {
3606 /* remember the address as a hint for next time */
3607 return (mm->free_area_cache = addr-len);
3608 }
3609 @@ -267,18 +269,18 @@ arch_get_unmapped_area_topdown(struct fi
3610 if (unlikely(mm->mmap_base < len))
3611 goto bottomup;
3612
3613 - addr = mm->mmap_base-len;
3614 - if (do_color_align)
3615 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3616 + addr = mm->mmap_base - len;
3617
3618 do {
3619 + if (do_color_align)
3620 + addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3621 /*
3622 * Lookup failure means no vma is above this address,
3623 * else if new region fits below vma->vm_start,
3624 * return with success:
3625 */
3626 vma = find_vma(mm, addr);
3627 - if (likely(!vma || addr+len <= vma->vm_start)) {
3628 + if (likely(check_heap_stack_gap(vma, addr, len))) {
3629 /* remember the address as a hint for next time */
3630 return (mm->free_area_cache = addr);
3631 }
3632 @@ -288,10 +290,8 @@ arch_get_unmapped_area_topdown(struct fi
3633 mm->cached_hole_size = vma->vm_start - addr;
3634
3635 /* try just below the current vma->vm_start */
3636 - addr = vma->vm_start-len;
3637 - if (do_color_align)
3638 - addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3639 - } while (likely(len < vma->vm_start));
3640 + addr = skip_heap_stack_gap(vma, len);
3641 + } while (!IS_ERR_VALUE(addr));
3642
3643 bottomup:
3644 /*
3645 @@ -390,6 +390,12 @@ void arch_pick_mmap_layout(struct mm_str
3646 gap == RLIM_INFINITY ||
3647 sysctl_legacy_va_layout) {
3648 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3649 +
3650 +#ifdef CONFIG_PAX_RANDMMAP
3651 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3652 + mm->mmap_base += mm->delta_mmap;
3653 +#endif
3654 +
3655 mm->get_unmapped_area = arch_get_unmapped_area;
3656 mm->unmap_area = arch_unmap_area;
3657 } else {
3658 @@ -402,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str
3659 gap = (task_size / 6 * 5);
3660
3661 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
3662 +
3663 +#ifdef CONFIG_PAX_RANDMMAP
3664 + if (mm->pax_flags & MF_PAX_RANDMMAP)
3665 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3666 +#endif
3667 +
3668 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3669 mm->unmap_area = arch_unmap_area_topdown;
3670 }
3671 diff -urNp linux-2.6.39.4/arch/sparc/kernel/traps_32.c linux-2.6.39.4/arch/sparc/kernel/traps_32.c
3672 --- linux-2.6.39.4/arch/sparc/kernel/traps_32.c 2011-05-19 00:06:34.000000000 -0400
3673 +++ linux-2.6.39.4/arch/sparc/kernel/traps_32.c 2011-08-05 19:44:33.000000000 -0400
3674 @@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
3675 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
3676 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
3677
3678 +extern void gr_handle_kernel_exploit(void);
3679 +
3680 void die_if_kernel(char *str, struct pt_regs *regs)
3681 {
3682 static int die_counter;
3683 @@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
3684 count++ < 30 &&
3685 (((unsigned long) rw) >= PAGE_OFFSET) &&
3686 !(((unsigned long) rw) & 0x7)) {
3687 - printk("Caller[%08lx]: %pS\n", rw->ins[7],
3688 + printk("Caller[%08lx]: %pA\n", rw->ins[7],
3689 (void *) rw->ins[7]);
3690 rw = (struct reg_window32 *)rw->ins[6];
3691 }
3692 }
3693 printk("Instruction DUMP:");
3694 instruction_dump ((unsigned long *) regs->pc);
3695 - if(regs->psr & PSR_PS)
3696 + if(regs->psr & PSR_PS) {
3697 + gr_handle_kernel_exploit();
3698 do_exit(SIGKILL);
3699 + }
3700 do_exit(SIGSEGV);
3701 }
3702
3703 diff -urNp linux-2.6.39.4/arch/sparc/kernel/traps_64.c linux-2.6.39.4/arch/sparc/kernel/traps_64.c
3704 --- linux-2.6.39.4/arch/sparc/kernel/traps_64.c 2011-05-19 00:06:34.000000000 -0400
3705 +++ linux-2.6.39.4/arch/sparc/kernel/traps_64.c 2011-08-05 19:44:33.000000000 -0400
3706 @@ -75,7 +75,7 @@ static void dump_tl1_traplog(struct tl1_
3707 i + 1,
3708 p->trapstack[i].tstate, p->trapstack[i].tpc,
3709 p->trapstack[i].tnpc, p->trapstack[i].tt);
3710 - printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
3711 + printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
3712 }
3713 }
3714
3715 @@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long
3716
3717 lvl -= 0x100;
3718 if (regs->tstate & TSTATE_PRIV) {
3719 +
3720 +#ifdef CONFIG_PAX_REFCOUNT
3721 + if (lvl == 6)
3722 + pax_report_refcount_overflow(regs);
3723 +#endif
3724 +
3725 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
3726 die_if_kernel(buffer, regs);
3727 }
3728 @@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long
3729 void bad_trap_tl1(struct pt_regs *regs, long lvl)
3730 {
3731 char buffer[32];
3732 -
3733 +
3734 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
3735 0, lvl, SIGTRAP) == NOTIFY_STOP)
3736 return;
3737
3738 +#ifdef CONFIG_PAX_REFCOUNT
3739 + if (lvl == 6)
3740 + pax_report_refcount_overflow(regs);
3741 +#endif
3742 +
3743 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
3744
3745 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
3746 @@ -1141,7 +1152,7 @@ static void cheetah_log_errors(struct pt
3747 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
3748 printk("%s" "ERROR(%d): ",
3749 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
3750 - printk("TPC<%pS>\n", (void *) regs->tpc);
3751 + printk("TPC<%pA>\n", (void *) regs->tpc);
3752 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
3753 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
3754 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
3755 @@ -1748,7 +1759,7 @@ void cheetah_plus_parity_error(int type,
3756 smp_processor_id(),
3757 (type & 0x1) ? 'I' : 'D',
3758 regs->tpc);
3759 - printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
3760 + printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
3761 panic("Irrecoverable Cheetah+ parity error.");
3762 }
3763
3764 @@ -1756,7 +1767,7 @@ void cheetah_plus_parity_error(int type,
3765 smp_processor_id(),
3766 (type & 0x1) ? 'I' : 'D',
3767 regs->tpc);
3768 - printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
3769 + printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
3770 }
3771
3772 struct sun4v_error_entry {
3773 @@ -1963,9 +1974,9 @@ void sun4v_itlb_error_report(struct pt_r
3774
3775 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
3776 regs->tpc, tl);
3777 - printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
3778 + printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
3779 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3780 - printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
3781 + printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
3782 (void *) regs->u_regs[UREG_I7]);
3783 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
3784 "pte[%lx] error[%lx]\n",
3785 @@ -1987,9 +1998,9 @@ void sun4v_dtlb_error_report(struct pt_r
3786
3787 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
3788 regs->tpc, tl);
3789 - printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
3790 + printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
3791 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
3792 - printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
3793 + printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
3794 (void *) regs->u_regs[UREG_I7]);
3795 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
3796 "pte[%lx] error[%lx]\n",
3797 @@ -2195,13 +2206,13 @@ void show_stack(struct task_struct *tsk,
3798 fp = (unsigned long)sf->fp + STACK_BIAS;
3799 }
3800
3801 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3802 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3803 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3804 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
3805 int index = tsk->curr_ret_stack;
3806 if (tsk->ret_stack && index >= graph) {
3807 pc = tsk->ret_stack[index - graph].ret;
3808 - printk(" [%016lx] %pS\n", pc, (void *) pc);
3809 + printk(" [%016lx] %pA\n", pc, (void *) pc);
3810 graph++;
3811 }
3812 }
3813 @@ -2226,6 +2237,8 @@ static inline struct reg_window *kernel_
3814 return (struct reg_window *) (fp + STACK_BIAS);
3815 }
3816
3817 +extern void gr_handle_kernel_exploit(void);
3818 +
3819 void die_if_kernel(char *str, struct pt_regs *regs)
3820 {
3821 static int die_counter;
3822 @@ -2254,7 +2267,7 @@ void die_if_kernel(char *str, struct pt_
3823 while (rw &&
3824 count++ < 30 &&
3825 kstack_valid(tp, (unsigned long) rw)) {
3826 - printk("Caller[%016lx]: %pS\n", rw->ins[7],
3827 + printk("Caller[%016lx]: %pA\n", rw->ins[7],
3828 (void *) rw->ins[7]);
3829
3830 rw = kernel_stack_up(rw);
3831 @@ -2267,8 +2280,10 @@ void die_if_kernel(char *str, struct pt_
3832 }
3833 user_instruction_dump ((unsigned int __user *) regs->tpc);
3834 }
3835 - if (regs->tstate & TSTATE_PRIV)
3836 + if (regs->tstate & TSTATE_PRIV) {
3837 + gr_handle_kernel_exploit();
3838 do_exit(SIGKILL);
3839 + }
3840 do_exit(SIGSEGV);
3841 }
3842 EXPORT_SYMBOL(die_if_kernel);
3843 diff -urNp linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c
3844 --- linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c 2011-05-19 00:06:34.000000000 -0400
3845 +++ linux-2.6.39.4/arch/sparc/kernel/unaligned_64.c 2011-08-05 19:44:33.000000000 -0400
3846 @@ -278,7 +278,7 @@ static void log_unaligned(struct pt_regs
3847 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
3848
3849 if (__ratelimit(&ratelimit)) {
3850 - printk("Kernel unaligned access at TPC[%lx] %pS\n",
3851 + printk("Kernel unaligned access at TPC[%lx] %pA\n",
3852 regs->tpc, (void *) regs->tpc);
3853 }
3854 }
3855 diff -urNp linux-2.6.39.4/arch/sparc/lib/atomic_64.S linux-2.6.39.4/arch/sparc/lib/atomic_64.S
3856 --- linux-2.6.39.4/arch/sparc/lib/atomic_64.S 2011-05-19 00:06:34.000000000 -0400
3857 +++ linux-2.6.39.4/arch/sparc/lib/atomic_64.S 2011-08-05 19:44:33.000000000 -0400
3858 @@ -18,7 +18,12 @@
3859 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
3860 BACKOFF_SETUP(%o2)
3861 1: lduw [%o1], %g1
3862 - add %g1, %o0, %g7
3863 + addcc %g1, %o0, %g7
3864 +
3865 +#ifdef CONFIG_PAX_REFCOUNT
3866 + tvs %icc, 6
3867 +#endif
3868 +
3869 cas [%o1], %g1, %g7
3870 cmp %g1, %g7
3871 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3872 @@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
3873 2: BACKOFF_SPIN(%o2, %o3, 1b)
3874 .size atomic_add, .-atomic_add
3875
3876 + .globl atomic_add_unchecked
3877 + .type atomic_add_unchecked,#function
3878 +atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3879 + BACKOFF_SETUP(%o2)
3880 +1: lduw [%o1], %g1
3881 + add %g1, %o0, %g7
3882 + cas [%o1], %g1, %g7
3883 + cmp %g1, %g7
3884 + bne,pn %icc, 2f
3885 + nop
3886 + retl
3887 + nop
3888 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3889 + .size atomic_add_unchecked, .-atomic_add_unchecked
3890 +
3891 .globl atomic_sub
3892 .type atomic_sub,#function
3893 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
3894 BACKOFF_SETUP(%o2)
3895 1: lduw [%o1], %g1
3896 - sub %g1, %o0, %g7
3897 + subcc %g1, %o0, %g7
3898 +
3899 +#ifdef CONFIG_PAX_REFCOUNT
3900 + tvs %icc, 6
3901 +#endif
3902 +
3903 cas [%o1], %g1, %g7
3904 cmp %g1, %g7
3905 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3906 @@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
3907 2: BACKOFF_SPIN(%o2, %o3, 1b)
3908 .size atomic_sub, .-atomic_sub
3909
3910 + .globl atomic_sub_unchecked
3911 + .type atomic_sub_unchecked,#function
3912 +atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
3913 + BACKOFF_SETUP(%o2)
3914 +1: lduw [%o1], %g1
3915 + sub %g1, %o0, %g7
3916 + cas [%o1], %g1, %g7
3917 + cmp %g1, %g7
3918 + bne,pn %icc, 2f
3919 + nop
3920 + retl
3921 + nop
3922 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3923 + .size atomic_sub_unchecked, .-atomic_sub_unchecked
3924 +
3925 .globl atomic_add_ret
3926 .type atomic_add_ret,#function
3927 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
3928 BACKOFF_SETUP(%o2)
3929 1: lduw [%o1], %g1
3930 - add %g1, %o0, %g7
3931 + addcc %g1, %o0, %g7
3932 +
3933 +#ifdef CONFIG_PAX_REFCOUNT
3934 + tvs %icc, 6
3935 +#endif
3936 +
3937 cas [%o1], %g1, %g7
3938 cmp %g1, %g7
3939 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3940 @@ -58,12 +103,33 @@ atomic_add_ret: /* %o0 = increment, %o1
3941 2: BACKOFF_SPIN(%o2, %o3, 1b)
3942 .size atomic_add_ret, .-atomic_add_ret
3943
3944 + .globl atomic_add_ret_unchecked
3945 + .type atomic_add_ret_unchecked,#function
3946 +atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3947 + BACKOFF_SETUP(%o2)
3948 +1: lduw [%o1], %g1
3949 + addcc %g1, %o0, %g7
3950 + cas [%o1], %g1, %g7
3951 + cmp %g1, %g7
3952 + bne,pn %icc, 2f
3953 + add %g7, %o0, %g7
3954 + sra %g7, 0, %o0
3955 + retl
3956 + nop
3957 +2: BACKOFF_SPIN(%o2, %o3, 1b)
3958 + .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
3959 +
3960 .globl atomic_sub_ret
3961 .type atomic_sub_ret,#function
3962 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
3963 BACKOFF_SETUP(%o2)
3964 1: lduw [%o1], %g1
3965 - sub %g1, %o0, %g7
3966 + subcc %g1, %o0, %g7
3967 +
3968 +#ifdef CONFIG_PAX_REFCOUNT
3969 + tvs %icc, 6
3970 +#endif
3971 +
3972 cas [%o1], %g1, %g7
3973 cmp %g1, %g7
3974 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
3975 @@ -78,7 +144,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
3976 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
3977 BACKOFF_SETUP(%o2)
3978 1: ldx [%o1], %g1
3979 - add %g1, %o0, %g7
3980 + addcc %g1, %o0, %g7
3981 +
3982 +#ifdef CONFIG_PAX_REFCOUNT
3983 + tvs %xcc, 6
3984 +#endif
3985 +
3986 casx [%o1], %g1, %g7
3987 cmp %g1, %g7
3988 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
3989 @@ -88,12 +159,32 @@ atomic64_add: /* %o0 = increment, %o1 =
3990 2: BACKOFF_SPIN(%o2, %o3, 1b)
3991 .size atomic64_add, .-atomic64_add
3992
3993 + .globl atomic64_add_unchecked
3994 + .type atomic64_add_unchecked,#function
3995 +atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
3996 + BACKOFF_SETUP(%o2)
3997 +1: ldx [%o1], %g1
3998 + addcc %g1, %o0, %g7
3999 + casx [%o1], %g1, %g7
4000 + cmp %g1, %g7
4001 + bne,pn %xcc, 2f
4002 + nop
4003 + retl
4004 + nop
4005 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4006 + .size atomic64_add_unchecked, .-atomic64_add_unchecked
4007 +
4008 .globl atomic64_sub
4009 .type atomic64_sub,#function
4010 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4011 BACKOFF_SETUP(%o2)
4012 1: ldx [%o1], %g1
4013 - sub %g1, %o0, %g7
4014 + subcc %g1, %o0, %g7
4015 +
4016 +#ifdef CONFIG_PAX_REFCOUNT
4017 + tvs %xcc, 6
4018 +#endif
4019 +
4020 casx [%o1], %g1, %g7
4021 cmp %g1, %g7
4022 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4023 @@ -103,12 +194,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4024 2: BACKOFF_SPIN(%o2, %o3, 1b)
4025 .size atomic64_sub, .-atomic64_sub
4026
4027 + .globl atomic64_sub_unchecked
4028 + .type atomic64_sub_unchecked,#function
4029 +atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4030 + BACKOFF_SETUP(%o2)
4031 +1: ldx [%o1], %g1
4032 + subcc %g1, %o0, %g7
4033 + casx [%o1], %g1, %g7
4034 + cmp %g1, %g7
4035 + bne,pn %xcc, 2f
4036 + nop
4037 + retl
4038 + nop
4039 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4040 + .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4041 +
4042 .globl atomic64_add_ret
4043 .type atomic64_add_ret,#function
4044 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4045 BACKOFF_SETUP(%o2)
4046 1: ldx [%o1], %g1
4047 - add %g1, %o0, %g7
4048 + addcc %g1, %o0, %g7
4049 +
4050 +#ifdef CONFIG_PAX_REFCOUNT
4051 + tvs %xcc, 6
4052 +#endif
4053 +
4054 casx [%o1], %g1, %g7
4055 cmp %g1, %g7
4056 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4057 @@ -118,12 +229,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4058 2: BACKOFF_SPIN(%o2, %o3, 1b)
4059 .size atomic64_add_ret, .-atomic64_add_ret
4060
4061 + .globl atomic64_add_ret_unchecked
4062 + .type atomic64_add_ret_unchecked,#function
4063 +atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4064 + BACKOFF_SETUP(%o2)
4065 +1: ldx [%o1], %g1
4066 + addcc %g1, %o0, %g7
4067 + casx [%o1], %g1, %g7
4068 + cmp %g1, %g7
4069 + bne,pn %xcc, 2f
4070 + add %g7, %o0, %g7
4071 + mov %g7, %o0
4072 + retl
4073 + nop
4074 +2: BACKOFF_SPIN(%o2, %o3, 1b)
4075 + .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4076 +
4077 .globl atomic64_sub_ret
4078 .type atomic64_sub_ret,#function
4079 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4080 BACKOFF_SETUP(%o2)
4081 1: ldx [%o1], %g1
4082 - sub %g1, %o0, %g7
4083 + subcc %g1, %o0, %g7
4084 +
4085 +#ifdef CONFIG_PAX_REFCOUNT
4086 + tvs %xcc, 6
4087 +#endif
4088 +
4089 casx [%o1], %g1, %g7
4090 cmp %g1, %g7
4091 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
4092 diff -urNp linux-2.6.39.4/arch/sparc/lib/ksyms.c linux-2.6.39.4/arch/sparc/lib/ksyms.c
4093 --- linux-2.6.39.4/arch/sparc/lib/ksyms.c 2011-05-19 00:06:34.000000000 -0400
4094 +++ linux-2.6.39.4/arch/sparc/lib/ksyms.c 2011-08-05 19:44:33.000000000 -0400
4095 @@ -142,12 +142,17 @@ EXPORT_SYMBOL(__downgrade_write);
4096
4097 /* Atomic counter implementation. */
4098 EXPORT_SYMBOL(atomic_add);
4099 +EXPORT_SYMBOL(atomic_add_unchecked);
4100 EXPORT_SYMBOL(atomic_add_ret);
4101 EXPORT_SYMBOL(atomic_sub);
4102 +EXPORT_SYMBOL(atomic_sub_unchecked);
4103 EXPORT_SYMBOL(atomic_sub_ret);
4104 EXPORT_SYMBOL(atomic64_add);
4105 +EXPORT_SYMBOL(atomic64_add_unchecked);
4106 EXPORT_SYMBOL(atomic64_add_ret);
4107 +EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4108 EXPORT_SYMBOL(atomic64_sub);
4109 +EXPORT_SYMBOL(atomic64_sub_unchecked);
4110 EXPORT_SYMBOL(atomic64_sub_ret);
4111
4112 /* Atomic bit operations. */
4113 diff -urNp linux-2.6.39.4/arch/sparc/lib/Makefile linux-2.6.39.4/arch/sparc/lib/Makefile
4114 --- linux-2.6.39.4/arch/sparc/lib/Makefile 2011-05-19 00:06:34.000000000 -0400
4115 +++ linux-2.6.39.4/arch/sparc/lib/Makefile 2011-08-05 19:44:33.000000000 -0400
4116 @@ -2,7 +2,7 @@
4117 #
4118
4119 asflags-y := -ansi -DST_DIV0=0x02
4120 -ccflags-y := -Werror
4121 +#ccflags-y := -Werror
4122
4123 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4124 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4125 diff -urNp linux-2.6.39.4/arch/sparc/Makefile linux-2.6.39.4/arch/sparc/Makefile
4126 --- linux-2.6.39.4/arch/sparc/Makefile 2011-05-19 00:06:34.000000000 -0400
4127 +++ linux-2.6.39.4/arch/sparc/Makefile 2011-08-05 19:44:33.000000000 -0400
4128 @@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
4129 # Export what is needed by arch/sparc/boot/Makefile
4130 export VMLINUX_INIT VMLINUX_MAIN
4131 VMLINUX_INIT := $(head-y) $(init-y)
4132 -VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4133 +VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4134 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4135 VMLINUX_MAIN += $(drivers-y) $(net-y)
4136
4137 diff -urNp linux-2.6.39.4/arch/sparc/mm/fault_32.c linux-2.6.39.4/arch/sparc/mm/fault_32.c
4138 --- linux-2.6.39.4/arch/sparc/mm/fault_32.c 2011-05-19 00:06:34.000000000 -0400
4139 +++ linux-2.6.39.4/arch/sparc/mm/fault_32.c 2011-08-05 19:44:33.000000000 -0400
4140 @@ -22,6 +22,9 @@
4141 #include <linux/interrupt.h>
4142 #include <linux/module.h>
4143 #include <linux/kdebug.h>
4144 +#include <linux/slab.h>
4145 +#include <linux/pagemap.h>
4146 +#include <linux/compiler.h>
4147
4148 #include <asm/system.h>
4149 #include <asm/page.h>
4150 @@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str
4151 return safe_compute_effective_address(regs, insn);
4152 }
4153
4154 +#ifdef CONFIG_PAX_PAGEEXEC
4155 +#ifdef CONFIG_PAX_DLRESOLVE
4156 +static void pax_emuplt_close(struct vm_area_struct *vma)
4157 +{
4158 + vma->vm_mm->call_dl_resolve = 0UL;
4159 +}
4160 +
4161 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4162 +{
4163 + unsigned int *kaddr;
4164 +
4165 + vmf->page = alloc_page(GFP_HIGHUSER);
4166 + if (!vmf->page)
4167 + return VM_FAULT_OOM;
4168 +
4169 + kaddr = kmap(vmf->page);
4170 + memset(kaddr, 0, PAGE_SIZE);
4171 + kaddr[0] = 0x9DE3BFA8U; /* save */
4172 + flush_dcache_page(vmf->page);
4173 + kunmap(vmf->page);
4174 + return VM_FAULT_MAJOR;
4175 +}
4176 +
4177 +static const struct vm_operations_struct pax_vm_ops = {
4178 + .close = pax_emuplt_close,
4179 + .fault = pax_emuplt_fault
4180 +};
4181 +
4182 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4183 +{
4184 + int ret;
4185 +
4186 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4187 + vma->vm_mm = current->mm;
4188 + vma->vm_start = addr;
4189 + vma->vm_end = addr + PAGE_SIZE;
4190 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4191 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4192 + vma->vm_ops = &pax_vm_ops;
4193 +
4194 + ret = insert_vm_struct(current->mm, vma);
4195 + if (ret)
4196 + return ret;
4197 +
4198 + ++current->mm->total_vm;
4199 + return 0;
4200 +}
4201 +#endif
4202 +
4203 +/*
4204 + * PaX: decide what to do with offenders (regs->pc = fault address)
4205 + *
4206 + * returns 1 when task should be killed
4207 + * 2 when patched PLT trampoline was detected
4208 + * 3 when unpatched PLT trampoline was detected
4209 + */
4210 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4211 +{
4212 +
4213 +#ifdef CONFIG_PAX_EMUPLT
4214 + int err;
4215 +
4216 + do { /* PaX: patched PLT emulation #1 */
4217 + unsigned int sethi1, sethi2, jmpl;
4218 +
4219 + err = get_user(sethi1, (unsigned int *)regs->pc);
4220 + err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
4221 + err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
4222 +
4223 + if (err)
4224 + break;
4225 +
4226 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4227 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4228 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4229 + {
4230 + unsigned int addr;
4231 +
4232 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4233 + addr = regs->u_regs[UREG_G1];
4234 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4235 + regs->pc = addr;
4236 + regs->npc = addr+4;
4237 + return 2;
4238 + }
4239 + } while (0);
4240 +
4241 + { /* PaX: patched PLT emulation #2 */
4242 + unsigned int ba;
4243 +
4244 + err = get_user(ba, (unsigned int *)regs->pc);
4245 +
4246 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4247 + unsigned int addr;
4248 +
4249 + addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4250 + regs->pc = addr;
4251 + regs->npc = addr+4;
4252 + return 2;
4253 + }
4254 + }
4255 +
4256 + do { /* PaX: patched PLT emulation #3 */
4257 + unsigned int sethi, jmpl, nop;
4258 +
4259 + err = get_user(sethi, (unsigned int *)regs->pc);
4260 + err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
4261 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4262 +
4263 + if (err)
4264 + break;
4265 +
4266 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4267 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4268 + nop == 0x01000000U)
4269 + {
4270 + unsigned int addr;
4271 +
4272 + addr = (sethi & 0x003FFFFFU) << 10;
4273 + regs->u_regs[UREG_G1] = addr;
4274 + addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4275 + regs->pc = addr;
4276 + regs->npc = addr+4;
4277 + return 2;
4278 + }
4279 + } while (0);
4280 +
4281 + do { /* PaX: unpatched PLT emulation step 1 */
4282 + unsigned int sethi, ba, nop;
4283 +
4284 + err = get_user(sethi, (unsigned int *)regs->pc);
4285 + err |= get_user(ba, (unsigned int *)(regs->pc+4));
4286 + err |= get_user(nop, (unsigned int *)(regs->pc+8));
4287 +
4288 + if (err)
4289 + break;
4290 +
4291 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4292 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4293 + nop == 0x01000000U)
4294 + {
4295 + unsigned int addr, save, call;
4296 +
4297 + if ((ba & 0xFFC00000U) == 0x30800000U)
4298 + addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
4299 + else
4300 + addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
4301 +
4302 + err = get_user(save, (unsigned int *)addr);
4303 + err |= get_user(call, (unsigned int *)(addr+4));
4304 + err |= get_user(nop, (unsigned int *)(addr+8));
4305 + if (err)
4306 + break;
4307 +
4308 +#ifdef CONFIG_PAX_DLRESOLVE
4309 + if (save == 0x9DE3BFA8U &&
4310 + (call & 0xC0000000U) == 0x40000000U &&
4311 + nop == 0x01000000U)
4312 + {
4313 + struct vm_area_struct *vma;
4314 + unsigned long call_dl_resolve;
4315 +
4316 + down_read(&current->mm->mmap_sem);
4317 + call_dl_resolve = current->mm->call_dl_resolve;
4318 + up_read(&current->mm->mmap_sem);
4319 + if (likely(call_dl_resolve))
4320 + goto emulate;
4321 +
4322 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4323 +
4324 + down_write(&current->mm->mmap_sem);
4325 + if (current->mm->call_dl_resolve) {
4326 + call_dl_resolve = current->mm->call_dl_resolve;
4327 + up_write(&current->mm->mmap_sem);
4328 + if (vma)
4329 + kmem_cache_free(vm_area_cachep, vma);
4330 + goto emulate;
4331 + }
4332 +
4333 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4334 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4335 + up_write(&current->mm->mmap_sem);
4336 + if (vma)
4337 + kmem_cache_free(vm_area_cachep, vma);
4338 + return 1;
4339 + }
4340 +
4341 + if (pax_insert_vma(vma, call_dl_resolve)) {
4342 + up_write(&current->mm->mmap_sem);
4343 + kmem_cache_free(vm_area_cachep, vma);
4344 + return 1;
4345 + }
4346 +
4347 + current->mm->call_dl_resolve = call_dl_resolve;
4348 + up_write(&current->mm->mmap_sem);
4349 +
4350 +emulate:
4351 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4352 + regs->pc = call_dl_resolve;
4353 + regs->npc = addr+4;
4354 + return 3;
4355 + }
4356 +#endif
4357 +
4358 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4359 + if ((save & 0xFFC00000U) == 0x05000000U &&
4360 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4361 + nop == 0x01000000U)
4362 + {
4363 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4364 + regs->u_regs[UREG_G2] = addr + 4;
4365 + addr = (save & 0x003FFFFFU) << 10;
4366 + addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
4367 + regs->pc = addr;
4368 + regs->npc = addr+4;
4369 + return 3;
4370 + }
4371 + }
4372 + } while (0);
4373 +
4374 + do { /* PaX: unpatched PLT emulation step 2 */
4375 + unsigned int save, call, nop;
4376 +
4377 + err = get_user(save, (unsigned int *)(regs->pc-4));
4378 + err |= get_user(call, (unsigned int *)regs->pc);
4379 + err |= get_user(nop, (unsigned int *)(regs->pc+4));
4380 + if (err)
4381 + break;
4382 +
4383 + if (save == 0x9DE3BFA8U &&
4384 + (call & 0xC0000000U) == 0x40000000U &&
4385 + nop == 0x01000000U)
4386 + {
4387 + unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
4388 +
4389 + regs->u_regs[UREG_RETPC] = regs->pc;
4390 + regs->pc = dl_resolve;
4391 + regs->npc = dl_resolve+4;
4392 + return 3;
4393 + }
4394 + } while (0);
4395 +#endif
4396 +
4397 + return 1;
4398 +}
4399 +
4400 +void pax_report_insns(void *pc, void *sp)
4401 +{
4402 + unsigned long i;
4403 +
4404 + printk(KERN_ERR "PAX: bytes at PC: ");
4405 + for (i = 0; i < 8; i++) {
4406 + unsigned int c;
4407 + if (get_user(c, (unsigned int *)pc+i))
4408 + printk(KERN_CONT "???????? ");
4409 + else
4410 + printk(KERN_CONT "%08x ", c);
4411 + }
4412 + printk("\n");
4413 +}
4414 +#endif
4415 +
4416 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
4417 int text_fault)
4418 {
4419 @@ -281,6 +546,24 @@ good_area:
4420 if(!(vma->vm_flags & VM_WRITE))
4421 goto bad_area;
4422 } else {
4423 +
4424 +#ifdef CONFIG_PAX_PAGEEXEC
4425 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
4426 + up_read(&mm->mmap_sem);
4427 + switch (pax_handle_fetch_fault(regs)) {
4428 +
4429 +#ifdef CONFIG_PAX_EMUPLT
4430 + case 2:
4431 + case 3:
4432 + return;
4433 +#endif
4434 +
4435 + }
4436 + pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
4437 + do_group_exit(SIGKILL);
4438 + }
4439 +#endif
4440 +
4441 /* Allow reads even for write-only mappings */
4442 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
4443 goto bad_area;
4444 diff -urNp linux-2.6.39.4/arch/sparc/mm/fault_64.c linux-2.6.39.4/arch/sparc/mm/fault_64.c
4445 --- linux-2.6.39.4/arch/sparc/mm/fault_64.c 2011-05-19 00:06:34.000000000 -0400
4446 +++ linux-2.6.39.4/arch/sparc/mm/fault_64.c 2011-08-05 19:44:33.000000000 -0400
4447 @@ -21,6 +21,9 @@
4448 #include <linux/kprobes.h>
4449 #include <linux/kdebug.h>
4450 #include <linux/percpu.h>
4451 +#include <linux/slab.h>
4452 +#include <linux/pagemap.h>
4453 +#include <linux/compiler.h>
4454
4455 #include <asm/page.h>
4456 #include <asm/pgtable.h>
4457 @@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(stru
4458 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
4459 regs->tpc);
4460 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
4461 - printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
4462 + printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
4463 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
4464 dump_stack();
4465 unhandled_fault(regs->tpc, current, regs);
4466 @@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b
4467 show_regs(regs);
4468 }
4469
4470 +#ifdef CONFIG_PAX_PAGEEXEC
4471 +#ifdef CONFIG_PAX_DLRESOLVE
4472 +static void pax_emuplt_close(struct vm_area_struct *vma)
4473 +{
4474 + vma->vm_mm->call_dl_resolve = 0UL;
4475 +}
4476 +
4477 +static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
4478 +{
4479 + unsigned int *kaddr;
4480 +
4481 + vmf->page = alloc_page(GFP_HIGHUSER);
4482 + if (!vmf->page)
4483 + return VM_FAULT_OOM;
4484 +
4485 + kaddr = kmap(vmf->page);
4486 + memset(kaddr, 0, PAGE_SIZE);
4487 + kaddr[0] = 0x9DE3BFA8U; /* save */
4488 + flush_dcache_page(vmf->page);
4489 + kunmap(vmf->page);
4490 + return VM_FAULT_MAJOR;
4491 +}
4492 +
4493 +static const struct vm_operations_struct pax_vm_ops = {
4494 + .close = pax_emuplt_close,
4495 + .fault = pax_emuplt_fault
4496 +};
4497 +
4498 +static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
4499 +{
4500 + int ret;
4501 +
4502 + INIT_LIST_HEAD(&vma->anon_vma_chain);
4503 + vma->vm_mm = current->mm;
4504 + vma->vm_start = addr;
4505 + vma->vm_end = addr + PAGE_SIZE;
4506 + vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
4507 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
4508 + vma->vm_ops = &pax_vm_ops;
4509 +
4510 + ret = insert_vm_struct(current->mm, vma);
4511 + if (ret)
4512 + return ret;
4513 +
4514 + ++current->mm->total_vm;
4515 + return 0;
4516 +}
4517 +#endif
4518 +
4519 +/*
4520 + * PaX: decide what to do with offenders (regs->tpc = fault address)
4521 + *
4522 + * returns 1 when task should be killed
4523 + * 2 when patched PLT trampoline was detected
4524 + * 3 when unpatched PLT trampoline was detected
4525 + */
4526 +static int pax_handle_fetch_fault(struct pt_regs *regs)
4527 +{
4528 +
4529 +#ifdef CONFIG_PAX_EMUPLT
4530 + int err;
4531 +
4532 + do { /* PaX: patched PLT emulation #1 */
4533 + unsigned int sethi1, sethi2, jmpl;
4534 +
4535 + err = get_user(sethi1, (unsigned int *)regs->tpc);
4536 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
4537 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
4538 +
4539 + if (err)
4540 + break;
4541 +
4542 + if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
4543 + (sethi2 & 0xFFC00000U) == 0x03000000U &&
4544 + (jmpl & 0xFFFFE000U) == 0x81C06000U)
4545 + {
4546 + unsigned long addr;
4547 +
4548 + regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
4549 + addr = regs->u_regs[UREG_G1];
4550 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4551 +
4552 + if (test_thread_flag(TIF_32BIT))
4553 + addr &= 0xFFFFFFFFUL;
4554 +
4555 + regs->tpc = addr;
4556 + regs->tnpc = addr+4;
4557 + return 2;
4558 + }
4559 + } while (0);
4560 +
4561 + { /* PaX: patched PLT emulation #2 */
4562 + unsigned int ba;
4563 +
4564 + err = get_user(ba, (unsigned int *)regs->tpc);
4565 +
4566 + if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
4567 + unsigned long addr;
4568 +
4569 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4570 +
4571 + if (test_thread_flag(TIF_32BIT))
4572 + addr &= 0xFFFFFFFFUL;
4573 +
4574 + regs->tpc = addr;
4575 + regs->tnpc = addr+4;
4576 + return 2;
4577 + }
4578 + }
4579 +
4580 + do { /* PaX: patched PLT emulation #3 */
4581 + unsigned int sethi, jmpl, nop;
4582 +
4583 + err = get_user(sethi, (unsigned int *)regs->tpc);
4584 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
4585 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4586 +
4587 + if (err)
4588 + break;
4589 +
4590 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4591 + (jmpl & 0xFFFFE000U) == 0x81C06000U &&
4592 + nop == 0x01000000U)
4593 + {
4594 + unsigned long addr;
4595 +
4596 + addr = (sethi & 0x003FFFFFU) << 10;
4597 + regs->u_regs[UREG_G1] = addr;
4598 + addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4599 +
4600 + if (test_thread_flag(TIF_32BIT))
4601 + addr &= 0xFFFFFFFFUL;
4602 +
4603 + regs->tpc = addr;
4604 + regs->tnpc = addr+4;
4605 + return 2;
4606 + }
4607 + } while (0);
4608 +
4609 + do { /* PaX: patched PLT emulation #4 */
4610 + unsigned int sethi, mov1, call, mov2;
4611 +
4612 + err = get_user(sethi, (unsigned int *)regs->tpc);
4613 + err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
4614 + err |= get_user(call, (unsigned int *)(regs->tpc+8));
4615 + err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
4616 +
4617 + if (err)
4618 + break;
4619 +
4620 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4621 + mov1 == 0x8210000FU &&
4622 + (call & 0xC0000000U) == 0x40000000U &&
4623 + mov2 == 0x9E100001U)
4624 + {
4625 + unsigned long addr;
4626 +
4627 + regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
4628 + addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4629 +
4630 + if (test_thread_flag(TIF_32BIT))
4631 + addr &= 0xFFFFFFFFUL;
4632 +
4633 + regs->tpc = addr;
4634 + regs->tnpc = addr+4;
4635 + return 2;
4636 + }
4637 + } while (0);
4638 +
4639 + do { /* PaX: patched PLT emulation #5 */
4640 + unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
4641 +
4642 + err = get_user(sethi, (unsigned int *)regs->tpc);
4643 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4644 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4645 + err |= get_user(or1, (unsigned int *)(regs->tpc+12));
4646 + err |= get_user(or2, (unsigned int *)(regs->tpc+16));
4647 + err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
4648 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
4649 + err |= get_user(nop, (unsigned int *)(regs->tpc+28));
4650 +
4651 + if (err)
4652 + break;
4653 +
4654 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4655 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4656 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4657 + (or1 & 0xFFFFE000U) == 0x82106000U &&
4658 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4659 + sllx == 0x83287020U &&
4660 + jmpl == 0x81C04005U &&
4661 + nop == 0x01000000U)
4662 + {
4663 + unsigned long addr;
4664 +
4665 + regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4666 + regs->u_regs[UREG_G1] <<= 32;
4667 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4668 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4669 + regs->tpc = addr;
4670 + regs->tnpc = addr+4;
4671 + return 2;
4672 + }
4673 + } while (0);
4674 +
4675 + do { /* PaX: patched PLT emulation #6 */
4676 + unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
4677 +
4678 + err = get_user(sethi, (unsigned int *)regs->tpc);
4679 + err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
4680 + err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
4681 + err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
4682 + err |= get_user(or, (unsigned int *)(regs->tpc+16));
4683 + err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
4684 + err |= get_user(nop, (unsigned int *)(regs->tpc+24));
4685 +
4686 + if (err)
4687 + break;
4688 +
4689 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4690 + (sethi1 & 0xFFC00000U) == 0x03000000U &&
4691 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4692 + sllx == 0x83287020U &&
4693 + (or & 0xFFFFE000U) == 0x8A116000U &&
4694 + jmpl == 0x81C04005U &&
4695 + nop == 0x01000000U)
4696 + {
4697 + unsigned long addr;
4698 +
4699 + regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
4700 + regs->u_regs[UREG_G1] <<= 32;
4701 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
4702 + addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
4703 + regs->tpc = addr;
4704 + regs->tnpc = addr+4;
4705 + return 2;
4706 + }
4707 + } while (0);
4708 +
4709 + do { /* PaX: unpatched PLT emulation step 1 */
4710 + unsigned int sethi, ba, nop;
4711 +
4712 + err = get_user(sethi, (unsigned int *)regs->tpc);
4713 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4714 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4715 +
4716 + if (err)
4717 + break;
4718 +
4719 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4720 + ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
4721 + nop == 0x01000000U)
4722 + {
4723 + unsigned long addr;
4724 + unsigned int save, call;
4725 + unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
4726 +
4727 + if ((ba & 0xFFC00000U) == 0x30800000U)
4728 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
4729 + else
4730 + addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4731 +
4732 + if (test_thread_flag(TIF_32BIT))
4733 + addr &= 0xFFFFFFFFUL;
4734 +
4735 + err = get_user(save, (unsigned int *)addr);
4736 + err |= get_user(call, (unsigned int *)(addr+4));
4737 + err |= get_user(nop, (unsigned int *)(addr+8));
4738 + if (err)
4739 + break;
4740 +
4741 +#ifdef CONFIG_PAX_DLRESOLVE
4742 + if (save == 0x9DE3BFA8U &&
4743 + (call & 0xC0000000U) == 0x40000000U &&
4744 + nop == 0x01000000U)
4745 + {
4746 + struct vm_area_struct *vma;
4747 + unsigned long call_dl_resolve;
4748 +
4749 + down_read(&current->mm->mmap_sem);
4750 + call_dl_resolve = current->mm->call_dl_resolve;
4751 + up_read(&current->mm->mmap_sem);
4752 + if (likely(call_dl_resolve))
4753 + goto emulate;
4754 +
4755 + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
4756 +
4757 + down_write(&current->mm->mmap_sem);
4758 + if (current->mm->call_dl_resolve) {
4759 + call_dl_resolve = current->mm->call_dl_resolve;
4760 + up_write(&current->mm->mmap_sem);
4761 + if (vma)
4762 + kmem_cache_free(vm_area_cachep, vma);
4763 + goto emulate;
4764 + }
4765 +
4766 + call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
4767 + if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
4768 + up_write(&current->mm->mmap_sem);
4769 + if (vma)
4770 + kmem_cache_free(vm_area_cachep, vma);
4771 + return 1;
4772 + }
4773 +
4774 + if (pax_insert_vma(vma, call_dl_resolve)) {
4775 + up_write(&current->mm->mmap_sem);
4776 + kmem_cache_free(vm_area_cachep, vma);
4777 + return 1;
4778 + }
4779 +
4780 + current->mm->call_dl_resolve = call_dl_resolve;
4781 + up_write(&current->mm->mmap_sem);
4782 +
4783 +emulate:
4784 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4785 + regs->tpc = call_dl_resolve;
4786 + regs->tnpc = addr+4;
4787 + return 3;
4788 + }
4789 +#endif
4790 +
4791 + /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
4792 + if ((save & 0xFFC00000U) == 0x05000000U &&
4793 + (call & 0xFFFFE000U) == 0x85C0A000U &&
4794 + nop == 0x01000000U)
4795 + {
4796 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4797 + regs->u_regs[UREG_G2] = addr + 4;
4798 + addr = (save & 0x003FFFFFU) << 10;
4799 + addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
4800 +
4801 + if (test_thread_flag(TIF_32BIT))
4802 + addr &= 0xFFFFFFFFUL;
4803 +
4804 + regs->tpc = addr;
4805 + regs->tnpc = addr+4;
4806 + return 3;
4807 + }
4808 +
4809 + /* PaX: 64-bit PLT stub */
4810 + err = get_user(sethi1, (unsigned int *)addr);
4811 + err |= get_user(sethi2, (unsigned int *)(addr+4));
4812 + err |= get_user(or1, (unsigned int *)(addr+8));
4813 + err |= get_user(or2, (unsigned int *)(addr+12));
4814 + err |= get_user(sllx, (unsigned int *)(addr+16));
4815 + err |= get_user(add, (unsigned int *)(addr+20));
4816 + err |= get_user(jmpl, (unsigned int *)(addr+24));
4817 + err |= get_user(nop, (unsigned int *)(addr+28));
4818 + if (err)
4819 + break;
4820 +
4821 + if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
4822 + (sethi2 & 0xFFC00000U) == 0x0B000000U &&
4823 + (or1 & 0xFFFFE000U) == 0x88112000U &&
4824 + (or2 & 0xFFFFE000U) == 0x8A116000U &&
4825 + sllx == 0x89293020U &&
4826 + add == 0x8A010005U &&
4827 + jmpl == 0x89C14000U &&
4828 + nop == 0x01000000U)
4829 + {
4830 + regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
4831 + regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
4832 + regs->u_regs[UREG_G4] <<= 32;
4833 + regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
4834 + regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
4835 + regs->u_regs[UREG_G4] = addr + 24;
4836 + addr = regs->u_regs[UREG_G5];
4837 + regs->tpc = addr;
4838 + regs->tnpc = addr+4;
4839 + return 3;
4840 + }
4841 + }
4842 + } while (0);
4843 +
4844 +#ifdef CONFIG_PAX_DLRESOLVE
4845 + do { /* PaX: unpatched PLT emulation step 2 */
4846 + unsigned int save, call, nop;
4847 +
4848 + err = get_user(save, (unsigned int *)(regs->tpc-4));
4849 + err |= get_user(call, (unsigned int *)regs->tpc);
4850 + err |= get_user(nop, (unsigned int *)(regs->tpc+4));
4851 + if (err)
4852 + break;
4853 +
4854 + if (save == 0x9DE3BFA8U &&
4855 + (call & 0xC0000000U) == 0x40000000U &&
4856 + nop == 0x01000000U)
4857 + {
4858 + unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
4859 +
4860 + if (test_thread_flag(TIF_32BIT))
4861 + dl_resolve &= 0xFFFFFFFFUL;
4862 +
4863 + regs->u_regs[UREG_RETPC] = regs->tpc;
4864 + regs->tpc = dl_resolve;
4865 + regs->tnpc = dl_resolve+4;
4866 + return 3;
4867 + }
4868 + } while (0);
4869 +#endif
4870 +
4871 + do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
4872 + unsigned int sethi, ba, nop;
4873 +
4874 + err = get_user(sethi, (unsigned int *)regs->tpc);
4875 + err |= get_user(ba, (unsigned int *)(regs->tpc+4));
4876 + err |= get_user(nop, (unsigned int *)(regs->tpc+8));
4877 +
4878 + if (err)
4879 + break;
4880 +
4881 + if ((sethi & 0xFFC00000U) == 0x03000000U &&
4882 + (ba & 0xFFF00000U) == 0x30600000U &&
4883 + nop == 0x01000000U)
4884 + {
4885 + unsigned long addr;
4886 +
4887 + addr = (sethi & 0x003FFFFFU) << 10;
4888 + regs->u_regs[UREG_G1] = addr;
4889 + addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
4890 +
4891 + if (test_thread_flag(TIF_32BIT))
4892 + addr &= 0xFFFFFFFFUL;
4893 +
4894 + regs->tpc = addr;
4895 + regs->tnpc = addr+4;
4896 + return 2;
4897 + }
4898 + } while (0);
4899 +
4900 +#endif
4901 +
4902 + return 1;
4903 +}
4904 +
4905 +void pax_report_insns(void *pc, void *sp)
4906 +{
4907 + unsigned long i;
4908 +
4909 + printk(KERN_ERR "PAX: bytes at PC: ");
4910 + for (i = 0; i < 8; i++) {
4911 + unsigned int c;
4912 + if (get_user(c, (unsigned int *)pc+i))
4913 + printk(KERN_CONT "???????? ");
4914 + else
4915 + printk(KERN_CONT "%08x ", c);
4916 + }
4917 + printk("\n");
4918 +}
4919 +#endif
4920 +
4921 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
4922 {
4923 struct mm_struct *mm = current->mm;
4924 @@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau
4925 if (!vma)
4926 goto bad_area;
4927
4928 +#ifdef CONFIG_PAX_PAGEEXEC
4929 + /* PaX: detect ITLB misses on non-exec pages */
4930 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
4931 + !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
4932 + {
4933 + if (address != regs->tpc)
4934 + goto good_area;
4935 +
4936 + up_read(&mm->mmap_sem);
4937 + switch (pax_handle_fetch_fault(regs)) {
4938 +
4939 +#ifdef CONFIG_PAX_EMUPLT
4940 + case 2:
4941 + case 3:
4942 + return;
4943 +#endif
4944 +
4945 + }
4946 + pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
4947 + do_group_exit(SIGKILL);
4948 + }
4949 +#endif
4950 +
4951 /* Pure DTLB misses do not tell us whether the fault causing
4952 * load/store/atomic was a write or not, it only says that there
4953 * was no match. So in such a case we (carefully) read the
4954 diff -urNp linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c
4955 --- linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400
4956 +++ linux-2.6.39.4/arch/sparc/mm/hugetlbpage.c 2011-08-05 19:44:33.000000000 -0400
4957 @@ -68,7 +68,7 @@ full_search:
4958 }
4959 return -ENOMEM;
4960 }
4961 - if (likely(!vma || addr + len <= vma->vm_start)) {
4962 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4963 /*
4964 * Remember the place where we stopped the search:
4965 */
4966 @@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct
4967 /* make sure it can fit in the remaining address space */
4968 if (likely(addr > len)) {
4969 vma = find_vma(mm, addr-len);
4970 - if (!vma || addr <= vma->vm_start) {
4971 + if (check_heap_stack_gap(vma, addr - len, len)) {
4972 /* remember the address as a hint for next time */
4973 return (mm->free_area_cache = addr-len);
4974 }
4975 @@ -116,16 +116,17 @@ hugetlb_get_unmapped_area_topdown(struct
4976 if (unlikely(mm->mmap_base < len))
4977 goto bottomup;
4978
4979 - addr = (mm->mmap_base-len) & HPAGE_MASK;
4980 + addr = mm->mmap_base - len;
4981
4982 do {
4983 + addr &= HPAGE_MASK;
4984 /*
4985 * Lookup failure means no vma is above this address,
4986 * else if new region fits below vma->vm_start,
4987 * return with success:
4988 */
4989 vma = find_vma(mm, addr);
4990 - if (likely(!vma || addr+len <= vma->vm_start)) {
4991 + if (likely(check_heap_stack_gap(vma, addr, len))) {
4992 /* remember the address as a hint for next time */
4993 return (mm->free_area_cache = addr);
4994 }
4995 @@ -135,8 +136,8 @@ hugetlb_get_unmapped_area_topdown(struct
4996 mm->cached_hole_size = vma->vm_start - addr;
4997
4998 /* try just below the current vma->vm_start */
4999 - addr = (vma->vm_start-len) & HPAGE_MASK;
5000 - } while (likely(len < vma->vm_start));
5001 + addr = skip_heap_stack_gap(vma, len);
5002 + } while (!IS_ERR_VALUE(addr));
5003
5004 bottomup:
5005 /*
5006 @@ -182,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f
5007 if (addr) {
5008 addr = ALIGN(addr, HPAGE_SIZE);
5009 vma = find_vma(mm, addr);
5010 - if (task_size - len >= addr &&
5011 - (!vma || addr + len <= vma->vm_start))
5012 + if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5013 return addr;
5014 }
5015 if (mm->get_unmapped_area == arch_get_unmapped_area)
5016 diff -urNp linux-2.6.39.4/arch/sparc/mm/init_32.c linux-2.6.39.4/arch/sparc/mm/init_32.c
5017 --- linux-2.6.39.4/arch/sparc/mm/init_32.c 2011-05-19 00:06:34.000000000 -0400
5018 +++ linux-2.6.39.4/arch/sparc/mm/init_32.c 2011-08-05 19:44:33.000000000 -0400
5019 @@ -318,6 +318,9 @@ extern void device_scan(void);
5020 pgprot_t PAGE_SHARED __read_mostly;
5021 EXPORT_SYMBOL(PAGE_SHARED);
5022
5023 +pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5024 +EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5025 +
5026 void __init paging_init(void)
5027 {
5028 switch(sparc_cpu_model) {
5029 @@ -346,17 +349,17 @@ void __init paging_init(void)
5030
5031 /* Initialize the protection map with non-constant, MMU dependent values. */
5032 protection_map[0] = PAGE_NONE;
5033 - protection_map[1] = PAGE_READONLY;
5034 - protection_map[2] = PAGE_COPY;
5035 - protection_map[3] = PAGE_COPY;
5036 + protection_map[1] = PAGE_READONLY_NOEXEC;
5037 + protection_map[2] = PAGE_COPY_NOEXEC;
5038 + protection_map[3] = PAGE_COPY_NOEXEC;
5039 protection_map[4] = PAGE_READONLY;
5040 protection_map[5] = PAGE_READONLY;
5041 protection_map[6] = PAGE_COPY;
5042 protection_map[7] = PAGE_COPY;
5043 protection_map[8] = PAGE_NONE;
5044 - protection_map[9] = PAGE_READONLY;
5045 - protection_map[10] = PAGE_SHARED;
5046 - protection_map[11] = PAGE_SHARED;
5047 + protection_map[9] = PAGE_READONLY_NOEXEC;
5048 + protection_map[10] = PAGE_SHARED_NOEXEC;
5049 + protection_map[11] = PAGE_SHARED_NOEXEC;
5050 protection_map[12] = PAGE_READONLY;
5051 protection_map[13] = PAGE_READONLY;
5052 protection_map[14] = PAGE_SHARED;
5053 diff -urNp linux-2.6.39.4/arch/sparc/mm/Makefile linux-2.6.39.4/arch/sparc/mm/Makefile
5054 --- linux-2.6.39.4/arch/sparc/mm/Makefile 2011-05-19 00:06:34.000000000 -0400
5055 +++ linux-2.6.39.4/arch/sparc/mm/Makefile 2011-08-05 19:44:33.000000000 -0400
5056 @@ -2,7 +2,7 @@
5057 #
5058
5059 asflags-y := -ansi
5060 -ccflags-y := -Werror
5061 +#ccflags-y := -Werror
5062
5063 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5064 obj-y += fault_$(BITS).o
5065 diff -urNp linux-2.6.39.4/arch/sparc/mm/srmmu.c linux-2.6.39.4/arch/sparc/mm/srmmu.c
5066 --- linux-2.6.39.4/arch/sparc/mm/srmmu.c 2011-05-19 00:06:34.000000000 -0400
5067 +++ linux-2.6.39.4/arch/sparc/mm/srmmu.c 2011-08-05 19:44:33.000000000 -0400
5068 @@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
5069 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
5070 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
5071 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
5072 +
5073 +#ifdef CONFIG_PAX_PAGEEXEC
5074 + PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
5075 + BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
5076 + BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
5077 +#endif
5078 +
5079 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
5080 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
5081
5082 diff -urNp linux-2.6.39.4/arch/um/include/asm/kmap_types.h linux-2.6.39.4/arch/um/include/asm/kmap_types.h
5083 --- linux-2.6.39.4/arch/um/include/asm/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
5084 +++ linux-2.6.39.4/arch/um/include/asm/kmap_types.h 2011-08-05 19:44:33.000000000 -0400
5085 @@ -23,6 +23,7 @@ enum km_type {
5086 KM_IRQ1,
5087 KM_SOFTIRQ0,
5088 KM_SOFTIRQ1,
5089 + KM_CLEARPAGE,
5090 KM_TYPE_NR
5091 };
5092
5093 diff -urNp linux-2.6.39.4/arch/um/include/asm/page.h linux-2.6.39.4/arch/um/include/asm/page.h
5094 --- linux-2.6.39.4/arch/um/include/asm/page.h 2011-05-19 00:06:34.000000000 -0400
5095 +++ linux-2.6.39.4/arch/um/include/asm/page.h 2011-08-05 19:44:33.000000000 -0400
5096 @@ -14,6 +14,9 @@
5097 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
5098 #define PAGE_MASK (~(PAGE_SIZE-1))
5099
5100 +#define ktla_ktva(addr) (addr)
5101 +#define ktva_ktla(addr) (addr)
5102 +
5103 #ifndef __ASSEMBLY__
5104
5105 struct page;
5106 diff -urNp linux-2.6.39.4/arch/um/kernel/process.c linux-2.6.39.4/arch/um/kernel/process.c
5107 --- linux-2.6.39.4/arch/um/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
5108 +++ linux-2.6.39.4/arch/um/kernel/process.c 2011-08-05 19:44:33.000000000 -0400
5109 @@ -404,22 +404,6 @@ int singlestepping(void * t)
5110 return 2;
5111 }
5112
5113 -/*
5114 - * Only x86 and x86_64 have an arch_align_stack().
5115 - * All other arches have "#define arch_align_stack(x) (x)"
5116 - * in their asm/system.h
5117 - * As this is included in UML from asm-um/system-generic.h,
5118 - * we can use it to behave as the subarch does.
5119 - */
5120 -#ifndef arch_align_stack
5121 -unsigned long arch_align_stack(unsigned long sp)
5122 -{
5123 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5124 - sp -= get_random_int() % 8192;
5125 - return sp & ~0xf;
5126 -}
5127 -#endif
5128 -
5129 unsigned long get_wchan(struct task_struct *p)
5130 {
5131 unsigned long stack_page, sp, ip;
5132 diff -urNp linux-2.6.39.4/arch/um/sys-i386/syscalls.c linux-2.6.39.4/arch/um/sys-i386/syscalls.c
5133 --- linux-2.6.39.4/arch/um/sys-i386/syscalls.c 2011-05-19 00:06:34.000000000 -0400
5134 +++ linux-2.6.39.4/arch/um/sys-i386/syscalls.c 2011-08-05 19:44:33.000000000 -0400
5135 @@ -11,6 +11,21 @@
5136 #include "asm/uaccess.h"
5137 #include "asm/unistd.h"
5138
5139 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
5140 +{
5141 + unsigned long pax_task_size = TASK_SIZE;
5142 +
5143 +#ifdef CONFIG_PAX_SEGMEXEC
5144 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
5145 + pax_task_size = SEGMEXEC_TASK_SIZE;
5146 +#endif
5147 +
5148 + if (len > pax_task_size || addr > pax_task_size - len)
5149 + return -EINVAL;
5150 +
5151 + return 0;
5152 +}
5153 +
5154 /*
5155 * The prototype on i386 is:
5156 *
5157 diff -urNp linux-2.6.39.4/arch/x86/boot/bitops.h linux-2.6.39.4/arch/x86/boot/bitops.h
5158 --- linux-2.6.39.4/arch/x86/boot/bitops.h 2011-05-19 00:06:34.000000000 -0400
5159 +++ linux-2.6.39.4/arch/x86/boot/bitops.h 2011-08-05 19:44:33.000000000 -0400
5160 @@ -26,7 +26,7 @@ static inline int variable_test_bit(int
5161 u8 v;
5162 const u32 *p = (const u32 *)addr;
5163
5164 - asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5165 + asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
5166 return v;
5167 }
5168
5169 @@ -37,7 +37,7 @@ static inline int variable_test_bit(int
5170
5171 static inline void set_bit(int nr, void *addr)
5172 {
5173 - asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5174 + asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
5175 }
5176
5177 #endif /* BOOT_BITOPS_H */
5178 diff -urNp linux-2.6.39.4/arch/x86/boot/boot.h linux-2.6.39.4/arch/x86/boot/boot.h
5179 --- linux-2.6.39.4/arch/x86/boot/boot.h 2011-05-19 00:06:34.000000000 -0400
5180 +++ linux-2.6.39.4/arch/x86/boot/boot.h 2011-08-05 19:44:33.000000000 -0400
5181 @@ -85,7 +85,7 @@ static inline void io_delay(void)
5182 static inline u16 ds(void)
5183 {
5184 u16 seg;
5185 - asm("movw %%ds,%0" : "=rm" (seg));
5186 + asm volatile("movw %%ds,%0" : "=rm" (seg));
5187 return seg;
5188 }
5189
5190 @@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t
5191 static inline int memcmp(const void *s1, const void *s2, size_t len)
5192 {
5193 u8 diff;
5194 - asm("repe; cmpsb; setnz %0"
5195 + asm volatile("repe; cmpsb; setnz %0"
5196 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
5197 return diff;
5198 }
5199 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/head_32.S linux-2.6.39.4/arch/x86/boot/compressed/head_32.S
5200 --- linux-2.6.39.4/arch/x86/boot/compressed/head_32.S 2011-05-19 00:06:34.000000000 -0400
5201 +++ linux-2.6.39.4/arch/x86/boot/compressed/head_32.S 2011-08-05 19:44:33.000000000 -0400
5202 @@ -76,7 +76,7 @@ ENTRY(startup_32)
5203 notl %eax
5204 andl %eax, %ebx
5205 #else
5206 - movl $LOAD_PHYSICAL_ADDR, %ebx
5207 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5208 #endif
5209
5210 /* Target address to relocate to for decompression */
5211 @@ -162,7 +162,7 @@ relocated:
5212 * and where it was actually loaded.
5213 */
5214 movl %ebp, %ebx
5215 - subl $LOAD_PHYSICAL_ADDR, %ebx
5216 + subl $____LOAD_PHYSICAL_ADDR, %ebx
5217 jz 2f /* Nothing to be done if loaded at compiled addr. */
5218 /*
5219 * Process relocations.
5220 @@ -170,8 +170,7 @@ relocated:
5221
5222 1: subl $4, %edi
5223 movl (%edi), %ecx
5224 - testl %ecx, %ecx
5225 - jz 2f
5226 + jecxz 2f
5227 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
5228 jmp 1b
5229 2:
5230 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/head_64.S linux-2.6.39.4/arch/x86/boot/compressed/head_64.S
5231 --- linux-2.6.39.4/arch/x86/boot/compressed/head_64.S 2011-05-19 00:06:34.000000000 -0400
5232 +++ linux-2.6.39.4/arch/x86/boot/compressed/head_64.S 2011-08-05 19:44:33.000000000 -0400
5233 @@ -91,7 +91,7 @@ ENTRY(startup_32)
5234 notl %eax
5235 andl %eax, %ebx
5236 #else
5237 - movl $LOAD_PHYSICAL_ADDR, %ebx
5238 + movl $____LOAD_PHYSICAL_ADDR, %ebx
5239 #endif
5240
5241 /* Target address to relocate to for decompression */
5242 @@ -233,7 +233,7 @@ ENTRY(startup_64)
5243 notq %rax
5244 andq %rax, %rbp
5245 #else
5246 - movq $LOAD_PHYSICAL_ADDR, %rbp
5247 + movq $____LOAD_PHYSICAL_ADDR, %rbp
5248 #endif
5249
5250 /* Target address to relocate to for decompression */
5251 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/Makefile linux-2.6.39.4/arch/x86/boot/compressed/Makefile
5252 --- linux-2.6.39.4/arch/x86/boot/compressed/Makefile 2011-05-19 00:06:34.000000000 -0400
5253 +++ linux-2.6.39.4/arch/x86/boot/compressed/Makefile 2011-08-05 20:34:06.000000000 -0400
5254 @@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
5255 KBUILD_CFLAGS += $(cflags-y)
5256 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
5257 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
5258 +ifdef CONSTIFY_PLUGIN
5259 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5260 +endif
5261
5262 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5263 GCOV_PROFILE := n
5264 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/misc.c linux-2.6.39.4/arch/x86/boot/compressed/misc.c
5265 --- linux-2.6.39.4/arch/x86/boot/compressed/misc.c 2011-05-19 00:06:34.000000000 -0400
5266 +++ linux-2.6.39.4/arch/x86/boot/compressed/misc.c 2011-08-05 19:44:33.000000000 -0400
5267 @@ -310,7 +310,7 @@ static void parse_elf(void *output)
5268 case PT_LOAD:
5269 #ifdef CONFIG_RELOCATABLE
5270 dest = output;
5271 - dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
5272 + dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
5273 #else
5274 dest = (void *)(phdr->p_paddr);
5275 #endif
5276 @@ -363,7 +363,7 @@ asmlinkage void decompress_kernel(void *
5277 error("Destination address too large");
5278 #endif
5279 #ifndef CONFIG_RELOCATABLE
5280 - if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
5281 + if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
5282 error("Wrong destination address");
5283 #endif
5284
5285 diff -urNp linux-2.6.39.4/arch/x86/boot/compressed/relocs.c linux-2.6.39.4/arch/x86/boot/compressed/relocs.c
5286 --- linux-2.6.39.4/arch/x86/boot/compressed/relocs.c 2011-05-19 00:06:34.000000000 -0400
5287 +++ linux-2.6.39.4/arch/x86/boot/compressed/relocs.c 2011-08-05 19:44:33.000000000 -0400
5288 @@ -13,8 +13,11 @@
5289
5290 static void die(char *fmt, ...);
5291
5292 +#include "../../../../include/generated/autoconf.h"
5293 +
5294 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
5295 static Elf32_Ehdr ehdr;
5296 +static Elf32_Phdr *phdr;
5297 static unsigned long reloc_count, reloc_idx;
5298 static unsigned long *relocs;
5299
5300 @@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp)
5301 }
5302 }
5303
5304 +static void read_phdrs(FILE *fp)
5305 +{
5306 + unsigned int i;
5307 +
5308 + phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
5309 + if (!phdr) {
5310 + die("Unable to allocate %d program headers\n",
5311 + ehdr.e_phnum);
5312 + }
5313 + if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
5314 + die("Seek to %d failed: %s\n",
5315 + ehdr.e_phoff, strerror(errno));
5316 + }
5317 + if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
5318 + die("Cannot read ELF program headers: %s\n",
5319 + strerror(errno));
5320 + }
5321 + for(i = 0; i < ehdr.e_phnum; i++) {
5322 + phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
5323 + phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
5324 + phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
5325 + phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
5326 + phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
5327 + phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
5328 + phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
5329 + phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
5330 + }
5331 +
5332 +}
5333 +
5334 static void read_shdrs(FILE *fp)
5335 {
5336 - int i;
5337 + unsigned int i;
5338 Elf32_Shdr shdr;
5339
5340 secs = calloc(ehdr.e_shnum, sizeof(struct section));
5341 @@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp)
5342
5343 static void read_strtabs(FILE *fp)
5344 {
5345 - int i;
5346 + unsigned int i;
5347 for (i = 0; i < ehdr.e_shnum; i++) {
5348 struct section *sec = &secs[i];
5349 if (sec->shdr.sh_type != SHT_STRTAB) {
5350 @@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp)
5351
5352 static void read_symtabs(FILE *fp)
5353 {
5354 - int i,j;
5355 + unsigned int i,j;
5356 for (i = 0; i < ehdr.e_shnum; i++) {
5357 struct section *sec = &secs[i];
5358 if (sec->shdr.sh_type != SHT_SYMTAB) {
5359 @@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp)
5360
5361 static void read_relocs(FILE *fp)
5362 {
5363 - int i,j;
5364 + unsigned int i,j;
5365 + uint32_t base;
5366 +
5367 for (i = 0; i < ehdr.e_shnum; i++) {
5368 struct section *sec = &secs[i];
5369 if (sec->shdr.sh_type != SHT_REL) {
5370 @@ -385,9 +420,18 @@ static void read_relocs(FILE *fp)
5371 die("Cannot read symbol table: %s\n",
5372 strerror(errno));
5373 }
5374 + base = 0;
5375 + for (j = 0; j < ehdr.e_phnum; j++) {
5376 + if (phdr[j].p_type != PT_LOAD )
5377 + continue;
5378 + if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
5379 + continue;
5380 + base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
5381 + break;
5382 + }
5383 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
5384 Elf32_Rel *rel = &sec->reltab[j];
5385 - rel->r_offset = elf32_to_cpu(rel->r_offset);
5386 + rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
5387 rel->r_info = elf32_to_cpu(rel->r_info);
5388 }
5389 }
5390 @@ -396,14 +440,14 @@ static void read_relocs(FILE *fp)
5391
5392 static void print_absolute_symbols(void)
5393 {
5394 - int i;
5395 + unsigned int i;
5396 printf("Absolute symbols\n");
5397 printf(" Num: Value Size Type Bind Visibility Name\n");
5398 for (i = 0; i < ehdr.e_shnum; i++) {
5399 struct section *sec = &secs[i];
5400 char *sym_strtab;
5401 Elf32_Sym *sh_symtab;
5402 - int j;
5403 + unsigned int j;
5404
5405 if (sec->shdr.sh_type != SHT_SYMTAB) {
5406 continue;
5407 @@ -431,14 +475,14 @@ static void print_absolute_symbols(void)
5408
5409 static void print_absolute_relocs(void)
5410 {
5411 - int i, printed = 0;
5412 + unsigned int i, printed = 0;
5413
5414 for (i = 0; i < ehdr.e_shnum; i++) {
5415 struct section *sec = &secs[i];
5416 struct section *sec_applies, *sec_symtab;
5417 char *sym_strtab;
5418 Elf32_Sym *sh_symtab;
5419 - int j;
5420 + unsigned int j;
5421 if (sec->shdr.sh_type != SHT_REL) {
5422 continue;
5423 }
5424 @@ -499,13 +543,13 @@ static void print_absolute_relocs(void)
5425
5426 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
5427 {
5428 - int i;
5429 + unsigned int i;
5430 /* Walk through the relocations */
5431 for (i = 0; i < ehdr.e_shnum; i++) {
5432 char *sym_strtab;
5433 Elf32_Sym *sh_symtab;
5434 struct section *sec_applies, *sec_symtab;
5435 - int j;
5436 + unsigned int j;
5437 struct section *sec = &secs[i];
5438
5439 if (sec->shdr.sh_type != SHT_REL) {
5440 @@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El
5441 !is_rel_reloc(sym_name(sym_strtab, sym))) {
5442 continue;
5443 }
5444 + /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
5445 + if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
5446 + continue;
5447 +
5448 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
5449 + /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
5450 + if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
5451 + continue;
5452 + if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
5453 + continue;
5454 + if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
5455 + continue;
5456 + if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
5457 + continue;
5458 +#endif
5459 +
5460 switch (r_type) {
5461 case R_386_NONE:
5462 case R_386_PC32:
5463 @@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co
5464
5465 static void emit_relocs(int as_text)
5466 {
5467 - int i;
5468 + unsigned int i;
5469 /* Count how many relocations I have and allocate space for them. */
5470 reloc_count = 0;
5471 walk_relocs(count_reloc);
5472 @@ -665,6 +725,7 @@ int main(int argc, char **argv)
5473 fname, strerror(errno));
5474 }
5475 read_ehdr(fp);
5476 + read_phdrs(fp);
5477 read_shdrs(fp);
5478 read_strtabs(fp);
5479 read_symtabs(fp);
5480 diff -urNp linux-2.6.39.4/arch/x86/boot/cpucheck.c linux-2.6.39.4/arch/x86/boot/cpucheck.c
5481 --- linux-2.6.39.4/arch/x86/boot/cpucheck.c 2011-05-19 00:06:34.000000000 -0400
5482 +++ linux-2.6.39.4/arch/x86/boot/cpucheck.c 2011-08-05 19:44:33.000000000 -0400
5483 @@ -74,7 +74,7 @@ static int has_fpu(void)
5484 u16 fcw = -1, fsw = -1;
5485 u32 cr0;
5486
5487 - asm("movl %%cr0,%0" : "=r" (cr0));
5488 + asm volatile("movl %%cr0,%0" : "=r" (cr0));
5489 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
5490 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
5491 asm volatile("movl %0,%%cr0" : : "r" (cr0));
5492 @@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
5493 {
5494 u32 f0, f1;
5495
5496 - asm("pushfl ; "
5497 + asm volatile("pushfl ; "
5498 "pushfl ; "
5499 "popl %0 ; "
5500 "movl %0,%1 ; "
5501 @@ -115,7 +115,7 @@ static void get_flags(void)
5502 set_bit(X86_FEATURE_FPU, cpu.flags);
5503
5504 if (has_eflag(X86_EFLAGS_ID)) {
5505 - asm("cpuid"
5506 + asm volatile("cpuid"
5507 : "=a" (max_intel_level),
5508 "=b" (cpu_vendor[0]),
5509 "=d" (cpu_vendor[1]),
5510 @@ -124,7 +124,7 @@ static void get_flags(void)
5511
5512 if (max_intel_level >= 0x00000001 &&
5513 max_intel_level <= 0x0000ffff) {
5514 - asm("cpuid"
5515 + asm volatile("cpuid"
5516 : "=a" (tfms),
5517 "=c" (cpu.flags[4]),
5518 "=d" (cpu.flags[0])
5519 @@ -136,7 +136,7 @@ static void get_flags(void)
5520 cpu.model += ((tfms >> 16) & 0xf) << 4;
5521 }
5522
5523 - asm("cpuid"
5524 + asm volatile("cpuid"
5525 : "=a" (max_amd_level)
5526 : "a" (0x80000000)
5527 : "ebx", "ecx", "edx");
5528 @@ -144,7 +144,7 @@ static void get_flags(void)
5529 if (max_amd_level >= 0x80000001 &&
5530 max_amd_level <= 0x8000ffff) {
5531 u32 eax = 0x80000001;
5532 - asm("cpuid"
5533 + asm volatile("cpuid"
5534 : "+a" (eax),
5535 "=c" (cpu.flags[6]),
5536 "=d" (cpu.flags[1])
5537 @@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5538 u32 ecx = MSR_K7_HWCR;
5539 u32 eax, edx;
5540
5541 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5542 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5543 eax &= ~(1 << 15);
5544 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5545 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5546
5547 get_flags(); /* Make sure it really did something */
5548 err = check_flags();
5549 @@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
5550 u32 ecx = MSR_VIA_FCR;
5551 u32 eax, edx;
5552
5553 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5554 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5555 eax |= (1<<1)|(1<<7);
5556 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5557 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5558
5559 set_bit(X86_FEATURE_CX8, cpu.flags);
5560 err = check_flags();
5561 @@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
5562 u32 eax, edx;
5563 u32 level = 1;
5564
5565 - asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5566 - asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5567 - asm("cpuid"
5568 + asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
5569 + asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
5570 + asm volatile("cpuid"
5571 : "+a" (level), "=d" (cpu.flags[0])
5572 : : "ecx", "ebx");
5573 - asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5574 + asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
5575
5576 err = check_flags();
5577 }
5578 diff -urNp linux-2.6.39.4/arch/x86/boot/header.S linux-2.6.39.4/arch/x86/boot/header.S
5579 --- linux-2.6.39.4/arch/x86/boot/header.S 2011-05-19 00:06:34.000000000 -0400
5580 +++ linux-2.6.39.4/arch/x86/boot/header.S 2011-08-05 19:44:33.000000000 -0400
5581 @@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
5582 # single linked list of
5583 # struct setup_data
5584
5585 -pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
5586 +pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
5587
5588 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
5589 #define VO_INIT_SIZE (VO__end - VO__text)
5590 diff -urNp linux-2.6.39.4/arch/x86/boot/Makefile linux-2.6.39.4/arch/x86/boot/Makefile
5591 --- linux-2.6.39.4/arch/x86/boot/Makefile 2011-05-19 00:06:34.000000000 -0400
5592 +++ linux-2.6.39.4/arch/x86/boot/Makefile 2011-08-05 20:34:06.000000000 -0400
5593 @@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
5594 $(call cc-option, -fno-stack-protector) \
5595 $(call cc-option, -mpreferred-stack-boundary=2)
5596 KBUILD_CFLAGS += $(call cc-option, -m32)
5597 +ifdef CONSTIFY_PLUGIN
5598 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
5599 +endif
5600 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
5601 GCOV_PROFILE := n
5602
5603 diff -urNp linux-2.6.39.4/arch/x86/boot/memory.c linux-2.6.39.4/arch/x86/boot/memory.c
5604 --- linux-2.6.39.4/arch/x86/boot/memory.c 2011-05-19 00:06:34.000000000 -0400
5605 +++ linux-2.6.39.4/arch/x86/boot/memory.c 2011-08-05 19:44:33.000000000 -0400
5606 @@ -19,7 +19,7 @@
5607
5608 static int detect_memory_e820(void)
5609 {
5610 - int count = 0;
5611 + unsigned int count = 0;
5612 struct biosregs ireg, oreg;
5613 struct e820entry *desc = boot_params.e820_map;
5614 static struct e820entry buf; /* static so it is zeroed */
5615 diff -urNp linux-2.6.39.4/arch/x86/boot/video.c linux-2.6.39.4/arch/x86/boot/video.c
5616 --- linux-2.6.39.4/arch/x86/boot/video.c 2011-05-19 00:06:34.000000000 -0400
5617 +++ linux-2.6.39.4/arch/x86/boot/video.c 2011-08-05 19:44:33.000000000 -0400
5618 @@ -96,7 +96,7 @@ static void store_mode_params(void)
5619 static unsigned int get_entry(void)
5620 {
5621 char entry_buf[4];
5622 - int i, len = 0;
5623 + unsigned int i, len = 0;
5624 int key;
5625 unsigned int v;
5626
5627 diff -urNp linux-2.6.39.4/arch/x86/boot/video-vesa.c linux-2.6.39.4/arch/x86/boot/video-vesa.c
5628 --- linux-2.6.39.4/arch/x86/boot/video-vesa.c 2011-05-19 00:06:34.000000000 -0400
5629 +++ linux-2.6.39.4/arch/x86/boot/video-vesa.c 2011-08-05 19:44:33.000000000 -0400
5630 @@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
5631
5632 boot_params.screen_info.vesapm_seg = oreg.es;
5633 boot_params.screen_info.vesapm_off = oreg.di;
5634 + boot_params.screen_info.vesapm_size = oreg.cx;
5635 }
5636
5637 /*
5638 diff -urNp linux-2.6.39.4/arch/x86/ia32/ia32_aout.c linux-2.6.39.4/arch/x86/ia32/ia32_aout.c
5639 --- linux-2.6.39.4/arch/x86/ia32/ia32_aout.c 2011-05-19 00:06:34.000000000 -0400
5640 +++ linux-2.6.39.4/arch/x86/ia32/ia32_aout.c 2011-08-05 19:44:33.000000000 -0400
5641 @@ -162,6 +162,8 @@ static int aout_core_dump(long signr, st
5642 unsigned long dump_start, dump_size;
5643 struct user32 dump;
5644
5645 + memset(&dump, 0, sizeof(dump));
5646 +
5647 fs = get_fs();
5648 set_fs(KERNEL_DS);
5649 has_dumped = 1;
5650 diff -urNp linux-2.6.39.4/arch/x86/ia32/ia32entry.S linux-2.6.39.4/arch/x86/ia32/ia32entry.S
5651 --- linux-2.6.39.4/arch/x86/ia32/ia32entry.S 2011-05-19 00:06:34.000000000 -0400
5652 +++ linux-2.6.39.4/arch/x86/ia32/ia32entry.S 2011-08-05 19:44:33.000000000 -0400
5653 @@ -13,6 +13,7 @@
5654 #include <asm/thread_info.h>
5655 #include <asm/segment.h>
5656 #include <asm/irqflags.h>
5657 +#include <asm/pgtable.h>
5658 #include <linux/linkage.h>
5659
5660 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
5661 @@ -95,6 +96,32 @@ ENTRY(native_irq_enable_sysexit)
5662 ENDPROC(native_irq_enable_sysexit)
5663 #endif
5664
5665 + .macro pax_enter_kernel_user
5666 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5667 + call pax_enter_kernel_user
5668 +#endif
5669 + .endm
5670 +
5671 + .macro pax_exit_kernel_user
5672 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5673 + call pax_exit_kernel_user
5674 +#endif
5675 +#ifdef CONFIG_PAX_RANDKSTACK
5676 + pushq %rax
5677 + call pax_randomize_kstack
5678 + popq %rax
5679 +#endif
5680 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5681 + call pax_erase_kstack
5682 +#endif
5683 + .endm
5684 +
5685 + .macro pax_erase_kstack
5686 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
5687 + call pax_erase_kstack
5688 +#endif
5689 + .endm
5690 +
5691 /*
5692 * 32bit SYSENTER instruction entry.
5693 *
5694 @@ -121,7 +148,7 @@ ENTRY(ia32_sysenter_target)
5695 CFI_REGISTER rsp,rbp
5696 SWAPGS_UNSAFE_STACK
5697 movq PER_CPU_VAR(kernel_stack), %rsp
5698 - addq $(KERNEL_STACK_OFFSET),%rsp
5699 + pax_enter_kernel_user
5700 /*
5701 * No need to follow this irqs on/off section: the syscall
5702 * disabled irqs, here we enable it straight after entry:
5703 @@ -134,7 +161,8 @@ ENTRY(ia32_sysenter_target)
5704 CFI_REL_OFFSET rsp,0
5705 pushfq_cfi
5706 /*CFI_REL_OFFSET rflags,0*/
5707 - movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
5708 + GET_THREAD_INFO(%r10)
5709 + movl TI_sysenter_return(%r10), %r10d
5710 CFI_REGISTER rip,r10
5711 pushq_cfi $__USER32_CS
5712 /*CFI_REL_OFFSET cs,0*/
5713 @@ -146,6 +174,12 @@ ENTRY(ia32_sysenter_target)
5714 SAVE_ARGS 0,0,1
5715 /* no need to do an access_ok check here because rbp has been
5716 32bit zero extended */
5717 +
5718 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5719 + mov $PAX_USER_SHADOW_BASE,%r10
5720 + add %r10,%rbp
5721 +#endif
5722 +
5723 1: movl (%rbp),%ebp
5724 .section __ex_table,"a"
5725 .quad 1b,ia32_badarg
5726 @@ -168,6 +202,7 @@ sysenter_dispatch:
5727 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5728 jnz sysexit_audit
5729 sysexit_from_sys_call:
5730 + pax_exit_kernel_user
5731 andl $~TS_COMPAT,TI_status(%r10)
5732 /* clear IF, that popfq doesn't enable interrupts early */
5733 andl $~0x200,EFLAGS-R11(%rsp)
5734 @@ -194,6 +229,9 @@ sysexit_from_sys_call:
5735 movl %eax,%esi /* 2nd arg: syscall number */
5736 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
5737 call audit_syscall_entry
5738 +
5739 + pax_erase_kstack
5740 +
5741 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
5742 cmpq $(IA32_NR_syscalls-1),%rax
5743 ja ia32_badsys
5744 @@ -246,6 +284,9 @@ sysenter_tracesys:
5745 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
5746 movq %rsp,%rdi /* &pt_regs -> arg1 */
5747 call syscall_trace_enter
5748 +
5749 + pax_erase_kstack
5750 +
5751 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5752 RESTORE_REST
5753 cmpq $(IA32_NR_syscalls-1),%rax
5754 @@ -277,19 +318,24 @@ ENDPROC(ia32_sysenter_target)
5755 ENTRY(ia32_cstar_target)
5756 CFI_STARTPROC32 simple
5757 CFI_SIGNAL_FRAME
5758 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
5759 + CFI_DEF_CFA rsp,0
5760 CFI_REGISTER rip,rcx
5761 /*CFI_REGISTER rflags,r11*/
5762 SWAPGS_UNSAFE_STACK
5763 movl %esp,%r8d
5764 CFI_REGISTER rsp,r8
5765 movq PER_CPU_VAR(kernel_stack),%rsp
5766 +
5767 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5768 + pax_enter_kernel_user
5769 +#endif
5770 +
5771 /*
5772 * No need to follow this irqs on/off section: the syscall
5773 * disabled irqs and here we enable it straight after entry:
5774 */
5775 ENABLE_INTERRUPTS(CLBR_NONE)
5776 - SAVE_ARGS 8,1,1
5777 + SAVE_ARGS 8*6,1,1
5778 movl %eax,%eax /* zero extension */
5779 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
5780 movq %rcx,RIP-ARGOFFSET(%rsp)
5781 @@ -305,6 +351,12 @@ ENTRY(ia32_cstar_target)
5782 /* no need to do an access_ok check here because r8 has been
5783 32bit zero extended */
5784 /* hardware stack frame is complete now */
5785 +
5786 +#ifdef CONFIG_PAX_MEMORY_UDEREF
5787 + mov $PAX_USER_SHADOW_BASE,%r10
5788 + add %r10,%r8
5789 +#endif
5790 +
5791 1: movl (%r8),%r9d
5792 .section __ex_table,"a"
5793 .quad 1b,ia32_badarg
5794 @@ -327,6 +379,7 @@ cstar_dispatch:
5795 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
5796 jnz sysretl_audit
5797 sysretl_from_sys_call:
5798 + pax_exit_kernel_user
5799 andl $~TS_COMPAT,TI_status(%r10)
5800 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
5801 movl RIP-ARGOFFSET(%rsp),%ecx
5802 @@ -364,6 +417,9 @@ cstar_tracesys:
5803 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5804 movq %rsp,%rdi /* &pt_regs -> arg1 */
5805 call syscall_trace_enter
5806 +
5807 + pax_erase_kstack
5808 +
5809 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
5810 RESTORE_REST
5811 xchgl %ebp,%r9d
5812 @@ -409,6 +465,7 @@ ENTRY(ia32_syscall)
5813 CFI_REL_OFFSET rip,RIP-RIP
5814 PARAVIRT_ADJUST_EXCEPTION_FRAME
5815 SWAPGS
5816 + pax_enter_kernel_user
5817 /*
5818 * No need to follow this irqs on/off section: the syscall
5819 * disabled irqs and here we enable it straight after entry:
5820 @@ -441,6 +498,9 @@ ia32_tracesys:
5821 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
5822 movq %rsp,%rdi /* &pt_regs -> arg1 */
5823 call syscall_trace_enter
5824 +
5825 + pax_erase_kstack
5826 +
5827 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
5828 RESTORE_REST
5829 cmpq $(IA32_NR_syscalls-1),%rax
5830 diff -urNp linux-2.6.39.4/arch/x86/ia32/ia32_signal.c linux-2.6.39.4/arch/x86/ia32/ia32_signal.c
5831 --- linux-2.6.39.4/arch/x86/ia32/ia32_signal.c 2011-05-19 00:06:34.000000000 -0400
5832 +++ linux-2.6.39.4/arch/x86/ia32/ia32_signal.c 2011-08-05 19:44:33.000000000 -0400
5833 @@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
5834 sp -= frame_size;
5835 /* Align the stack pointer according to the i386 ABI,
5836 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
5837 - sp = ((sp + 4) & -16ul) - 4;
5838 + sp = ((sp - 12) & -16ul) - 4;
5839 return (void __user *) sp;
5840 }
5841
5842 @@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
5843 * These are actually not used anymore, but left because some
5844 * gdb versions depend on them as a marker.
5845 */
5846 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5847 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5848 } put_user_catch(err);
5849
5850 if (err)
5851 @@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
5852 0xb8,
5853 __NR_ia32_rt_sigreturn,
5854 0x80cd,
5855 - 0,
5856 + 0
5857 };
5858
5859 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
5860 @@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
5861
5862 if (ka->sa.sa_flags & SA_RESTORER)
5863 restorer = ka->sa.sa_restorer;
5864 + else if (current->mm->context.vdso)
5865 + /* Return stub is in 32bit vsyscall page */
5866 + restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
5867 else
5868 - restorer = VDSO32_SYMBOL(current->mm->context.vdso,
5869 - rt_sigreturn);
5870 + restorer = &frame->retcode;
5871 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
5872
5873 /*
5874 * Not actually used anymore, but left because some gdb
5875 * versions need it.
5876 */
5877 - put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
5878 + put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
5879 } put_user_catch(err);
5880
5881 if (err)
5882 diff -urNp linux-2.6.39.4/arch/x86/include/asm/alternative.h linux-2.6.39.4/arch/x86/include/asm/alternative.h
5883 --- linux-2.6.39.4/arch/x86/include/asm/alternative.h 2011-05-19 00:06:34.000000000 -0400
5884 +++ linux-2.6.39.4/arch/x86/include/asm/alternative.h 2011-08-05 19:44:33.000000000 -0400
5885 @@ -94,7 +94,7 @@ static inline int alternatives_text_rese
5886 ".section .discard,\"aw\",@progbits\n" \
5887 " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
5888 ".previous\n" \
5889 - ".section .altinstr_replacement, \"ax\"\n" \
5890 + ".section .altinstr_replacement, \"a\"\n" \
5891 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
5892 ".previous"
5893
5894 diff -urNp linux-2.6.39.4/arch/x86/include/asm/apic.h linux-2.6.39.4/arch/x86/include/asm/apic.h
5895 --- linux-2.6.39.4/arch/x86/include/asm/apic.h 2011-05-19 00:06:34.000000000 -0400
5896 +++ linux-2.6.39.4/arch/x86/include/asm/apic.h 2011-08-17 20:01:35.000000000 -0400
5897 @@ -45,7 +45,7 @@ static inline void generic_apic_probe(vo
5898
5899 #ifdef CONFIG_X86_LOCAL_APIC
5900
5901 -extern unsigned int apic_verbosity;
5902 +extern int apic_verbosity;
5903 extern int local_apic_timer_c2_ok;
5904
5905 extern int disable_apic;
5906 diff -urNp linux-2.6.39.4/arch/x86/include/asm/apm.h linux-2.6.39.4/arch/x86/include/asm/apm.h
5907 --- linux-2.6.39.4/arch/x86/include/asm/apm.h 2011-05-19 00:06:34.000000000 -0400
5908 +++ linux-2.6.39.4/arch/x86/include/asm/apm.h 2011-08-05 19:44:33.000000000 -0400
5909 @@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
5910 __asm__ __volatile__(APM_DO_ZERO_SEGS
5911 "pushl %%edi\n\t"
5912 "pushl %%ebp\n\t"
5913 - "lcall *%%cs:apm_bios_entry\n\t"
5914 + "lcall *%%ss:apm_bios_entry\n\t"
5915 "setc %%al\n\t"
5916 "popl %%ebp\n\t"
5917 "popl %%edi\n\t"
5918 @@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
5919 __asm__ __volatile__(APM_DO_ZERO_SEGS
5920 "pushl %%edi\n\t"
5921 "pushl %%ebp\n\t"
5922 - "lcall *%%cs:apm_bios_entry\n\t"
5923 + "lcall *%%ss:apm_bios_entry\n\t"
5924 "setc %%bl\n\t"
5925 "popl %%ebp\n\t"
5926 "popl %%edi\n\t"
5927 diff -urNp linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h
5928 --- linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h 2011-05-19 00:06:34.000000000 -0400
5929 +++ linux-2.6.39.4/arch/x86/include/asm/atomic64_32.h 2011-08-05 19:44:33.000000000 -0400
5930 @@ -12,6 +12,14 @@ typedef struct {
5931 u64 __aligned(8) counter;
5932 } atomic64_t;
5933
5934 +#ifdef CONFIG_PAX_REFCOUNT
5935 +typedef struct {
5936 + u64 __aligned(8) counter;
5937 +} atomic64_unchecked_t;
5938 +#else
5939 +typedef atomic64_t atomic64_unchecked_t;
5940 +#endif
5941 +
5942 #define ATOMIC64_INIT(val) { (val) }
5943
5944 #ifdef CONFIG_X86_CMPXCHG64
5945 @@ -38,6 +46,21 @@ static inline long long atomic64_cmpxchg
5946 }
5947
5948 /**
5949 + * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
5950 + * @p: pointer to type atomic64_unchecked_t
5951 + * @o: expected value
5952 + * @n: new value
5953 + *
5954 + * Atomically sets @v to @n if it was equal to @o and returns
5955 + * the old value.
5956 + */
5957 +
5958 +static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
5959 +{
5960 + return cmpxchg64(&v->counter, o, n);
5961 +}
5962 +
5963 +/**
5964 * atomic64_xchg - xchg atomic64 variable
5965 * @v: pointer to type atomic64_t
5966 * @n: value to assign
5967 @@ -77,6 +100,24 @@ static inline void atomic64_set(atomic64
5968 }
5969
5970 /**
5971 + * atomic64_set_unchecked - set atomic64 variable
5972 + * @v: pointer to type atomic64_unchecked_t
5973 + * @n: value to assign
5974 + *
5975 + * Atomically sets the value of @v to @n.
5976 + */
5977 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
5978 +{
5979 + unsigned high = (unsigned)(i >> 32);
5980 + unsigned low = (unsigned)i;
5981 + asm volatile(ATOMIC64_ALTERNATIVE(set)
5982 + : "+b" (low), "+c" (high)
5983 + : "S" (v)
5984 + : "eax", "edx", "memory"
5985 + );
5986 +}
5987 +
5988 +/**
5989 * atomic64_read - read atomic64 variable
5990 * @v: pointer to type atomic64_t
5991 *
5992 @@ -93,6 +134,22 @@ static inline long long atomic64_read(at
5993 }
5994
5995 /**
5996 + * atomic64_read_unchecked - read atomic64 variable
5997 + * @v: pointer to type atomic64_unchecked_t
5998 + *
5999 + * Atomically reads the value of @v and returns it.
6000 + */
6001 +static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
6002 +{
6003 + long long r;
6004 + asm volatile(ATOMIC64_ALTERNATIVE(read_unchecked)
6005 + : "=A" (r), "+c" (v)
6006 + : : "memory"
6007 + );
6008 + return r;
6009 + }
6010 +
6011 +/**
6012 * atomic64_add_return - add and return
6013 * @i: integer value to add
6014 * @v: pointer to type atomic64_t
6015 @@ -108,6 +165,22 @@ static inline long long atomic64_add_ret
6016 return i;
6017 }
6018
6019 +/**
6020 + * atomic64_add_return_unchecked - add and return
6021 + * @i: integer value to add
6022 + * @v: pointer to type atomic64_unchecked_t
6023 + *
6024 + * Atomically adds @i to @v and returns @i + *@v
6025 + */
6026 +static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
6027 +{
6028 + asm volatile(ATOMIC64_ALTERNATIVE(add_return_unchecked)
6029 + : "+A" (i), "+c" (v)
6030 + : : "memory"
6031 + );
6032 + return i;
6033 +}
6034 +
6035 /*
6036 * Other variants with different arithmetic operators:
6037 */
6038 @@ -131,6 +204,17 @@ static inline long long atomic64_inc_ret
6039 return a;
6040 }
6041
6042 +static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6043 +{
6044 + long long a;
6045 + asm volatile(ATOMIC64_ALTERNATIVE(inc_return_unchecked)
6046 + : "=A" (a)
6047 + : "S" (v)
6048 + : "memory", "ecx"
6049 + );
6050 + return a;
6051 +}
6052 +
6053 static inline long long atomic64_dec_return(atomic64_t *v)
6054 {
6055 long long a;
6056 @@ -159,6 +243,22 @@ static inline long long atomic64_add(lon
6057 }
6058
6059 /**
6060 + * atomic64_add_unchecked - add integer to atomic64 variable
6061 + * @i: integer value to add
6062 + * @v: pointer to type atomic64_unchecked_t
6063 + *
6064 + * Atomically adds @i to @v.
6065 + */
6066 +static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
6067 +{
6068 + asm volatile(ATOMIC64_ALTERNATIVE_(add_unchecked, add_return_unchecked)
6069 + : "+A" (i), "+c" (v)
6070 + : : "memory"
6071 + );
6072 + return i;
6073 +}
6074 +
6075 +/**
6076 * atomic64_sub - subtract the atomic64 variable
6077 * @i: integer value to subtract
6078 * @v: pointer to type atomic64_t
6079 diff -urNp linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h
6080 --- linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h 2011-05-19 00:06:34.000000000 -0400
6081 +++ linux-2.6.39.4/arch/x86/include/asm/atomic64_64.h 2011-08-05 19:44:33.000000000 -0400
6082 @@ -18,7 +18,19 @@
6083 */
6084 static inline long atomic64_read(const atomic64_t *v)
6085 {
6086 - return (*(volatile long *)&(v)->counter);
6087 + return (*(volatile const long *)&(v)->counter);
6088 +}
6089 +
6090 +/**
6091 + * atomic64_read_unchecked - read atomic64 variable
6092 + * @v: pointer of type atomic64_unchecked_t
6093 + *
6094 + * Atomically reads the value of @v.
6095 + * Doesn't imply a read memory barrier.
6096 + */
6097 +static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
6098 +{
6099 + return (*(volatile const long *)&(v)->counter);
6100 }
6101
6102 /**
6103 @@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64
6104 }
6105
6106 /**
6107 + * atomic64_set_unchecked - set atomic64 variable
6108 + * @v: pointer to type atomic64_unchecked_t
6109 + * @i: required value
6110 + *
6111 + * Atomically sets the value of @v to @i.
6112 + */
6113 +static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
6114 +{
6115 + v->counter = i;
6116 +}
6117 +
6118 +/**
6119 * atomic64_add - add integer to atomic64 variable
6120 * @i: integer value to add
6121 * @v: pointer to type atomic64_t
6122 @@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64
6123 */
6124 static inline void atomic64_add(long i, atomic64_t *v)
6125 {
6126 + asm volatile(LOCK_PREFIX "addq %1,%0\n"
6127 +
6128 +#ifdef CONFIG_PAX_REFCOUNT
6129 + "jno 0f\n"
6130 + LOCK_PREFIX "subq %1,%0\n"
6131 + "int $4\n0:\n"
6132 + _ASM_EXTABLE(0b, 0b)
6133 +#endif
6134 +
6135 + : "=m" (v->counter)
6136 + : "er" (i), "m" (v->counter));
6137 +}
6138 +
6139 +/**
6140 + * atomic64_add_unchecked - add integer to atomic64 variable
6141 + * @i: integer value to add
6142 + * @v: pointer to type atomic64_unchecked_t
6143 + *
6144 + * Atomically adds @i to @v.
6145 + */
6146 +static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
6147 +{
6148 asm volatile(LOCK_PREFIX "addq %1,%0"
6149 : "=m" (v->counter)
6150 : "er" (i), "m" (v->counter));
6151 @@ -56,7 +102,29 @@ static inline void atomic64_add(long i,
6152 */
6153 static inline void atomic64_sub(long i, atomic64_t *v)
6154 {
6155 - asm volatile(LOCK_PREFIX "subq %1,%0"
6156 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6157 +
6158 +#ifdef CONFIG_PAX_REFCOUNT
6159 + "jno 0f\n"
6160 + LOCK_PREFIX "addq %1,%0\n"
6161 + "int $4\n0:\n"
6162 + _ASM_EXTABLE(0b, 0b)
6163 +#endif
6164 +
6165 + : "=m" (v->counter)
6166 + : "er" (i), "m" (v->counter));
6167 +}
6168 +
6169 +/**
6170 + * atomic64_sub_unchecked - subtract the atomic64 variable
6171 + * @i: integer value to subtract
6172 + * @v: pointer to type atomic64_unchecked_t
6173 + *
6174 + * Atomically subtracts @i from @v.
6175 + */
6176 +static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
6177 +{
6178 + asm volatile(LOCK_PREFIX "subq %1,%0\n"
6179 : "=m" (v->counter)
6180 : "er" (i), "m" (v->counter));
6181 }
6182 @@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(
6183 {
6184 unsigned char c;
6185
6186 - asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
6187 + asm volatile(LOCK_PREFIX "subq %2,%0\n"
6188 +
6189 +#ifdef CONFIG_PAX_REFCOUNT
6190 + "jno 0f\n"
6191 + LOCK_PREFIX "addq %2,%0\n"
6192 + "int $4\n0:\n"
6193 + _ASM_EXTABLE(0b, 0b)
6194 +#endif
6195 +
6196 + "sete %1\n"
6197 : "=m" (v->counter), "=qm" (c)
6198 : "er" (i), "m" (v->counter) : "memory");
6199 return c;
6200 @@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(
6201 */
6202 static inline void atomic64_inc(atomic64_t *v)
6203 {
6204 + asm volatile(LOCK_PREFIX "incq %0\n"
6205 +
6206 +#ifdef CONFIG_PAX_REFCOUNT
6207 + "jno 0f\n"
6208 + LOCK_PREFIX "decq %0\n"
6209 + "int $4\n0:\n"
6210 + _ASM_EXTABLE(0b, 0b)
6211 +#endif
6212 +
6213 + : "=m" (v->counter)
6214 + : "m" (v->counter));
6215 +}
6216 +
6217 +/**
6218 + * atomic64_inc_unchecked - increment atomic64 variable
6219 + * @v: pointer to type atomic64_unchecked_t
6220 + *
6221 + * Atomically increments @v by 1.
6222 + */
6223 +static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
6224 +{
6225 asm volatile(LOCK_PREFIX "incq %0"
6226 : "=m" (v->counter)
6227 : "m" (v->counter));
6228 @@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64
6229 */
6230 static inline void atomic64_dec(atomic64_t *v)
6231 {
6232 - asm volatile(LOCK_PREFIX "decq %0"
6233 + asm volatile(LOCK_PREFIX "decq %0\n"
6234 +
6235 +#ifdef CONFIG_PAX_REFCOUNT
6236 + "jno 0f\n"
6237 + LOCK_PREFIX "incq %0\n"
6238 + "int $4\n0:\n"
6239 + _ASM_EXTABLE(0b, 0b)
6240 +#endif
6241 +
6242 + : "=m" (v->counter)
6243 + : "m" (v->counter));
6244 +}
6245 +
6246 +/**
6247 + * atomic64_dec_unchecked - decrement atomic64 variable
6248 + * @v: pointer to type atomic64_t
6249 + *
6250 + * Atomically decrements @v by 1.
6251 + */
6252 +static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
6253 +{
6254 + asm volatile(LOCK_PREFIX "decq %0\n"
6255 : "=m" (v->counter)
6256 : "m" (v->counter));
6257 }
6258 @@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(
6259 {
6260 unsigned char c;
6261
6262 - asm volatile(LOCK_PREFIX "decq %0; sete %1"
6263 + asm volatile(LOCK_PREFIX "decq %0\n"
6264 +
6265 +#ifdef CONFIG_PAX_REFCOUNT
6266 + "jno 0f\n"
6267 + LOCK_PREFIX "incq %0\n"
6268 + "int $4\n0:\n"
6269 + _ASM_EXTABLE(0b, 0b)
6270 +#endif
6271 +
6272 + "sete %1\n"
6273 : "=m" (v->counter), "=qm" (c)
6274 : "m" (v->counter) : "memory");
6275 return c != 0;
6276 @@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(
6277 {
6278 unsigned char c;
6279
6280 - asm volatile(LOCK_PREFIX "incq %0; sete %1"
6281 + asm volatile(LOCK_PREFIX "incq %0\n"
6282 +
6283 +#ifdef CONFIG_PAX_REFCOUNT
6284 + "jno 0f\n"
6285 + LOCK_PREFIX "decq %0\n"
6286 + "int $4\n0:\n"
6287 + _ASM_EXTABLE(0b, 0b)
6288 +#endif
6289 +
6290 + "sete %1\n"
6291 : "=m" (v->counter), "=qm" (c)
6292 : "m" (v->counter) : "memory");
6293 return c != 0;
6294 @@ -155,7 +292,16 @@ static inline int atomic64_add_negative(
6295 {
6296 unsigned char c;
6297
6298 - asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
6299 + asm volatile(LOCK_PREFIX "addq %2,%0\n"
6300 +
6301 +#ifdef CONFIG_PAX_REFCOUNT
6302 + "jno 0f\n"
6303 + LOCK_PREFIX "subq %2,%0\n"
6304 + "int $4\n0:\n"
6305 + _ASM_EXTABLE(0b, 0b)
6306 +#endif
6307 +
6308 + "sets %1\n"
6309 : "=m" (v->counter), "=qm" (c)
6310 : "er" (i), "m" (v->counter) : "memory");
6311 return c;
6312 @@ -171,7 +317,31 @@ static inline int atomic64_add_negative(
6313 static inline long atomic64_add_return(long i, atomic64_t *v)
6314 {
6315 long __i = i;
6316 - asm volatile(LOCK_PREFIX "xaddq %0, %1;"
6317 + asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
6318 +
6319 +#ifdef CONFIG_PAX_REFCOUNT
6320 + "jno 0f\n"
6321 + "movq %0, %1\n"
6322 + "int $4\n0:\n"
6323 + _ASM_EXTABLE(0b, 0b)
6324 +#endif
6325 +
6326 + : "+r" (i), "+m" (v->counter)
6327 + : : "memory");
6328 + return i + __i;
6329 +}
6330 +
6331 +/**
6332 + * atomic64_add_return_unchecked - add and return
6333 + * @i: integer value to add
6334 + * @v: pointer to type atomic64_unchecked_t
6335 + *
6336 + * Atomically adds @i to @v and returns @i + @v
6337 + */
6338 +static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
6339 +{
6340 + long __i = i;
6341 + asm volatile(LOCK_PREFIX "xaddq %0, %1"
6342 : "+r" (i), "+m" (v->counter)
6343 : : "memory");
6344 return i + __i;
6345 @@ -183,6 +353,10 @@ static inline long atomic64_sub_return(l
6346 }
6347
6348 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
6349 +static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
6350 +{
6351 + return atomic64_add_return_unchecked(1, v);
6352 +}
6353 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
6354
6355 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
6356 @@ -190,6 +364,11 @@ static inline long atomic64_cmpxchg(atom
6357 return cmpxchg(&v->counter, old, new);
6358 }
6359
6360 +static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
6361 +{
6362 + return cmpxchg(&v->counter, old, new);
6363 +}
6364 +
6365 static inline long atomic64_xchg(atomic64_t *v, long new)
6366 {
6367 return xchg(&v->counter, new);
6368 @@ -206,17 +385,30 @@ static inline long atomic64_xchg(atomic6
6369 */
6370 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
6371 {
6372 - long c, old;
6373 + long c, old, new;
6374 c = atomic64_read(v);
6375 for (;;) {
6376 - if (unlikely(c == (u)))
6377 + if (unlikely(c == u))
6378 break;
6379 - old = atomic64_cmpxchg((v), c, c + (a));
6380 +
6381 + asm volatile("add %2,%0\n"
6382 +
6383 +#ifdef CONFIG_PAX_REFCOUNT
6384 + "jno 0f\n"
6385 + "sub %2,%0\n"
6386 + "int $4\n0:\n"
6387 + _ASM_EXTABLE(0b, 0b)
6388 +#endif
6389 +
6390 + : "=r" (new)
6391 + : "0" (c), "ir" (a));
6392 +
6393 + old = atomic64_cmpxchg(v, c, new);
6394 if (likely(old == c))
6395 break;
6396 c = old;
6397 }
6398 - return c != (u);
6399 + return c != u;
6400 }
6401
6402 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
6403 diff -urNp linux-2.6.39.4/arch/x86/include/asm/atomic.h linux-2.6.39.4/arch/x86/include/asm/atomic.h
6404 --- linux-2.6.39.4/arch/x86/include/asm/atomic.h 2011-05-19 00:06:34.000000000 -0400
6405 +++ linux-2.6.39.4/arch/x86/include/asm/atomic.h 2011-08-05 19:44:33.000000000 -0400
6406 @@ -22,7 +22,18 @@
6407 */
6408 static inline int atomic_read(const atomic_t *v)
6409 {
6410 - return (*(volatile int *)&(v)->counter);
6411 + return (*(volatile const int *)&(v)->counter);
6412 +}
6413 +
6414 +/**
6415 + * atomic_read_unchecked - read atomic variable
6416 + * @v: pointer of type atomic_unchecked_t
6417 + *
6418 + * Atomically reads the value of @v.
6419 + */
6420 +static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6421 +{
6422 + return (*(volatile const int *)&(v)->counter);
6423 }
6424
6425 /**
6426 @@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *
6427 }
6428
6429 /**
6430 + * atomic_set_unchecked - set atomic variable
6431 + * @v: pointer of type atomic_unchecked_t
6432 + * @i: required value
6433 + *
6434 + * Atomically sets the value of @v to @i.
6435 + */
6436 +static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6437 +{
6438 + v->counter = i;
6439 +}
6440 +
6441 +/**
6442 * atomic_add - add integer to atomic variable
6443 * @i: integer value to add
6444 * @v: pointer of type atomic_t
6445 @@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *
6446 */
6447 static inline void atomic_add(int i, atomic_t *v)
6448 {
6449 - asm volatile(LOCK_PREFIX "addl %1,%0"
6450 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6451 +
6452 +#ifdef CONFIG_PAX_REFCOUNT
6453 + "jno 0f\n"
6454 + LOCK_PREFIX "subl %1,%0\n"
6455 + "int $4\n0:\n"
6456 + _ASM_EXTABLE(0b, 0b)
6457 +#endif
6458 +
6459 + : "+m" (v->counter)
6460 + : "ir" (i));
6461 +}
6462 +
6463 +/**
6464 + * atomic_add_unchecked - add integer to atomic variable
6465 + * @i: integer value to add
6466 + * @v: pointer of type atomic_unchecked_t
6467 + *
6468 + * Atomically adds @i to @v.
6469 + */
6470 +static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6471 +{
6472 + asm volatile(LOCK_PREFIX "addl %1,%0\n"
6473 : "+m" (v->counter)
6474 : "ir" (i));
6475 }
6476 @@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato
6477 */
6478 static inline void atomic_sub(int i, atomic_t *v)
6479 {
6480 - asm volatile(LOCK_PREFIX "subl %1,%0"
6481 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6482 +
6483 +#ifdef CONFIG_PAX_REFCOUNT
6484 + "jno 0f\n"
6485 + LOCK_PREFIX "addl %1,%0\n"
6486 + "int $4\n0:\n"
6487 + _ASM_EXTABLE(0b, 0b)
6488 +#endif
6489 +
6490 + : "+m" (v->counter)
6491 + : "ir" (i));
6492 +}
6493 +
6494 +/**
6495 + * atomic_sub_unchecked - subtract integer from atomic variable
6496 + * @i: integer value to subtract
6497 + * @v: pointer of type atomic_unchecked_t
6498 + *
6499 + * Atomically subtracts @i from @v.
6500 + */
6501 +static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6502 +{
6503 + asm volatile(LOCK_PREFIX "subl %1,%0\n"
6504 : "+m" (v->counter)
6505 : "ir" (i));
6506 }
6507 @@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in
6508 {
6509 unsigned char c;
6510
6511 - asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
6512 + asm volatile(LOCK_PREFIX "subl %2,%0\n"
6513 +
6514 +#ifdef CONFIG_PAX_REFCOUNT
6515 + "jno 0f\n"
6516 + LOCK_PREFIX "addl %2,%0\n"
6517 + "int $4\n0:\n"
6518 + _ASM_EXTABLE(0b, 0b)
6519 +#endif
6520 +
6521 + "sete %1\n"
6522 : "+m" (v->counter), "=qm" (c)
6523 : "ir" (i) : "memory");
6524 return c;
6525 @@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in
6526 */
6527 static inline void atomic_inc(atomic_t *v)
6528 {
6529 - asm volatile(LOCK_PREFIX "incl %0"
6530 + asm volatile(LOCK_PREFIX "incl %0\n"
6531 +
6532 +#ifdef CONFIG_PAX_REFCOUNT
6533 + "jno 0f\n"
6534 + LOCK_PREFIX "decl %0\n"
6535 + "int $4\n0:\n"
6536 + _ASM_EXTABLE(0b, 0b)
6537 +#endif
6538 +
6539 + : "+m" (v->counter));
6540 +}
6541 +
6542 +/**
6543 + * atomic_inc_unchecked - increment atomic variable
6544 + * @v: pointer of type atomic_unchecked_t
6545 + *
6546 + * Atomically increments @v by 1.
6547 + */
6548 +static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
6549 +{
6550 + asm volatile(LOCK_PREFIX "incl %0\n"
6551 : "+m" (v->counter));
6552 }
6553
6554 @@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *
6555 */
6556 static inline void atomic_dec(atomic_t *v)
6557 {
6558 - asm volatile(LOCK_PREFIX "decl %0"
6559 + asm volatile(LOCK_PREFIX "decl %0\n"
6560 +
6561 +#ifdef CONFIG_PAX_REFCOUNT
6562 + "jno 0f\n"
6563 + LOCK_PREFIX "incl %0\n"
6564 + "int $4\n0:\n"
6565 + _ASM_EXTABLE(0b, 0b)
6566 +#endif
6567 +
6568 + : "+m" (v->counter));
6569 +}
6570 +
6571 +/**
6572 + * atomic_dec_unchecked - decrement atomic variable
6573 + * @v: pointer of type atomic_unchecked_t
6574 + *
6575 + * Atomically decrements @v by 1.
6576 + */
6577 +static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
6578 +{
6579 + asm volatile(LOCK_PREFIX "decl %0\n"
6580 : "+m" (v->counter));
6581 }
6582
6583 @@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at
6584 {
6585 unsigned char c;
6586
6587 - asm volatile(LOCK_PREFIX "decl %0; sete %1"
6588 + asm volatile(LOCK_PREFIX "decl %0\n"
6589 +
6590 +#ifdef CONFIG_PAX_REFCOUNT
6591 + "jno 0f\n"
6592 + LOCK_PREFIX "incl %0\n"
6593 + "int $4\n0:\n"
6594 + _ASM_EXTABLE(0b, 0b)
6595 +#endif
6596 +
6597 + "sete %1\n"
6598 : "+m" (v->counter), "=qm" (c)
6599 : : "memory");
6600 return c != 0;
6601 @@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(at
6602 {
6603 unsigned char c;
6604
6605 - asm volatile(LOCK_PREFIX "incl %0; sete %1"
6606 + asm volatile(LOCK_PREFIX "incl %0\n"
6607 +
6608 +#ifdef CONFIG_PAX_REFCOUNT
6609 + "jno 0f\n"
6610 + LOCK_PREFIX "decl %0\n"
6611 + "int $4\n0:\n"
6612 + _ASM_EXTABLE(0b, 0b)
6613 +#endif
6614 +
6615 + "sete %1\n"
6616 + : "+m" (v->counter), "=qm" (c)
6617 + : : "memory");
6618 + return c != 0;
6619 +}
6620 +
6621 +/**
6622 + * atomic_inc_and_test_unchecked - increment and test
6623 + * @v: pointer of type atomic_unchecked_t
6624 + *
6625 + * Atomically increments @v by 1
6626 + * and returns true if the result is zero, or false for all
6627 + * other cases.
6628 + */
6629 +static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
6630 +{
6631 + unsigned char c;
6632 +
6633 + asm volatile(LOCK_PREFIX "incl %0\n"
6634 + "sete %1\n"
6635 : "+m" (v->counter), "=qm" (c)
6636 : : "memory");
6637 return c != 0;
6638 @@ -157,7 +310,16 @@ static inline int atomic_add_negative(in
6639 {
6640 unsigned char c;
6641
6642 - asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
6643 + asm volatile(LOCK_PREFIX "addl %2,%0\n"
6644 +
6645 +#ifdef CONFIG_PAX_REFCOUNT
6646 + "jno 0f\n"
6647 + LOCK_PREFIX "subl %2,%0\n"
6648 + "int $4\n0:\n"
6649 + _ASM_EXTABLE(0b, 0b)
6650 +#endif
6651 +
6652 + "sets %1\n"
6653 : "+m" (v->counter), "=qm" (c)
6654 : "ir" (i) : "memory");
6655 return c;
6656 @@ -180,6 +342,46 @@ static inline int atomic_add_return(int
6657 #endif
6658 /* Modern 486+ processor */
6659 __i = i;
6660 + asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
6661 +
6662 +#ifdef CONFIG_PAX_REFCOUNT
6663 + "jno 0f\n"
6664 + "movl %0, %1\n"
6665 + "int $4\n0:\n"
6666 + _ASM_EXTABLE(0b, 0b)
6667 +#endif
6668 +
6669 + : "+r" (i), "+m" (v->counter)
6670 + : : "memory");
6671 + return i + __i;
6672 +
6673 +#ifdef CONFIG_M386
6674 +no_xadd: /* Legacy 386 processor */
6675 + local_irq_save(flags);
6676 + __i = atomic_read(v);
6677 + atomic_set(v, i + __i);
6678 + local_irq_restore(flags);
6679 + return i + __i;
6680 +#endif
6681 +}
6682 +
6683 +/**
6684 + * atomic_add_return_unchecked - add integer and return
6685 + * @v: pointer of type atomic_unchecked_t
6686 + * @i: integer value to add
6687 + *
6688 + * Atomically adds @i to @v and returns @i + @v
6689 + */
6690 +static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
6691 +{
6692 + int __i;
6693 +#ifdef CONFIG_M386
6694 + unsigned long flags;
6695 + if (unlikely(boot_cpu_data.x86 <= 3))
6696 + goto no_xadd;
6697 +#endif
6698 + /* Modern 486+ processor */
6699 + __i = i;
6700 asm volatile(LOCK_PREFIX "xaddl %0, %1"
6701 : "+r" (i), "+m" (v->counter)
6702 : : "memory");
6703 @@ -208,6 +410,10 @@ static inline int atomic_sub_return(int
6704 }
6705
6706 #define atomic_inc_return(v) (atomic_add_return(1, v))
6707 +static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
6708 +{
6709 + return atomic_add_return_unchecked(1, v);
6710 +}
6711 #define atomic_dec_return(v) (atomic_sub_return(1, v))
6712
6713 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
6714 @@ -215,11 +421,21 @@ static inline int atomic_cmpxchg(atomic_
6715 return cmpxchg(&v->counter, old, new);
6716 }
6717
6718 +static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
6719 +{
6720 + return cmpxchg(&v->counter, old, new);
6721 +}
6722 +
6723 static inline int atomic_xchg(atomic_t *v, int new)
6724 {
6725 return xchg(&v->counter, new);
6726 }
6727
6728 +static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
6729 +{
6730 + return xchg(&v->counter, new);
6731 +}
6732 +
6733 /**
6734 * atomic_add_unless - add unless the number is already a given value
6735 * @v: pointer of type atomic_t
6736 @@ -231,21 +447,77 @@ static inline int atomic_xchg(atomic_t *
6737 */
6738 static inline int atomic_add_unless(atomic_t *v, int a, int u)
6739 {
6740 - int c, old;
6741 + int c, old, new;
6742 c = atomic_read(v);
6743 for (;;) {
6744 - if (unlikely(c == (u)))
6745 + if (unlikely(c == u))
6746 break;
6747 - old = atomic_cmpxchg((v), c, c + (a));
6748 +
6749 + asm volatile("addl %2,%0\n"
6750 +
6751 +#ifdef CONFIG_PAX_REFCOUNT
6752 + "jno 0f\n"
6753 + "subl %2,%0\n"
6754 + "int $4\n0:\n"
6755 + _ASM_EXTABLE(0b, 0b)
6756 +#endif
6757 +
6758 + : "=r" (new)
6759 + : "0" (c), "ir" (a));
6760 +
6761 + old = atomic_cmpxchg(v, c, new);
6762 if (likely(old == c))
6763 break;
6764 c = old;
6765 }
6766 - return c != (u);
6767 + return c != u;
6768 }
6769
6770 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
6771
6772 +/**
6773 + * atomic_inc_not_zero_hint - increment if not null
6774 + * @v: pointer of type atomic_t
6775 + * @hint: probable value of the atomic before the increment
6776 + *
6777 + * This version of atomic_inc_not_zero() gives a hint of probable
6778 + * value of the atomic. This helps processor to not read the memory
6779 + * before doing the atomic read/modify/write cycle, lowering
6780 + * number of bus transactions on some arches.
6781 + *
6782 + * Returns: 0 if increment was not done, 1 otherwise.
6783 + */
6784 +#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
6785 +static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
6786 +{
6787 + int val, c = hint, new;
6788 +
6789 + /* sanity test, should be removed by compiler if hint is a constant */
6790 + if (!hint)
6791 + return atomic_inc_not_zero(v);
6792 +
6793 + do {
6794 + asm volatile("incl %0\n"
6795 +
6796 +#ifdef CONFIG_PAX_REFCOUNT
6797 + "jno 0f\n"
6798 + "decl %0\n"
6799 + "int $4\n0:\n"
6800 + _ASM_EXTABLE(0b, 0b)
6801 +#endif
6802 +
6803 + : "=r" (new)
6804 + : "0" (c));
6805 +
6806 + val = atomic_cmpxchg(v, c, new);
6807 + if (val == c)
6808 + return 1;
6809 + c = val;
6810 + } while (c);
6811 +
6812 + return 0;
6813 +}
6814 +
6815 /*
6816 * atomic_dec_if_positive - decrement by 1 if old value positive
6817 * @v: pointer of type atomic_t
6818 diff -urNp linux-2.6.39.4/arch/x86/include/asm/bitops.h linux-2.6.39.4/arch/x86/include/asm/bitops.h
6819 --- linux-2.6.39.4/arch/x86/include/asm/bitops.h 2011-05-19 00:06:34.000000000 -0400
6820 +++ linux-2.6.39.4/arch/x86/include/asm/bitops.h 2011-08-05 19:44:33.000000000 -0400
6821 @@ -38,7 +38,7 @@
6822 * a mask operation on a byte.
6823 */
6824 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
6825 -#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
6826 +#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
6827 #define CONST_MASK(nr) (1 << ((nr) & 7))
6828
6829 /**
6830 diff -urNp linux-2.6.39.4/arch/x86/include/asm/boot.h linux-2.6.39.4/arch/x86/include/asm/boot.h
6831 --- linux-2.6.39.4/arch/x86/include/asm/boot.h 2011-05-19 00:06:34.000000000 -0400
6832 +++ linux-2.6.39.4/arch/x86/include/asm/boot.h 2011-08-05 19:44:33.000000000 -0400
6833 @@ -11,10 +11,15 @@
6834 #include <asm/pgtable_types.h>
6835
6836 /* Physical address where kernel should be loaded. */
6837 -#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6838 +#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
6839 + (CONFIG_PHYSICAL_ALIGN - 1)) \
6840 & ~(CONFIG_PHYSICAL_ALIGN - 1))
6841
6842 +#ifndef __ASSEMBLY__
6843 +extern unsigned char __LOAD_PHYSICAL_ADDR[];
6844 +#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
6845 +#endif
6846 +
6847 /* Minimum kernel alignment, as a power of two */
6848 #ifdef CONFIG_X86_64
6849 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
6850 diff -urNp linux-2.6.39.4/arch/x86/include/asm/cacheflush.h linux-2.6.39.4/arch/x86/include/asm/cacheflush.h
6851 --- linux-2.6.39.4/arch/x86/include/asm/cacheflush.h 2011-05-19 00:06:34.000000000 -0400
6852 +++ linux-2.6.39.4/arch/x86/include/asm/cacheflush.h 2011-08-05 19:44:33.000000000 -0400
6853 @@ -26,7 +26,7 @@ static inline unsigned long get_page_mem
6854 unsigned long pg_flags = pg->flags & _PGMT_MASK;
6855
6856 if (pg_flags == _PGMT_DEFAULT)
6857 - return -1;
6858 + return ~0UL;
6859 else if (pg_flags == _PGMT_WC)
6860 return _PAGE_CACHE_WC;
6861 else if (pg_flags == _PGMT_UC_MINUS)
6862 diff -urNp linux-2.6.39.4/arch/x86/include/asm/cache.h linux-2.6.39.4/arch/x86/include/asm/cache.h
6863 --- linux-2.6.39.4/arch/x86/include/asm/cache.h 2011-05-19 00:06:34.000000000 -0400
6864 +++ linux-2.6.39.4/arch/x86/include/asm/cache.h 2011-08-05 19:44:33.000000000 -0400
6865 @@ -5,12 +5,13 @@
6866
6867 /* L1 cache line size */
6868 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
6869 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6870 +#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6871
6872 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
6873 +#define __read_only __attribute__((__section__(".data..read_only")))
6874
6875 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
6876 -#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
6877 +#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
6878
6879 #ifdef CONFIG_X86_VSMP
6880 #ifdef CONFIG_SMP
6881 diff -urNp linux-2.6.39.4/arch/x86/include/asm/checksum_32.h linux-2.6.39.4/arch/x86/include/asm/checksum_32.h
6882 --- linux-2.6.39.4/arch/x86/include/asm/checksum_32.h 2011-05-19 00:06:34.000000000 -0400
6883 +++ linux-2.6.39.4/arch/x86/include/asm/checksum_32.h 2011-08-05 19:44:33.000000000 -0400
6884 @@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
6885 int len, __wsum sum,
6886 int *src_err_ptr, int *dst_err_ptr);
6887
6888 +asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
6889 + int len, __wsum sum,
6890 + int *src_err_ptr, int *dst_err_ptr);
6891 +
6892 +asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
6893 + int len, __wsum sum,
6894 + int *src_err_ptr, int *dst_err_ptr);
6895 +
6896 /*
6897 * Note: when you get a NULL pointer exception here this means someone
6898 * passed in an incorrect kernel address to one of these functions.
6899 @@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
6900 int *err_ptr)
6901 {
6902 might_sleep();
6903 - return csum_partial_copy_generic((__force void *)src, dst,
6904 + return csum_partial_copy_generic_from_user((__force void *)src, dst,
6905 len, sum, err_ptr, NULL);
6906 }
6907
6908 @@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
6909 {
6910 might_sleep();
6911 if (access_ok(VERIFY_WRITE, dst, len))
6912 - return csum_partial_copy_generic(src, (__force void *)dst,
6913 + return csum_partial_copy_generic_to_user(src, (__force void *)dst,
6914 len, sum, NULL, err_ptr);
6915
6916 if (len)
6917 diff -urNp linux-2.6.39.4/arch/x86/include/asm/cpufeature.h linux-2.6.39.4/arch/x86/include/asm/cpufeature.h
6918 --- linux-2.6.39.4/arch/x86/include/asm/cpufeature.h 2011-06-03 00:04:13.000000000 -0400
6919 +++ linux-2.6.39.4/arch/x86/include/asm/cpufeature.h 2011-08-05 19:44:33.000000000 -0400
6920 @@ -351,7 +351,7 @@ static __always_inline __pure bool __sta
6921 ".section .discard,\"aw\",@progbits\n"
6922 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
6923 ".previous\n"
6924 - ".section .altinstr_replacement,\"ax\"\n"
6925 + ".section .altinstr_replacement,\"a\"\n"
6926 "3: movb $1,%0\n"
6927 "4:\n"
6928 ".previous\n"
6929 diff -urNp linux-2.6.39.4/arch/x86/include/asm/desc_defs.h linux-2.6.39.4/arch/x86/include/asm/desc_defs.h
6930 --- linux-2.6.39.4/arch/x86/include/asm/desc_defs.h 2011-05-19 00:06:34.000000000 -0400
6931 +++ linux-2.6.39.4/arch/x86/include/asm/desc_defs.h 2011-08-05 19:44:33.000000000 -0400
6932 @@ -31,6 +31,12 @@ struct desc_struct {
6933 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
6934 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
6935 };
6936 + struct {
6937 + u16 offset_low;
6938 + u16 seg;
6939 + unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
6940 + unsigned offset_high: 16;
6941 + } gate;
6942 };
6943 } __attribute__((packed));
6944
6945 diff -urNp linux-2.6.39.4/arch/x86/include/asm/desc.h linux-2.6.39.4/arch/x86/include/asm/desc.h
6946 --- linux-2.6.39.4/arch/x86/include/asm/desc.h 2011-05-19 00:06:34.000000000 -0400
6947 +++ linux-2.6.39.4/arch/x86/include/asm/desc.h 2011-08-05 19:44:33.000000000 -0400
6948 @@ -4,6 +4,7 @@
6949 #include <asm/desc_defs.h>
6950 #include <asm/ldt.h>
6951 #include <asm/mmu.h>
6952 +#include <asm/pgtable.h>
6953 #include <linux/smp.h>
6954
6955 static inline void fill_ldt(struct desc_struct *desc,
6956 @@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
6957 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
6958 desc->type = (info->read_exec_only ^ 1) << 1;
6959 desc->type |= info->contents << 2;
6960 + desc->type |= info->seg_not_present ^ 1;
6961 desc->s = 1;
6962 desc->dpl = 0x3;
6963 desc->p = info->seg_not_present ^ 1;
6964 @@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
6965 }
6966
6967 extern struct desc_ptr idt_descr;
6968 -extern gate_desc idt_table[];
6969 -
6970 -struct gdt_page {
6971 - struct desc_struct gdt[GDT_ENTRIES];
6972 -} __attribute__((aligned(PAGE_SIZE)));
6973 -DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
6974 +extern gate_desc idt_table[256];
6975
6976 +extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
6977 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
6978 {
6979 - return per_cpu(gdt_page, cpu).gdt;
6980 + return cpu_gdt_table[cpu];
6981 }
6982
6983 #ifdef CONFIG_X86_64
6984 @@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
6985 unsigned long base, unsigned dpl, unsigned flags,
6986 unsigned short seg)
6987 {
6988 - gate->a = (seg << 16) | (base & 0xffff);
6989 - gate->b = (base & 0xffff0000) |
6990 - (((0x80 | type | (dpl << 5)) & 0xff) << 8);
6991 + gate->gate.offset_low = base;
6992 + gate->gate.seg = seg;
6993 + gate->gate.reserved = 0;
6994 + gate->gate.type = type;
6995 + gate->gate.s = 0;
6996 + gate->gate.dpl = dpl;
6997 + gate->gate.p = 1;
6998 + gate->gate.offset_high = base >> 16;
6999 }
7000
7001 #endif
7002 @@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
7003 static inline void native_write_idt_entry(gate_desc *idt, int entry,
7004 const gate_desc *gate)
7005 {
7006 + pax_open_kernel();
7007 memcpy(&idt[entry], gate, sizeof(*gate));
7008 + pax_close_kernel();
7009 }
7010
7011 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
7012 const void *desc)
7013 {
7014 + pax_open_kernel();
7015 memcpy(&ldt[entry], desc, 8);
7016 + pax_close_kernel();
7017 }
7018
7019 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
7020 @@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
7021 size = sizeof(struct desc_struct);
7022 break;
7023 }
7024 +
7025 + pax_open_kernel();
7026 memcpy(&gdt[entry], desc, size);
7027 + pax_close_kernel();
7028 }
7029
7030 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
7031 @@ -211,7 +221,9 @@ static inline void native_set_ldt(const
7032
7033 static inline void native_load_tr_desc(void)
7034 {
7035 + pax_open_kernel();
7036 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
7037 + pax_close_kernel();
7038 }
7039
7040 static inline void native_load_gdt(const struct desc_ptr *dtr)
7041 @@ -246,8 +258,10 @@ static inline void native_load_tls(struc
7042 unsigned int i;
7043 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
7044
7045 + pax_open_kernel();
7046 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
7047 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
7048 + pax_close_kernel();
7049 }
7050
7051 #define _LDT_empty(info) \
7052 @@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
7053 desc->limit = (limit >> 16) & 0xf;
7054 }
7055
7056 -static inline void _set_gate(int gate, unsigned type, void *addr,
7057 +static inline void _set_gate(int gate, unsigned type, const void *addr,
7058 unsigned dpl, unsigned ist, unsigned seg)
7059 {
7060 gate_desc s;
7061 @@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
7062 * Pentium F0 0F bugfix can have resulted in the mapped
7063 * IDT being write-protected.
7064 */
7065 -static inline void set_intr_gate(unsigned int n, void *addr)
7066 +static inline void set_intr_gate(unsigned int n, const void *addr)
7067 {
7068 BUG_ON((unsigned)n > 0xFF);
7069 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
7070 @@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
7071 /*
7072 * This routine sets up an interrupt gate at directory privilege level 3.
7073 */
7074 -static inline void set_system_intr_gate(unsigned int n, void *addr)
7075 +static inline void set_system_intr_gate(unsigned int n, const void *addr)
7076 {
7077 BUG_ON((unsigned)n > 0xFF);
7078 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
7079 }
7080
7081 -static inline void set_system_trap_gate(unsigned int n, void *addr)
7082 +static inline void set_system_trap_gate(unsigned int n, const void *addr)
7083 {
7084 BUG_ON((unsigned)n > 0xFF);
7085 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
7086 }
7087
7088 -static inline void set_trap_gate(unsigned int n, void *addr)
7089 +static inline void set_trap_gate(unsigned int n, const void *addr)
7090 {
7091 BUG_ON((unsigned)n > 0xFF);
7092 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
7093 @@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
7094 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
7095 {
7096 BUG_ON((unsigned)n > 0xFF);
7097 - _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
7098 + _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
7099 }
7100
7101 -static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
7102 +static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
7103 {
7104 BUG_ON((unsigned)n > 0xFF);
7105 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
7106 }
7107
7108 -static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
7109 +static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
7110 {
7111 BUG_ON((unsigned)n > 0xFF);
7112 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
7113 }
7114
7115 +#ifdef CONFIG_X86_32
7116 +static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
7117 +{
7118 + struct desc_struct d;
7119 +
7120 + if (likely(limit))
7121 + limit = (limit - 1UL) >> PAGE_SHIFT;
7122 + pack_descriptor(&d, base, limit, 0xFB, 0xC);
7123 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
7124 +}
7125 +#endif
7126 +
7127 #endif /* _ASM_X86_DESC_H */
7128 diff -urNp linux-2.6.39.4/arch/x86/include/asm/e820.h linux-2.6.39.4/arch/x86/include/asm/e820.h
7129 --- linux-2.6.39.4/arch/x86/include/asm/e820.h 2011-05-19 00:06:34.000000000 -0400
7130 +++ linux-2.6.39.4/arch/x86/include/asm/e820.h 2011-08-05 19:44:33.000000000 -0400
7131 @@ -69,7 +69,7 @@ struct e820map {
7132 #define ISA_START_ADDRESS 0xa0000
7133 #define ISA_END_ADDRESS 0x100000
7134
7135 -#define BIOS_BEGIN 0x000a0000
7136 +#define BIOS_BEGIN 0x000c0000
7137 #define BIOS_END 0x00100000
7138
7139 #define BIOS_ROM_BASE 0xffe00000
7140 diff -urNp linux-2.6.39.4/arch/x86/include/asm/elf.h linux-2.6.39.4/arch/x86/include/asm/elf.h
7141 --- linux-2.6.39.4/arch/x86/include/asm/elf.h 2011-05-19 00:06:34.000000000 -0400
7142 +++ linux-2.6.39.4/arch/x86/include/asm/elf.h 2011-08-05 19:44:33.000000000 -0400
7143 @@ -237,7 +237,25 @@ extern int force_personality32;
7144 the loader. We need to make sure that it is out of the way of the program
7145 that it will "exec", and that there is sufficient room for the brk. */
7146
7147 +#ifdef CONFIG_PAX_SEGMEXEC
7148 +#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
7149 +#else
7150 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
7151 +#endif
7152 +
7153 +#ifdef CONFIG_PAX_ASLR
7154 +#ifdef CONFIG_X86_32
7155 +#define PAX_ELF_ET_DYN_BASE 0x10000000UL
7156 +
7157 +#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7158 +#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
7159 +#else
7160 +#define PAX_ELF_ET_DYN_BASE 0x400000UL
7161 +
7162 +#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7163 +#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
7164 +#endif
7165 +#endif
7166
7167 /* This yields a mask that user programs can use to figure out what
7168 instruction set this CPU supports. This could be done in user space,
7169 @@ -291,8 +309,7 @@ do { \
7170 #define ARCH_DLINFO \
7171 do { \
7172 if (vdso_enabled) \
7173 - NEW_AUX_ENT(AT_SYSINFO_EHDR, \
7174 - (unsigned long)current->mm->context.vdso); \
7175 + NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
7176 } while (0)
7177
7178 #define AT_SYSINFO 32
7179 @@ -303,7 +320,7 @@ do { \
7180
7181 #endif /* !CONFIG_X86_32 */
7182
7183 -#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
7184 +#define VDSO_CURRENT_BASE (current->mm->context.vdso)
7185
7186 #define VDSO_ENTRY \
7187 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
7188 @@ -317,7 +334,4 @@ extern int arch_setup_additional_pages(s
7189 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
7190 #define compat_arch_setup_additional_pages syscall32_setup_pages
7191
7192 -extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7193 -#define arch_randomize_brk arch_randomize_brk
7194 -
7195 #endif /* _ASM_X86_ELF_H */
7196 diff -urNp linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h
7197 --- linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h 2011-05-19 00:06:34.000000000 -0400
7198 +++ linux-2.6.39.4/arch/x86/include/asm/emergency-restart.h 2011-08-05 19:44:33.000000000 -0400
7199 @@ -15,6 +15,6 @@ enum reboot_type {
7200
7201 extern enum reboot_type reboot_type;
7202
7203 -extern void machine_emergency_restart(void);
7204 +extern void machine_emergency_restart(void) __noreturn;
7205
7206 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
7207 diff -urNp linux-2.6.39.4/arch/x86/include/asm/futex.h linux-2.6.39.4/arch/x86/include/asm/futex.h
7208 --- linux-2.6.39.4/arch/x86/include/asm/futex.h 2011-05-19 00:06:34.000000000 -0400
7209 +++ linux-2.6.39.4/arch/x86/include/asm/futex.h 2011-08-05 19:44:33.000000000 -0400
7210 @@ -12,16 +12,18 @@
7211 #include <asm/system.h>
7212
7213 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
7214 + typecheck(u32 *, uaddr); \
7215 asm volatile("1:\t" insn "\n" \
7216 "2:\t.section .fixup,\"ax\"\n" \
7217 "3:\tmov\t%3, %1\n" \
7218 "\tjmp\t2b\n" \
7219 "\t.previous\n" \
7220 _ASM_EXTABLE(1b, 3b) \
7221 - : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
7222 + : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
7223 : "i" (-EFAULT), "0" (oparg), "1" (0))
7224
7225 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
7226 + typecheck(u32 *, uaddr); \
7227 asm volatile("1:\tmovl %2, %0\n" \
7228 "\tmovl\t%0, %3\n" \
7229 "\t" insn "\n" \
7230 @@ -34,7 +36,7 @@
7231 _ASM_EXTABLE(1b, 4b) \
7232 _ASM_EXTABLE(2b, 4b) \
7233 : "=&a" (oldval), "=&r" (ret), \
7234 - "+m" (*uaddr), "=&r" (tem) \
7235 + "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
7236 : "r" (oparg), "i" (-EFAULT), "1" (0))
7237
7238 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
7239 @@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
7240
7241 switch (op) {
7242 case FUTEX_OP_SET:
7243 - __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
7244 + __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
7245 break;
7246 case FUTEX_OP_ADD:
7247 - __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
7248 + __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
7249 uaddr, oparg);
7250 break;
7251 case FUTEX_OP_OR:
7252 @@ -123,13 +125,13 @@ static inline int futex_atomic_cmpxchg_i
7253 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
7254 return -EFAULT;
7255
7256 - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
7257 + asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
7258 "2:\t.section .fixup, \"ax\"\n"
7259 "3:\tmov %3, %0\n"
7260 "\tjmp 2b\n"
7261 "\t.previous\n"
7262 _ASM_EXTABLE(1b, 3b)
7263 - : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
7264 + : "+r" (ret), "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
7265 : "i" (-EFAULT), "r" (newval), "1" (oldval)
7266 : "memory"
7267 );
7268 diff -urNp linux-2.6.39.4/arch/x86/include/asm/hw_irq.h linux-2.6.39.4/arch/x86/include/asm/hw_irq.h
7269 --- linux-2.6.39.4/arch/x86/include/asm/hw_irq.h 2011-05-19 00:06:34.000000000 -0400
7270 +++ linux-2.6.39.4/arch/x86/include/asm/hw_irq.h 2011-08-05 19:44:33.000000000 -0400
7271 @@ -137,8 +137,8 @@ extern void setup_ioapic_dest(void);
7272 extern void enable_IO_APIC(void);
7273
7274 /* Statistics */
7275 -extern atomic_t irq_err_count;
7276 -extern atomic_t irq_mis_count;
7277 +extern atomic_unchecked_t irq_err_count;
7278 +extern atomic_unchecked_t irq_mis_count;
7279
7280 /* EISA */
7281 extern void eisa_set_level_irq(unsigned int irq);
7282 diff -urNp linux-2.6.39.4/arch/x86/include/asm/i387.h linux-2.6.39.4/arch/x86/include/asm/i387.h
7283 --- linux-2.6.39.4/arch/x86/include/asm/i387.h 2011-05-19 00:06:34.000000000 -0400
7284 +++ linux-2.6.39.4/arch/x86/include/asm/i387.h 2011-08-05 19:44:33.000000000 -0400
7285 @@ -92,6 +92,11 @@ static inline int fxrstor_checking(struc
7286 {
7287 int err;
7288
7289 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7290 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7291 + fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
7292 +#endif
7293 +
7294 /* See comment in fxsave() below. */
7295 #ifdef CONFIG_AS_FXSAVEQ
7296 asm volatile("1: fxrstorq %[fx]\n\t"
7297 @@ -121,6 +126,11 @@ static inline int fxsave_user(struct i38
7298 {
7299 int err;
7300
7301 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7302 + if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
7303 + fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
7304 +#endif
7305 +
7306 /*
7307 * Clear the bytes not touched by the fxsave and reserved
7308 * for the SW usage.
7309 @@ -213,13 +223,8 @@ static inline void fpu_fxsave(struct fpu
7310 #endif /* CONFIG_X86_64 */
7311
7312 /* We need a safe address that is cheap to find and that is already
7313 - in L1 during context switch. The best choices are unfortunately
7314 - different for UP and SMP */
7315 -#ifdef CONFIG_SMP
7316 -#define safe_address (__per_cpu_offset[0])
7317 -#else
7318 -#define safe_address (kstat_cpu(0).cpustat.user)
7319 -#endif
7320 + in L1 during context switch. */
7321 +#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
7322
7323 /*
7324 * These must be called with preempt disabled
7325 @@ -312,7 +317,7 @@ static inline void kernel_fpu_begin(void
7326 struct thread_info *me = current_thread_info();
7327 preempt_disable();
7328 if (me->status & TS_USEDFPU)
7329 - __save_init_fpu(me->task);
7330 + __save_init_fpu(current);
7331 else
7332 clts();
7333 }
7334 diff -urNp linux-2.6.39.4/arch/x86/include/asm/io.h linux-2.6.39.4/arch/x86/include/asm/io.h
7335 --- linux-2.6.39.4/arch/x86/include/asm/io.h 2011-05-19 00:06:34.000000000 -0400
7336 +++ linux-2.6.39.4/arch/x86/include/asm/io.h 2011-08-05 19:44:33.000000000 -0400
7337 @@ -216,6 +216,17 @@ extern void set_iounmap_nonlazy(void);
7338
7339 #include <linux/vmalloc.h>
7340
7341 +#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
7342 +static inline int valid_phys_addr_range(unsigned long addr, size_t count)
7343 +{
7344 + return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7345 +}
7346 +
7347 +static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
7348 +{
7349 + return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
7350 +}
7351 +
7352 /*
7353 * Convert a virtual cached pointer to an uncached pointer
7354 */
7355 diff -urNp linux-2.6.39.4/arch/x86/include/asm/irqflags.h linux-2.6.39.4/arch/x86/include/asm/irqflags.h
7356 --- linux-2.6.39.4/arch/x86/include/asm/irqflags.h 2011-05-19 00:06:34.000000000 -0400
7357 +++ linux-2.6.39.4/arch/x86/include/asm/irqflags.h 2011-08-05 19:44:33.000000000 -0400
7358 @@ -140,6 +140,11 @@ static inline unsigned long arch_local_i
7359 sti; \
7360 sysexit
7361
7362 +#define GET_CR0_INTO_RDI mov %cr0, %rdi
7363 +#define SET_RDI_INTO_CR0 mov %rdi, %cr0
7364 +#define GET_CR3_INTO_RDI mov %cr3, %rdi
7365 +#define SET_RDI_INTO_CR3 mov %rdi, %cr3
7366 +
7367 #else
7368 #define INTERRUPT_RETURN iret
7369 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
7370 diff -urNp linux-2.6.39.4/arch/x86/include/asm/kprobes.h linux-2.6.39.4/arch/x86/include/asm/kprobes.h
7371 --- linux-2.6.39.4/arch/x86/include/asm/kprobes.h 2011-05-19 00:06:34.000000000 -0400
7372 +++ linux-2.6.39.4/arch/x86/include/asm/kprobes.h 2011-08-05 19:44:33.000000000 -0400
7373 @@ -37,13 +37,8 @@ typedef u8 kprobe_opcode_t;
7374 #define RELATIVEJUMP_SIZE 5
7375 #define RELATIVECALL_OPCODE 0xe8
7376 #define RELATIVE_ADDR_SIZE 4
7377 -#define MAX_STACK_SIZE 64
7378 -#define MIN_STACK_SIZE(ADDR) \
7379 - (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
7380 - THREAD_SIZE - (unsigned long)(ADDR))) \
7381 - ? (MAX_STACK_SIZE) \
7382 - : (((unsigned long)current_thread_info()) + \
7383 - THREAD_SIZE - (unsigned long)(ADDR)))
7384 +#define MAX_STACK_SIZE 64UL
7385 +#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
7386
7387 #define flush_insn_slot(p) do { } while (0)
7388
7389 diff -urNp linux-2.6.39.4/arch/x86/include/asm/kvm_host.h linux-2.6.39.4/arch/x86/include/asm/kvm_host.h
7390 --- linux-2.6.39.4/arch/x86/include/asm/kvm_host.h 2011-05-19 00:06:34.000000000 -0400
7391 +++ linux-2.6.39.4/arch/x86/include/asm/kvm_host.h 2011-08-05 20:34:06.000000000 -0400
7392 @@ -419,7 +419,7 @@ struct kvm_arch {
7393 unsigned int n_used_mmu_pages;
7394 unsigned int n_requested_mmu_pages;
7395 unsigned int n_max_mmu_pages;
7396 - atomic_t invlpg_counter;
7397 + atomic_unchecked_t invlpg_counter;
7398 struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
7399 /*
7400 * Hash table of struct kvm_mmu_page.
7401 @@ -589,7 +589,7 @@ struct kvm_x86_ops {
7402 void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
7403
7404 void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
7405 - const struct trace_print_flags *exit_reasons_str;
7406 + const struct trace_print_flags * const exit_reasons_str;
7407 };
7408
7409 struct kvm_arch_async_pf {
7410 diff -urNp linux-2.6.39.4/arch/x86/include/asm/local.h linux-2.6.39.4/arch/x86/include/asm/local.h
7411 --- linux-2.6.39.4/arch/x86/include/asm/local.h 2011-05-19 00:06:34.000000000 -0400
7412 +++ linux-2.6.39.4/arch/x86/include/asm/local.h 2011-08-05 19:44:33.000000000 -0400
7413 @@ -18,26 +18,58 @@ typedef struct {
7414
7415 static inline void local_inc(local_t *l)
7416 {
7417 - asm volatile(_ASM_INC "%0"
7418 + asm volatile(_ASM_INC "%0\n"
7419 +
7420 +#ifdef CONFIG_PAX_REFCOUNT
7421 + "jno 0f\n"
7422 + _ASM_DEC "%0\n"
7423 + "int $4\n0:\n"
7424 + _ASM_EXTABLE(0b, 0b)
7425 +#endif
7426 +
7427 : "+m" (l->a.counter));
7428 }
7429
7430 static inline void local_dec(local_t *l)
7431 {
7432 - asm volatile(_ASM_DEC "%0"
7433 + asm volatile(_ASM_DEC "%0\n"
7434 +
7435 +#ifdef CONFIG_PAX_REFCOUNT
7436 + "jno 0f\n"
7437 + _ASM_INC "%0\n"
7438 + "int $4\n0:\n"
7439 + _ASM_EXTABLE(0b, 0b)
7440 +#endif
7441 +
7442 : "+m" (l->a.counter));
7443 }
7444
7445 static inline void local_add(long i, local_t *l)
7446 {
7447 - asm volatile(_ASM_ADD "%1,%0"
7448 + asm volatile(_ASM_ADD "%1,%0\n"
7449 +
7450 +#ifdef CONFIG_PAX_REFCOUNT
7451 + "jno 0f\n"
7452 + _ASM_SUB "%1,%0\n"
7453 + "int $4\n0:\n"
7454 + _ASM_EXTABLE(0b, 0b)
7455 +#endif
7456 +
7457 : "+m" (l->a.counter)
7458 : "ir" (i));
7459 }
7460
7461 static inline void local_sub(long i, local_t *l)
7462 {
7463 - asm volatile(_ASM_SUB "%1,%0"
7464 + asm volatile(_ASM_SUB "%1,%0\n"
7465 +
7466 +#ifdef CONFIG_PAX_REFCOUNT
7467 + "jno 0f\n"
7468 + _ASM_ADD "%1,%0\n"
7469 + "int $4\n0:\n"
7470 + _ASM_EXTABLE(0b, 0b)
7471 +#endif
7472 +
7473 : "+m" (l->a.counter)
7474 : "ir" (i));
7475 }
7476 @@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
7477 {
7478 unsigned char c;
7479
7480 - asm volatile(_ASM_SUB "%2,%0; sete %1"
7481 + asm volatile(_ASM_SUB "%2,%0\n"
7482 +
7483 +#ifdef CONFIG_PAX_REFCOUNT
7484 + "jno 0f\n"
7485 + _ASM_ADD "%2,%0\n"
7486 + "int $4\n0:\n"
7487 + _ASM_EXTABLE(0b, 0b)
7488 +#endif
7489 +
7490 + "sete %1\n"
7491 : "+m" (l->a.counter), "=qm" (c)
7492 : "ir" (i) : "memory");
7493 return c;
7494 @@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
7495 {
7496 unsigned char c;
7497
7498 - asm volatile(_ASM_DEC "%0; sete %1"
7499 + asm volatile(_ASM_DEC "%0\n"
7500 +
7501 +#ifdef CONFIG_PAX_REFCOUNT
7502 + "jno 0f\n"
7503 + _ASM_INC "%0\n"
7504 + "int $4\n0:\n"
7505 + _ASM_EXTABLE(0b, 0b)
7506 +#endif
7507 +
7508 + "sete %1\n"
7509 : "+m" (l->a.counter), "=qm" (c)
7510 : : "memory");
7511 return c != 0;
7512 @@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
7513 {
7514 unsigned char c;
7515
7516 - asm volatile(_ASM_INC "%0; sete %1"
7517 + asm volatile(_ASM_INC "%0\n"
7518 +
7519 +#ifdef CONFIG_PAX_REFCOUNT
7520 + "jno 0f\n"
7521 + _ASM_DEC "%0\n"
7522 + "int $4\n0:\n"
7523 + _ASM_EXTABLE(0b, 0b)
7524 +#endif
7525 +
7526 + "sete %1\n"
7527 : "+m" (l->a.counter), "=qm" (c)
7528 : : "memory");
7529 return c != 0;
7530 @@ -110,7 +169,16 @@ static inline int local_add_negative(lon
7531 {
7532 unsigned char c;
7533
7534 - asm volatile(_ASM_ADD "%2,%0; sets %1"
7535 + asm volatile(_ASM_ADD "%2,%0\n"
7536 +
7537 +#ifdef CONFIG_PAX_REFCOUNT
7538 + "jno 0f\n"
7539 + _ASM_SUB "%2,%0\n"
7540 + "int $4\n0:\n"
7541 + _ASM_EXTABLE(0b, 0b)
7542 +#endif
7543 +
7544 + "sets %1\n"
7545 : "+m" (l->a.counter), "=qm" (c)
7546 : "ir" (i) : "memory");
7547 return c;
7548 @@ -133,7 +201,15 @@ static inline long local_add_return(long
7549 #endif
7550 /* Modern 486+ processor */
7551 __i = i;
7552 - asm volatile(_ASM_XADD "%0, %1;"
7553 + asm volatile(_ASM_XADD "%0, %1\n"
7554 +
7555 +#ifdef CONFIG_PAX_REFCOUNT
7556 + "jno 0f\n"
7557 + _ASM_MOV "%0,%1\n"
7558 + "int $4\n0:\n"
7559 + _ASM_EXTABLE(0b, 0b)
7560 +#endif
7561 +
7562 : "+r" (i), "+m" (l->a.counter)
7563 : : "memory");
7564 return i + __i;
7565 diff -urNp linux-2.6.39.4/arch/x86/include/asm/mman.h linux-2.6.39.4/arch/x86/include/asm/mman.h
7566 --- linux-2.6.39.4/arch/x86/include/asm/mman.h 2011-05-19 00:06:34.000000000 -0400
7567 +++ linux-2.6.39.4/arch/x86/include/asm/mman.h 2011-08-05 19:44:33.000000000 -0400
7568 @@ -5,4 +5,14 @@
7569
7570 #include <asm-generic/mman.h>
7571
7572 +#ifdef __KERNEL__
7573 +#ifndef __ASSEMBLY__
7574 +#ifdef CONFIG_X86_32
7575 +#define arch_mmap_check i386_mmap_check
7576 +int i386_mmap_check(unsigned long addr, unsigned long len,
7577 + unsigned long flags);
7578 +#endif
7579 +#endif
7580 +#endif
7581 +
7582 #endif /* _ASM_X86_MMAN_H */
7583 diff -urNp linux-2.6.39.4/arch/x86/include/asm/mmu_context.h linux-2.6.39.4/arch/x86/include/asm/mmu_context.h
7584 --- linux-2.6.39.4/arch/x86/include/asm/mmu_context.h 2011-05-19 00:06:34.000000000 -0400
7585 +++ linux-2.6.39.4/arch/x86/include/asm/mmu_context.h 2011-08-17 19:42:21.000000000 -0400
7586 @@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
7587
7588 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
7589 {
7590 +
7591 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
7592 + unsigned int i;
7593 + pgd_t *pgd;
7594 +
7595 + pax_open_kernel();
7596 + pgd = get_cpu_pgd(smp_processor_id());
7597 + for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
7598 + if (paravirt_enabled())
7599 + set_pgd(pgd+i, native_make_pgd(0));
7600 + else
7601 + pgd[i] = native_make_pgd(0);
7602 + pax_close_kernel();
7603 +#endif
7604 +
7605 #ifdef CONFIG_SMP
7606 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
7607 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
7608 @@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
7609 struct task_struct *tsk)
7610 {
7611 unsigned cpu = smp_processor_id();
7612 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
7613 + int tlbstate = TLBSTATE_OK;
7614 +#endif
7615
7616 if (likely(prev != next)) {
7617 #ifdef CONFIG_SMP
7618 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7619 + tlbstate = percpu_read(cpu_tlbstate.state);
7620 +#endif
7621 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7622 percpu_write(cpu_tlbstate.active_mm, next);
7623 #endif
7624 cpumask_set_cpu(cpu, mm_cpumask(next));
7625
7626 /* Re-load page tables */
7627 +#ifdef CONFIG_PAX_PER_CPU_PGD
7628 + pax_open_kernel();
7629 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7630 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7631 + pax_close_kernel();
7632 + load_cr3(get_cpu_pgd(cpu));
7633 +#else
7634 load_cr3(next->pgd);
7635 +#endif
7636
7637 /* stop flush ipis for the previous mm */
7638 cpumask_clear_cpu(cpu, mm_cpumask(prev));
7639 @@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
7640 */
7641 if (unlikely(prev->context.ldt != next->context.ldt))
7642 load_LDT_nolock(&next->context);
7643 - }
7644 +
7645 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7646 + if (!(__supported_pte_mask & _PAGE_NX)) {
7647 + smp_mb__before_clear_bit();
7648 + cpu_clear(cpu, prev->context.cpu_user_cs_mask);
7649 + smp_mb__after_clear_bit();
7650 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7651 + }
7652 +#endif
7653 +
7654 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7655 + if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
7656 + prev->context.user_cs_limit != next->context.user_cs_limit))
7657 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7658 #ifdef CONFIG_SMP
7659 + else if (unlikely(tlbstate != TLBSTATE_OK))
7660 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7661 +#endif
7662 +#endif
7663 +
7664 + }
7665 else {
7666 +
7667 +#ifdef CONFIG_PAX_PER_CPU_PGD
7668 + pax_open_kernel();
7669 + __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
7670 + __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
7671 + pax_close_kernel();
7672 + load_cr3(get_cpu_pgd(cpu));
7673 +#endif
7674 +
7675 +#ifdef CONFIG_SMP
7676 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
7677 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
7678
7679 @@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
7680 * tlb flush IPI delivery. We must reload CR3
7681 * to make sure to use no freed page tables.
7682 */
7683 +
7684 +#ifndef CONFIG_PAX_PER_CPU_PGD
7685 load_cr3(next->pgd);
7686 +#endif
7687 +
7688 load_LDT_nolock(&next->context);
7689 +
7690 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
7691 + if (!(__supported_pte_mask & _PAGE_NX))
7692 + cpu_set(cpu, next->context.cpu_user_cs_mask);
7693 +#endif
7694 +
7695 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
7696 +#ifdef CONFIG_PAX_PAGEEXEC
7697 + if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
7698 +#endif
7699 + set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
7700 +#endif
7701 +
7702 }
7703 - }
7704 #endif
7705 + }
7706 }
7707
7708 #define activate_mm(prev, next) \
7709 diff -urNp linux-2.6.39.4/arch/x86/include/asm/mmu.h linux-2.6.39.4/arch/x86/include/asm/mmu.h
7710 --- linux-2.6.39.4/arch/x86/include/asm/mmu.h 2011-05-19 00:06:34.000000000 -0400
7711 +++ linux-2.6.39.4/arch/x86/include/asm/mmu.h 2011-08-05 19:44:33.000000000 -0400
7712 @@ -9,10 +9,22 @@
7713 * we put the segment information here.
7714 */
7715 typedef struct {
7716 - void *ldt;
7717 + struct desc_struct *ldt;
7718 int size;
7719 struct mutex lock;
7720 - void *vdso;
7721 + unsigned long vdso;
7722 +
7723 +#ifdef CONFIG_X86_32
7724 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
7725 + unsigned long user_cs_base;
7726 + unsigned long user_cs_limit;
7727 +
7728 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
7729 + cpumask_t cpu_user_cs_mask;
7730 +#endif
7731 +
7732 +#endif
7733 +#endif
7734
7735 #ifdef CONFIG_X86_64
7736 /* True if mm supports a task running in 32 bit compatibility mode. */
7737 diff -urNp linux-2.6.39.4/arch/x86/include/asm/module.h linux-2.6.39.4/arch/x86/include/asm/module.h
7738 --- linux-2.6.39.4/arch/x86/include/asm/module.h 2011-05-19 00:06:34.000000000 -0400
7739 +++ linux-2.6.39.4/arch/x86/include/asm/module.h 2011-08-05 19:44:33.000000000 -0400
7740 @@ -5,6 +5,7 @@
7741
7742 #ifdef CONFIG_X86_64
7743 /* X86_64 does not define MODULE_PROC_FAMILY */
7744 +#define MODULE_PROC_FAMILY ""
7745 #elif defined CONFIG_M386
7746 #define MODULE_PROC_FAMILY "386 "
7747 #elif defined CONFIG_M486
7748 @@ -59,8 +60,30 @@
7749 #error unknown processor family
7750 #endif
7751
7752 -#ifdef CONFIG_X86_32
7753 -# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
7754 +#ifdef CONFIG_PAX_MEMORY_UDEREF
7755 +#define MODULE_PAX_UDEREF "UDEREF "
7756 +#else
7757 +#define MODULE_PAX_UDEREF ""
7758 +#endif
7759 +
7760 +#ifdef CONFIG_PAX_KERNEXEC
7761 +#define MODULE_PAX_KERNEXEC "KERNEXEC "
7762 +#else
7763 +#define MODULE_PAX_KERNEXEC ""
7764 #endif
7765
7766 +#ifdef CONFIG_PAX_REFCOUNT
7767 +#define MODULE_PAX_REFCOUNT "REFCOUNT "
7768 +#else
7769 +#define MODULE_PAX_REFCOUNT ""
7770 +#endif
7771 +
7772 +#ifdef CONFIG_GRKERNSEC
7773 +#define MODULE_GRSEC "GRSECURITY "
7774 +#else
7775 +#define MODULE_GRSEC ""
7776 +#endif
7777 +
7778 +#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
7779 +
7780 #endif /* _ASM_X86_MODULE_H */
7781 diff -urNp linux-2.6.39.4/arch/x86/include/asm/page_64_types.h linux-2.6.39.4/arch/x86/include/asm/page_64_types.h
7782 --- linux-2.6.39.4/arch/x86/include/asm/page_64_types.h 2011-05-19 00:06:34.000000000 -0400
7783 +++ linux-2.6.39.4/arch/x86/include/asm/page_64_types.h 2011-08-05 19:44:33.000000000 -0400
7784 @@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
7785
7786 /* duplicated to the one in bootmem.h */
7787 extern unsigned long max_pfn;
7788 -extern unsigned long phys_base;
7789 +extern const unsigned long phys_base;
7790
7791 extern unsigned long __phys_addr(unsigned long);
7792 #define __phys_reloc_hide(x) (x)
7793 diff -urNp linux-2.6.39.4/arch/x86/include/asm/paravirt.h linux-2.6.39.4/arch/x86/include/asm/paravirt.h
7794 --- linux-2.6.39.4/arch/x86/include/asm/paravirt.h 2011-05-19 00:06:34.000000000 -0400
7795 +++ linux-2.6.39.4/arch/x86/include/asm/paravirt.h 2011-08-05 19:44:33.000000000 -0400
7796 @@ -739,6 +739,21 @@ static inline void __set_fixmap(unsigned
7797 pv_mmu_ops.set_fixmap(idx, phys, flags);
7798 }
7799
7800 +#ifdef CONFIG_PAX_KERNEXEC
7801 +static inline unsigned long pax_open_kernel(void)
7802 +{
7803 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
7804 +}
7805 +
7806 +static inline unsigned long pax_close_kernel(void)
7807 +{
7808 + return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
7809 +}
7810 +#else
7811 +static inline unsigned long pax_open_kernel(void) { return 0; }
7812 +static inline unsigned long pax_close_kernel(void) { return 0; }
7813 +#endif
7814 +
7815 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
7816
7817 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
7818 @@ -955,7 +970,7 @@ extern void default_banner(void);
7819
7820 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
7821 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
7822 -#define PARA_INDIRECT(addr) *%cs:addr
7823 +#define PARA_INDIRECT(addr) *%ss:addr
7824 #endif
7825
7826 #define INTERRUPT_RETURN \
7827 @@ -1032,6 +1047,21 @@ extern void default_banner(void);
7828 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
7829 CLBR_NONE, \
7830 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
7831 +
7832 +#define GET_CR0_INTO_RDI \
7833 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
7834 + mov %rax,%rdi
7835 +
7836 +#define SET_RDI_INTO_CR0 \
7837 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
7838 +
7839 +#define GET_CR3_INTO_RDI \
7840 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
7841 + mov %rax,%rdi
7842 +
7843 +#define SET_RDI_INTO_CR3 \
7844 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
7845 +
7846 #endif /* CONFIG_X86_32 */
7847
7848 #endif /* __ASSEMBLY__ */
7849 diff -urNp linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h
7850 --- linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h 2011-05-19 00:06:34.000000000 -0400
7851 +++ linux-2.6.39.4/arch/x86/include/asm/paravirt_types.h 2011-08-05 20:34:06.000000000 -0400
7852 @@ -78,19 +78,19 @@ struct pv_init_ops {
7853 */
7854 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
7855 unsigned long addr, unsigned len);
7856 -};
7857 +} __no_const;
7858
7859
7860 struct pv_lazy_ops {
7861 /* Set deferred update mode, used for batching operations. */
7862 void (*enter)(void);
7863 void (*leave)(void);
7864 -};
7865 +} __no_const;
7866
7867 struct pv_time_ops {
7868 unsigned long long (*sched_clock)(void);
7869 unsigned long (*get_tsc_khz)(void);
7870 -};
7871 +} __no_const;
7872
7873 struct pv_cpu_ops {
7874 /* hooks for various privileged instructions */
7875 @@ -186,7 +186,7 @@ struct pv_cpu_ops {
7876
7877 void (*start_context_switch)(struct task_struct *prev);
7878 void (*end_context_switch)(struct task_struct *next);
7879 -};
7880 +} __no_const;
7881
7882 struct pv_irq_ops {
7883 /*
7884 @@ -217,7 +217,7 @@ struct pv_apic_ops {
7885 unsigned long start_eip,
7886 unsigned long start_esp);
7887 #endif
7888 -};
7889 +} __no_const;
7890
7891 struct pv_mmu_ops {
7892 unsigned long (*read_cr2)(void);
7893 @@ -317,6 +317,12 @@ struct pv_mmu_ops {
7894 an mfn. We can tell which is which from the index. */
7895 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
7896 phys_addr_t phys, pgprot_t flags);
7897 +
7898 +#ifdef CONFIG_PAX_KERNEXEC
7899 + unsigned long (*pax_open_kernel)(void);
7900 + unsigned long (*pax_close_kernel)(void);
7901 +#endif
7902 +
7903 };
7904
7905 struct arch_spinlock;
7906 @@ -327,7 +333,7 @@ struct pv_lock_ops {
7907 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
7908 int (*spin_trylock)(struct arch_spinlock *lock);
7909 void (*spin_unlock)(struct arch_spinlock *lock);
7910 -};
7911 +} __no_const;
7912
7913 /* This contains all the paravirt structures: we get a convenient
7914 * number for each function using the offset which we use to indicate
7915 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgalloc.h linux-2.6.39.4/arch/x86/include/asm/pgalloc.h
7916 --- linux-2.6.39.4/arch/x86/include/asm/pgalloc.h 2011-05-19 00:06:34.000000000 -0400
7917 +++ linux-2.6.39.4/arch/x86/include/asm/pgalloc.h 2011-08-05 19:44:33.000000000 -0400
7918 @@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
7919 pmd_t *pmd, pte_t *pte)
7920 {
7921 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7922 + set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
7923 +}
7924 +
7925 +static inline void pmd_populate_user(struct mm_struct *mm,
7926 + pmd_t *pmd, pte_t *pte)
7927 +{
7928 + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
7929 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
7930 }
7931
7932 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h
7933 --- linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h 2011-05-19 00:06:34.000000000 -0400
7934 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable-2level.h 2011-08-05 19:44:33.000000000 -0400
7935 @@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
7936
7937 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
7938 {
7939 + pax_open_kernel();
7940 *pmdp = pmd;
7941 + pax_close_kernel();
7942 }
7943
7944 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
7945 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h
7946 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h 2011-05-19 00:06:34.000000000 -0400
7947 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_32.h 2011-08-05 19:44:33.000000000 -0400
7948 @@ -25,9 +25,6 @@
7949 struct mm_struct;
7950 struct vm_area_struct;
7951
7952 -extern pgd_t swapper_pg_dir[1024];
7953 -extern pgd_t initial_page_table[1024];
7954 -
7955 static inline void pgtable_cache_init(void) { }
7956 static inline void check_pgt_cache(void) { }
7957 void paging_init(void);
7958 @@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, u
7959 # include <asm/pgtable-2level.h>
7960 #endif
7961
7962 +extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
7963 +extern pgd_t initial_page_table[PTRS_PER_PGD];
7964 +#ifdef CONFIG_X86_PAE
7965 +extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
7966 +#endif
7967 +
7968 #if defined(CONFIG_HIGHPTE)
7969 #define pte_offset_map(dir, address) \
7970 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
7971 @@ -62,7 +65,9 @@ extern void set_pmd_pfn(unsigned long, u
7972 /* Clear a kernel PTE and flush it from the TLB */
7973 #define kpte_clear_flush(ptep, vaddr) \
7974 do { \
7975 + pax_open_kernel(); \
7976 pte_clear(&init_mm, (vaddr), (ptep)); \
7977 + pax_close_kernel(); \
7978 __flush_tlb_one((vaddr)); \
7979 } while (0)
7980
7981 @@ -74,6 +79,9 @@ do { \
7982
7983 #endif /* !__ASSEMBLY__ */
7984
7985 +#define HAVE_ARCH_UNMAPPED_AREA
7986 +#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
7987 +
7988 /*
7989 * kern_addr_valid() is (1) for FLATMEM and (0) for
7990 * SPARSEMEM and DISCONTIGMEM
7991 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h
7992 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h 2011-05-19 00:06:34.000000000 -0400
7993 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_32_types.h 2011-08-05 19:44:33.000000000 -0400
7994 @@ -8,7 +8,7 @@
7995 */
7996 #ifdef CONFIG_X86_PAE
7997 # include <asm/pgtable-3level_types.h>
7998 -# define PMD_SIZE (1UL << PMD_SHIFT)
7999 +# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
8000 # define PMD_MASK (~(PMD_SIZE - 1))
8001 #else
8002 # include <asm/pgtable-2level_types.h>
8003 @@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
8004 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
8005 #endif
8006
8007 +#ifdef CONFIG_PAX_KERNEXEC
8008 +#ifndef __ASSEMBLY__
8009 +extern unsigned char MODULES_EXEC_VADDR[];
8010 +extern unsigned char MODULES_EXEC_END[];
8011 +#endif
8012 +#include <asm/boot.h>
8013 +#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
8014 +#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
8015 +#else
8016 +#define ktla_ktva(addr) (addr)
8017 +#define ktva_ktla(addr) (addr)
8018 +#endif
8019 +
8020 #define MODULES_VADDR VMALLOC_START
8021 #define MODULES_END VMALLOC_END
8022 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
8023 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h
8024 --- linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h 2011-05-19 00:06:34.000000000 -0400
8025 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable-3level.h 2011-08-05 19:44:33.000000000 -0400
8026 @@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
8027
8028 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8029 {
8030 + pax_open_kernel();
8031 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
8032 + pax_close_kernel();
8033 }
8034
8035 static inline void native_set_pud(pud_t *pudp, pud_t pud)
8036 {
8037 + pax_open_kernel();
8038 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
8039 + pax_close_kernel();
8040 }
8041
8042 /*
8043 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h
8044 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h 2011-05-19 00:06:34.000000000 -0400
8045 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_64.h 2011-08-05 19:44:33.000000000 -0400
8046 @@ -16,10 +16,13 @@
8047
8048 extern pud_t level3_kernel_pgt[512];
8049 extern pud_t level3_ident_pgt[512];
8050 +extern pud_t level3_vmalloc_pgt[512];
8051 +extern pud_t level3_vmemmap_pgt[512];
8052 +extern pud_t level2_vmemmap_pgt[512];
8053 extern pmd_t level2_kernel_pgt[512];
8054 extern pmd_t level2_fixmap_pgt[512];
8055 -extern pmd_t level2_ident_pgt[512];
8056 -extern pgd_t init_level4_pgt[];
8057 +extern pmd_t level2_ident_pgt[512*2];
8058 +extern pgd_t init_level4_pgt[512];
8059
8060 #define swapper_pg_dir init_level4_pgt
8061
8062 @@ -61,7 +64,9 @@ static inline void native_set_pte_atomic
8063
8064 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
8065 {
8066 + pax_open_kernel();
8067 *pmdp = pmd;
8068 + pax_close_kernel();
8069 }
8070
8071 static inline void native_pmd_clear(pmd_t *pmd)
8072 @@ -107,7 +112,9 @@ static inline void native_pud_clear(pud_
8073
8074 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
8075 {
8076 + pax_open_kernel();
8077 *pgdp = pgd;
8078 + pax_close_kernel();
8079 }
8080
8081 static inline void native_pgd_clear(pgd_t *pgd)
8082 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h
8083 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h 2011-05-19 00:06:34.000000000 -0400
8084 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_64_types.h 2011-08-05 19:44:33.000000000 -0400
8085 @@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
8086 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
8087 #define MODULES_END _AC(0xffffffffff000000, UL)
8088 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
8089 +#define MODULES_EXEC_VADDR MODULES_VADDR
8090 +#define MODULES_EXEC_END MODULES_END
8091 +
8092 +#define ktla_ktva(addr) (addr)
8093 +#define ktva_ktla(addr) (addr)
8094
8095 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
8096 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable.h linux-2.6.39.4/arch/x86/include/asm/pgtable.h
8097 --- linux-2.6.39.4/arch/x86/include/asm/pgtable.h 2011-05-19 00:06:34.000000000 -0400
8098 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable.h 2011-08-05 19:44:33.000000000 -0400
8099 @@ -81,12 +81,51 @@ extern struct mm_struct *pgd_page_get_mm
8100
8101 #define arch_end_context_switch(prev) do {} while(0)
8102
8103 +#define pax_open_kernel() native_pax_open_kernel()
8104 +#define pax_close_kernel() native_pax_close_kernel()
8105 #endif /* CONFIG_PARAVIRT */
8106
8107 +#define __HAVE_ARCH_PAX_OPEN_KERNEL
8108 +#define __HAVE_ARCH_PAX_CLOSE_KERNEL
8109 +
8110 +#ifdef CONFIG_PAX_KERNEXEC
8111 +static inline unsigned long native_pax_open_kernel(void)
8112 +{
8113 + unsigned long cr0;
8114 +
8115 + preempt_disable();
8116 + barrier();
8117 + cr0 = read_cr0() ^ X86_CR0_WP;
8118 + BUG_ON(unlikely(cr0 & X86_CR0_WP));
8119 + write_cr0(cr0);
8120 + return cr0 ^ X86_CR0_WP;
8121 +}
8122 +
8123 +static inline unsigned long native_pax_close_kernel(void)
8124 +{
8125 + unsigned long cr0;
8126 +
8127 + cr0 = read_cr0() ^ X86_CR0_WP;
8128 + BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
8129 + write_cr0(cr0);
8130 + barrier();
8131 + preempt_enable_no_resched();
8132 + return cr0 ^ X86_CR0_WP;
8133 +}
8134 +#else
8135 +static inline unsigned long native_pax_open_kernel(void) { return 0; }
8136 +static inline unsigned long native_pax_close_kernel(void) { return 0; }
8137 +#endif
8138 +
8139 /*
8140 * The following only work if pte_present() is true.
8141 * Undefined behaviour if not..
8142 */
8143 +static inline int pte_user(pte_t pte)
8144 +{
8145 + return pte_val(pte) & _PAGE_USER;
8146 +}
8147 +
8148 static inline int pte_dirty(pte_t pte)
8149 {
8150 return pte_flags(pte) & _PAGE_DIRTY;
8151 @@ -196,9 +235,29 @@ static inline pte_t pte_wrprotect(pte_t
8152 return pte_clear_flags(pte, _PAGE_RW);
8153 }
8154
8155 +static inline pte_t pte_mkread(pte_t pte)
8156 +{
8157 + return __pte(pte_val(pte) | _PAGE_USER);
8158 +}
8159 +
8160 static inline pte_t pte_mkexec(pte_t pte)
8161 {
8162 - return pte_clear_flags(pte, _PAGE_NX);
8163 +#ifdef CONFIG_X86_PAE
8164 + if (__supported_pte_mask & _PAGE_NX)
8165 + return pte_clear_flags(pte, _PAGE_NX);
8166 + else
8167 +#endif
8168 + return pte_set_flags(pte, _PAGE_USER);
8169 +}
8170 +
8171 +static inline pte_t pte_exprotect(pte_t pte)
8172 +{
8173 +#ifdef CONFIG_X86_PAE
8174 + if (__supported_pte_mask & _PAGE_NX)
8175 + return pte_set_flags(pte, _PAGE_NX);
8176 + else
8177 +#endif
8178 + return pte_clear_flags(pte, _PAGE_USER);
8179 }
8180
8181 static inline pte_t pte_mkdirty(pte_t pte)
8182 @@ -390,6 +449,15 @@ pte_t *populate_extra_pte(unsigned long
8183 #endif
8184
8185 #ifndef __ASSEMBLY__
8186 +
8187 +#ifdef CONFIG_PAX_PER_CPU_PGD
8188 +extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
8189 +static inline pgd_t *get_cpu_pgd(unsigned int cpu)
8190 +{
8191 + return cpu_pgd[cpu];
8192 +}
8193 +#endif
8194 +
8195 #include <linux/mm_types.h>
8196
8197 static inline int pte_none(pte_t pte)
8198 @@ -560,7 +628,7 @@ static inline pud_t *pud_offset(pgd_t *p
8199
8200 static inline int pgd_bad(pgd_t pgd)
8201 {
8202 - return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
8203 + return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
8204 }
8205
8206 static inline int pgd_none(pgd_t pgd)
8207 @@ -583,7 +651,12 @@ static inline int pgd_none(pgd_t pgd)
8208 * pgd_offset() returns a (pgd_t *)
8209 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
8210 */
8211 -#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
8212 +#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
8213 +
8214 +#ifdef CONFIG_PAX_PER_CPU_PGD
8215 +#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
8216 +#endif
8217 +
8218 /*
8219 * a shortcut which implies the use of the kernel's pgd, instead
8220 * of a process's
8221 @@ -594,6 +667,20 @@ static inline int pgd_none(pgd_t pgd)
8222 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
8223 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
8224
8225 +#ifdef CONFIG_X86_32
8226 +#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
8227 +#else
8228 +#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
8229 +#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
8230 +
8231 +#ifdef CONFIG_PAX_MEMORY_UDEREF
8232 +#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
8233 +#else
8234 +#define PAX_USER_SHADOW_BASE (_AC(0,UL))
8235 +#endif
8236 +
8237 +#endif
8238 +
8239 #ifndef __ASSEMBLY__
8240
8241 extern int direct_gbpages;
8242 @@ -758,11 +845,23 @@ static inline void pmdp_set_wrprotect(st
8243 * dst and src can be on the same page, but the range must not overlap,
8244 * and must not cross a page boundary.
8245 */
8246 -static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
8247 +static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
8248 {
8249 - memcpy(dst, src, count * sizeof(pgd_t));
8250 + pax_open_kernel();
8251 + while (count--)
8252 + *dst++ = *src++;
8253 + pax_close_kernel();
8254 }
8255
8256 +#ifdef CONFIG_PAX_PER_CPU_PGD
8257 +extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8258 +#endif
8259 +
8260 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8261 +extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
8262 +#else
8263 +static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
8264 +#endif
8265
8266 #include <asm-generic/pgtable.h>
8267 #endif /* __ASSEMBLY__ */
8268 diff -urNp linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h
8269 --- linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h 2011-05-19 00:06:34.000000000 -0400
8270 +++ linux-2.6.39.4/arch/x86/include/asm/pgtable_types.h 2011-08-05 19:44:33.000000000 -0400
8271 @@ -16,13 +16,12 @@
8272 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
8273 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
8274 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
8275 -#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
8276 +#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
8277 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
8278 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
8279 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
8280 -#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
8281 -#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
8282 -#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
8283 +#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
8284 +#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
8285 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
8286
8287 /* If _PAGE_BIT_PRESENT is clear, we use these: */
8288 @@ -40,7 +39,6 @@
8289 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
8290 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
8291 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
8292 -#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
8293 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
8294 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
8295 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
8296 @@ -57,8 +55,10 @@
8297
8298 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
8299 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
8300 -#else
8301 +#elif defined(CONFIG_KMEMCHECK)
8302 #define _PAGE_NX (_AT(pteval_t, 0))
8303 +#else
8304 +#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
8305 #endif
8306
8307 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
8308 @@ -96,6 +96,9 @@
8309 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
8310 _PAGE_ACCESSED)
8311
8312 +#define PAGE_READONLY_NOEXEC PAGE_READONLY
8313 +#define PAGE_SHARED_NOEXEC PAGE_SHARED
8314 +
8315 #define __PAGE_KERNEL_EXEC \
8316 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
8317 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
8318 @@ -106,8 +109,8 @@
8319 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
8320 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
8321 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
8322 -#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
8323 -#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
8324 +#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
8325 +#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
8326 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
8327 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
8328 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
8329 @@ -166,8 +169,8 @@
8330 * bits are combined, this will alow user to access the high address mapped
8331 * VDSO in the presence of CONFIG_COMPAT_VDSO
8332 */
8333 -#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
8334 -#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
8335 +#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8336 +#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
8337 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
8338 #endif
8339
8340 @@ -205,7 +208,17 @@ static inline pgdval_t pgd_flags(pgd_t p
8341 {
8342 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
8343 }
8344 +#endif
8345
8346 +#if PAGETABLE_LEVELS == 3
8347 +#include <asm-generic/pgtable-nopud.h>
8348 +#endif
8349 +
8350 +#if PAGETABLE_LEVELS == 2
8351 +#include <asm-generic/pgtable-nopmd.h>
8352 +#endif
8353 +
8354 +#ifndef __ASSEMBLY__
8355 #if PAGETABLE_LEVELS > 3
8356 typedef struct { pudval_t pud; } pud_t;
8357
8358 @@ -219,8 +232,6 @@ static inline pudval_t native_pud_val(pu
8359 return pud.pud;
8360 }
8361 #else
8362 -#include <asm-generic/pgtable-nopud.h>
8363 -
8364 static inline pudval_t native_pud_val(pud_t pud)
8365 {
8366 return native_pgd_val(pud.pgd);
8367 @@ -240,8 +251,6 @@ static inline pmdval_t native_pmd_val(pm
8368 return pmd.pmd;
8369 }
8370 #else
8371 -#include <asm-generic/pgtable-nopmd.h>
8372 -
8373 static inline pmdval_t native_pmd_val(pmd_t pmd)
8374 {
8375 return native_pgd_val(pmd.pud.pgd);
8376 @@ -281,7 +290,6 @@ typedef struct page *pgtable_t;
8377
8378 extern pteval_t __supported_pte_mask;
8379 extern void set_nx(void);
8380 -extern int nx_enabled;
8381
8382 #define pgprot_writecombine pgprot_writecombine
8383 extern pgprot_t pgprot_writecombine(pgprot_t prot);
8384 diff -urNp linux-2.6.39.4/arch/x86/include/asm/processor.h linux-2.6.39.4/arch/x86/include/asm/processor.h
8385 --- linux-2.6.39.4/arch/x86/include/asm/processor.h 2011-05-19 00:06:34.000000000 -0400
8386 +++ linux-2.6.39.4/arch/x86/include/asm/processor.h 2011-08-05 19:44:33.000000000 -0400
8387 @@ -266,7 +266,7 @@ struct tss_struct {
8388
8389 } ____cacheline_aligned;
8390
8391 -DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
8392 +extern struct tss_struct init_tss[NR_CPUS];
8393
8394 /*
8395 * Save the original ist values for checking stack pointers during debugging
8396 @@ -860,11 +860,18 @@ static inline void spin_lock_prefetch(co
8397 */
8398 #define TASK_SIZE PAGE_OFFSET
8399 #define TASK_SIZE_MAX TASK_SIZE
8400 +
8401 +#ifdef CONFIG_PAX_SEGMEXEC
8402 +#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
8403 +#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
8404 +#else
8405 #define STACK_TOP TASK_SIZE
8406 -#define STACK_TOP_MAX STACK_TOP
8407 +#endif
8408 +
8409 +#define STACK_TOP_MAX TASK_SIZE
8410
8411 #define INIT_THREAD { \
8412 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8413 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8414 .vm86_info = NULL, \
8415 .sysenter_cs = __KERNEL_CS, \
8416 .io_bitmap_ptr = NULL, \
8417 @@ -878,7 +885,7 @@ static inline void spin_lock_prefetch(co
8418 */
8419 #define INIT_TSS { \
8420 .x86_tss = { \
8421 - .sp0 = sizeof(init_stack) + (long)&init_stack, \
8422 + .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
8423 .ss0 = __KERNEL_DS, \
8424 .ss1 = __KERNEL_CS, \
8425 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
8426 @@ -889,11 +896,7 @@ static inline void spin_lock_prefetch(co
8427 extern unsigned long thread_saved_pc(struct task_struct *tsk);
8428
8429 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
8430 -#define KSTK_TOP(info) \
8431 -({ \
8432 - unsigned long *__ptr = (unsigned long *)(info); \
8433 - (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
8434 -})
8435 +#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
8436
8437 /*
8438 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
8439 @@ -908,7 +911,7 @@ extern unsigned long thread_saved_pc(str
8440 #define task_pt_regs(task) \
8441 ({ \
8442 struct pt_regs *__regs__; \
8443 - __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
8444 + __regs__ = (struct pt_regs *)((task)->thread.sp0); \
8445 __regs__ - 1; \
8446 })
8447
8448 @@ -918,13 +921,13 @@ extern unsigned long thread_saved_pc(str
8449 /*
8450 * User space process size. 47bits minus one guard page.
8451 */
8452 -#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
8453 +#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
8454
8455 /* This decides where the kernel will search for a free chunk of vm
8456 * space during mmap's.
8457 */
8458 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
8459 - 0xc0000000 : 0xFFFFe000)
8460 + 0xc0000000 : 0xFFFFf000)
8461
8462 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
8463 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
8464 @@ -935,11 +938,11 @@ extern unsigned long thread_saved_pc(str
8465 #define STACK_TOP_MAX TASK_SIZE_MAX
8466
8467 #define INIT_THREAD { \
8468 - .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8469 + .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8470 }
8471
8472 #define INIT_TSS { \
8473 - .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
8474 + .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
8475 }
8476
8477 /*
8478 @@ -961,6 +964,10 @@ extern void start_thread(struct pt_regs
8479 */
8480 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
8481
8482 +#ifdef CONFIG_PAX_SEGMEXEC
8483 +#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
8484 +#endif
8485 +
8486 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
8487
8488 /* Get/set a process' ability to use the timestamp counter instruction */
8489 diff -urNp linux-2.6.39.4/arch/x86/include/asm/ptrace.h linux-2.6.39.4/arch/x86/include/asm/ptrace.h
8490 --- linux-2.6.39.4/arch/x86/include/asm/ptrace.h 2011-05-19 00:06:34.000000000 -0400
8491 +++ linux-2.6.39.4/arch/x86/include/asm/ptrace.h 2011-08-05 19:44:33.000000000 -0400
8492 @@ -152,28 +152,29 @@ static inline unsigned long regs_return_
8493 }
8494
8495 /*
8496 - * user_mode_vm(regs) determines whether a register set came from user mode.
8497 + * user_mode(regs) determines whether a register set came from user mode.
8498 * This is true if V8086 mode was enabled OR if the register set was from
8499 * protected mode with RPL-3 CS value. This tricky test checks that with
8500 * one comparison. Many places in the kernel can bypass this full check
8501 - * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
8502 + * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
8503 + * be used.
8504 */
8505 -static inline int user_mode(struct pt_regs *regs)
8506 +static inline int user_mode_novm(struct pt_regs *regs)
8507 {
8508 #ifdef CONFIG_X86_32
8509 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
8510 #else
8511 - return !!(regs->cs & 3);
8512 + return !!(regs->cs & SEGMENT_RPL_MASK);
8513 #endif
8514 }
8515
8516 -static inline int user_mode_vm(struct pt_regs *regs)
8517 +static inline int user_mode(struct pt_regs *regs)
8518 {
8519 #ifdef CONFIG_X86_32
8520 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
8521 USER_RPL;
8522 #else
8523 - return user_mode(regs);
8524 + return user_mode_novm(regs);
8525 #endif
8526 }
8527
8528 diff -urNp linux-2.6.39.4/arch/x86/include/asm/reboot.h linux-2.6.39.4/arch/x86/include/asm/reboot.h
8529 --- linux-2.6.39.4/arch/x86/include/asm/reboot.h 2011-05-19 00:06:34.000000000 -0400
8530 +++ linux-2.6.39.4/arch/x86/include/asm/reboot.h 2011-08-05 20:34:06.000000000 -0400
8531 @@ -6,19 +6,19 @@
8532 struct pt_regs;
8533
8534 struct machine_ops {
8535 - void (*restart)(char *cmd);
8536 - void (*halt)(void);
8537 - void (*power_off)(void);
8538 + void (* __noreturn restart)(char *cmd);
8539 + void (* __noreturn halt)(void);
8540 + void (* __noreturn power_off)(void);
8541 void (*shutdown)(void);
8542 void (*crash_shutdown)(struct pt_regs *);
8543 - void (*emergency_restart)(void);
8544 -};
8545 + void (* __noreturn emergency_restart)(void);
8546 +} __no_const;
8547
8548 extern struct machine_ops machine_ops;
8549
8550 void native_machine_crash_shutdown(struct pt_regs *regs);
8551 void native_machine_shutdown(void);
8552 -void machine_real_restart(unsigned int type);
8553 +void machine_real_restart(unsigned int type) __noreturn;
8554 /* These must match dispatch_table in reboot_32.S */
8555 #define MRR_BIOS 0
8556 #define MRR_APM 1
8557 diff -urNp linux-2.6.39.4/arch/x86/include/asm/rwsem.h linux-2.6.39.4/arch/x86/include/asm/rwsem.h
8558 --- linux-2.6.39.4/arch/x86/include/asm/rwsem.h 2011-05-19 00:06:34.000000000 -0400
8559 +++ linux-2.6.39.4/arch/x86/include/asm/rwsem.h 2011-08-05 19:44:33.000000000 -0400
8560 @@ -64,6 +64,14 @@ static inline void __down_read(struct rw
8561 {
8562 asm volatile("# beginning down_read\n\t"
8563 LOCK_PREFIX _ASM_INC "(%1)\n\t"
8564 +
8565 +#ifdef CONFIG_PAX_REFCOUNT
8566 + "jno 0f\n"
8567 + LOCK_PREFIX _ASM_DEC "(%1)\n"
8568 + "int $4\n0:\n"
8569 + _ASM_EXTABLE(0b, 0b)
8570 +#endif
8571 +
8572 /* adds 0x00000001 */
8573 " jns 1f\n"
8574 " call call_rwsem_down_read_failed\n"
8575 @@ -85,6 +93,14 @@ static inline int __down_read_trylock(st
8576 "1:\n\t"
8577 " mov %1,%2\n\t"
8578 " add %3,%2\n\t"
8579 +
8580 +#ifdef CONFIG_PAX_REFCOUNT
8581 + "jno 0f\n"
8582 + "sub %3,%2\n"
8583 + "int $4\n0:\n"
8584 + _ASM_EXTABLE(0b, 0b)
8585 +#endif
8586 +
8587 " jle 2f\n\t"
8588 LOCK_PREFIX " cmpxchg %2,%0\n\t"
8589 " jnz 1b\n\t"
8590 @@ -104,6 +120,14 @@ static inline void __down_write_nested(s
8591 long tmp;
8592 asm volatile("# beginning down_write\n\t"
8593 LOCK_PREFIX " xadd %1,(%2)\n\t"
8594 +
8595 +#ifdef CONFIG_PAX_REFCOUNT
8596 + "jno 0f\n"
8597 + "mov %1,(%2)\n"
8598 + "int $4\n0:\n"
8599 + _ASM_EXTABLE(0b, 0b)
8600 +#endif
8601 +
8602 /* adds 0xffff0001, returns the old value */
8603 " test %1,%1\n\t"
8604 /* was the count 0 before? */
8605 @@ -141,6 +165,14 @@ static inline void __up_read(struct rw_s
8606 long tmp;
8607 asm volatile("# beginning __up_read\n\t"
8608 LOCK_PREFIX " xadd %1,(%2)\n\t"
8609 +
8610 +#ifdef CONFIG_PAX_REFCOUNT
8611 + "jno 0f\n"
8612 + "mov %1,(%2)\n"
8613 + "int $4\n0:\n"
8614 + _ASM_EXTABLE(0b, 0b)
8615 +#endif
8616 +
8617 /* subtracts 1, returns the old value */
8618 " jns 1f\n\t"
8619 " call call_rwsem_wake\n" /* expects old value in %edx */
8620 @@ -159,6 +191,14 @@ static inline void __up_write(struct rw_
8621 long tmp;
8622 asm volatile("# beginning __up_write\n\t"
8623 LOCK_PREFIX " xadd %1,(%2)\n\t"
8624 +
8625 +#ifdef CONFIG_PAX_REFCOUNT
8626 + "jno 0f\n"
8627 + "mov %1,(%2)\n"
8628 + "int $4\n0:\n"
8629 + _ASM_EXTABLE(0b, 0b)
8630 +#endif
8631 +
8632 /* subtracts 0xffff0001, returns the old value */
8633 " jns 1f\n\t"
8634 " call call_rwsem_wake\n" /* expects old value in %edx */
8635 @@ -176,6 +216,14 @@ static inline void __downgrade_write(str
8636 {
8637 asm volatile("# beginning __downgrade_write\n\t"
8638 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
8639 +
8640 +#ifdef CONFIG_PAX_REFCOUNT
8641 + "jno 0f\n"
8642 + LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
8643 + "int $4\n0:\n"
8644 + _ASM_EXTABLE(0b, 0b)
8645 +#endif
8646 +
8647 /*
8648 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
8649 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
8650 @@ -194,7 +242,15 @@ static inline void __downgrade_write(str
8651 */
8652 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
8653 {
8654 - asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
8655 + asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
8656 +
8657 +#ifdef CONFIG_PAX_REFCOUNT
8658 + "jno 0f\n"
8659 + LOCK_PREFIX _ASM_SUB "%1,%0\n"
8660 + "int $4\n0:\n"
8661 + _ASM_EXTABLE(0b, 0b)
8662 +#endif
8663 +
8664 : "+m" (sem->count)
8665 : "er" (delta));
8666 }
8667 @@ -206,7 +262,15 @@ static inline long rwsem_atomic_update(l
8668 {
8669 long tmp = delta;
8670
8671 - asm volatile(LOCK_PREFIX "xadd %0,%1"
8672 + asm volatile(LOCK_PREFIX "xadd %0,%1\n"
8673 +
8674 +#ifdef CONFIG_PAX_REFCOUNT
8675 + "jno 0f\n"
8676 + "mov %0,%1\n"
8677 + "int $4\n0:\n"
8678 + _ASM_EXTABLE(0b, 0b)
8679 +#endif
8680 +
8681 : "+r" (tmp), "+m" (sem->count)
8682 : : "memory");
8683
8684 diff -urNp linux-2.6.39.4/arch/x86/include/asm/segment.h linux-2.6.39.4/arch/x86/include/asm/segment.h
8685 --- linux-2.6.39.4/arch/x86/include/asm/segment.h 2011-05-19 00:06:34.000000000 -0400
8686 +++ linux-2.6.39.4/arch/x86/include/asm/segment.h 2011-08-05 19:44:33.000000000 -0400
8687 @@ -64,8 +64,8 @@
8688 * 26 - ESPFIX small SS
8689 * 27 - per-cpu [ offset to per-cpu data area ]
8690 * 28 - stack_canary-20 [ for stack protector ]
8691 - * 29 - unused
8692 - * 30 - unused
8693 + * 29 - PCI BIOS CS
8694 + * 30 - PCI BIOS DS
8695 * 31 - TSS for double fault handler
8696 */
8697 #define GDT_ENTRY_TLS_MIN 6
8698 @@ -79,6 +79,8 @@
8699
8700 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
8701
8702 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
8703 +
8704 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
8705
8706 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
8707 @@ -104,6 +106,12 @@
8708 #define __KERNEL_STACK_CANARY 0
8709 #endif
8710
8711 +#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
8712 +#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
8713 +
8714 +#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
8715 +#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
8716 +
8717 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
8718
8719 /*
8720 @@ -141,7 +149,7 @@
8721 */
8722
8723 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
8724 -#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
8725 +#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
8726
8727
8728 #else
8729 @@ -165,6 +173,8 @@
8730 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
8731 #define __USER32_DS __USER_DS
8732
8733 +#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
8734 +
8735 #define GDT_ENTRY_TSS 8 /* needs two entries */
8736 #define GDT_ENTRY_LDT 10 /* needs two entries */
8737 #define GDT_ENTRY_TLS_MIN 12
8738 @@ -185,6 +195,7 @@
8739 #endif
8740
8741 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
8742 +#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
8743 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
8744 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
8745 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
8746 diff -urNp linux-2.6.39.4/arch/x86/include/asm/smp.h linux-2.6.39.4/arch/x86/include/asm/smp.h
8747 --- linux-2.6.39.4/arch/x86/include/asm/smp.h 2011-05-19 00:06:34.000000000 -0400
8748 +++ linux-2.6.39.4/arch/x86/include/asm/smp.h 2011-08-05 20:34:06.000000000 -0400
8749 @@ -36,7 +36,7 @@ DECLARE_PER_CPU(cpumask_var_t, cpu_core_
8750 /* cpus sharing the last level cache: */
8751 DECLARE_PER_CPU(cpumask_var_t, cpu_llc_shared_map);
8752 DECLARE_PER_CPU(u16, cpu_llc_id);
8753 -DECLARE_PER_CPU(int, cpu_number);
8754 +DECLARE_PER_CPU(unsigned int, cpu_number);
8755
8756 static inline struct cpumask *cpu_sibling_mask(int cpu)
8757 {
8758 @@ -77,7 +77,7 @@ struct smp_ops {
8759
8760 void (*send_call_func_ipi)(const struct cpumask *mask);
8761 void (*send_call_func_single_ipi)(int cpu);
8762 -};
8763 +} __no_const;
8764
8765 /* Globals due to paravirt */
8766 extern void set_cpu_sibling_map(int cpu);
8767 @@ -192,14 +192,8 @@ extern unsigned disabled_cpus __cpuinitd
8768 extern int safe_smp_processor_id(void);
8769
8770 #elif defined(CONFIG_X86_64_SMP)
8771 -#define raw_smp_processor_id() (percpu_read(cpu_number))
8772 -
8773 -#define stack_smp_processor_id() \
8774 -({ \
8775 - struct thread_info *ti; \
8776 - __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
8777 - ti->cpu; \
8778 -})
8779 +#define raw_smp_processor_id() (percpu_read(cpu_number))
8780 +#define stack_smp_processor_id() raw_smp_processor_id()
8781 #define safe_smp_processor_id() smp_processor_id()
8782
8783 #endif
8784 diff -urNp linux-2.6.39.4/arch/x86/include/asm/spinlock.h linux-2.6.39.4/arch/x86/include/asm/spinlock.h
8785 --- linux-2.6.39.4/arch/x86/include/asm/spinlock.h 2011-05-19 00:06:34.000000000 -0400
8786 +++ linux-2.6.39.4/arch/x86/include/asm/spinlock.h 2011-08-05 19:44:33.000000000 -0400
8787 @@ -249,6 +249,14 @@ static inline int arch_write_can_lock(ar
8788 static inline void arch_read_lock(arch_rwlock_t *rw)
8789 {
8790 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
8791 +
8792 +#ifdef CONFIG_PAX_REFCOUNT
8793 + "jno 0f\n"
8794 + LOCK_PREFIX " addl $1,(%0)\n"
8795 + "int $4\n0:\n"
8796 + _ASM_EXTABLE(0b, 0b)
8797 +#endif
8798 +
8799 "jns 1f\n"
8800 "call __read_lock_failed\n\t"
8801 "1:\n"
8802 @@ -258,6 +266,14 @@ static inline void arch_read_lock(arch_r
8803 static inline void arch_write_lock(arch_rwlock_t *rw)
8804 {
8805 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
8806 +
8807 +#ifdef CONFIG_PAX_REFCOUNT
8808 + "jno 0f\n"
8809 + LOCK_PREFIX " addl %1,(%0)\n"
8810 + "int $4\n0:\n"
8811 + _ASM_EXTABLE(0b, 0b)
8812 +#endif
8813 +
8814 "jz 1f\n"
8815 "call __write_lock_failed\n\t"
8816 "1:\n"
8817 @@ -286,12 +302,29 @@ static inline int arch_write_trylock(arc
8818
8819 static inline void arch_read_unlock(arch_rwlock_t *rw)
8820 {
8821 - asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
8822 + asm volatile(LOCK_PREFIX "incl %0\n"
8823 +
8824 +#ifdef CONFIG_PAX_REFCOUNT
8825 + "jno 0f\n"
8826 + LOCK_PREFIX "decl %0\n"
8827 + "int $4\n0:\n"
8828 + _ASM_EXTABLE(0b, 0b)
8829 +#endif
8830 +
8831 + :"+m" (rw->lock) : : "memory");
8832 }
8833
8834 static inline void arch_write_unlock(arch_rwlock_t *rw)
8835 {
8836 - asm volatile(LOCK_PREFIX "addl %1, %0"
8837 + asm volatile(LOCK_PREFIX "addl %1, %0\n"
8838 +
8839 +#ifdef CONFIG_PAX_REFCOUNT
8840 + "jno 0f\n"
8841 + LOCK_PREFIX "subl %1, %0\n"
8842 + "int $4\n0:\n"
8843 + _ASM_EXTABLE(0b, 0b)
8844 +#endif
8845 +
8846 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
8847 }
8848
8849 diff -urNp linux-2.6.39.4/arch/x86/include/asm/stackprotector.h linux-2.6.39.4/arch/x86/include/asm/stackprotector.h
8850 --- linux-2.6.39.4/arch/x86/include/asm/stackprotector.h 2011-05-19 00:06:34.000000000 -0400
8851 +++ linux-2.6.39.4/arch/x86/include/asm/stackprotector.h 2011-08-05 19:44:33.000000000 -0400
8852 @@ -48,7 +48,7 @@
8853 * head_32 for boot CPU and setup_per_cpu_areas() for others.
8854 */
8855 #define GDT_STACK_CANARY_INIT \
8856 - [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
8857 + [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
8858
8859 /*
8860 * Initialize the stackprotector canary value.
8861 @@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
8862
8863 static inline void load_stack_canary_segment(void)
8864 {
8865 -#ifdef CONFIG_X86_32
8866 +#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
8867 asm volatile ("mov %0, %%gs" : : "r" (0));
8868 #endif
8869 }
8870 diff -urNp linux-2.6.39.4/arch/x86/include/asm/stacktrace.h linux-2.6.39.4/arch/x86/include/asm/stacktrace.h
8871 --- linux-2.6.39.4/arch/x86/include/asm/stacktrace.h 2011-05-19 00:06:34.000000000 -0400
8872 +++ linux-2.6.39.4/arch/x86/include/asm/stacktrace.h 2011-08-05 19:44:33.000000000 -0400
8873 @@ -11,28 +11,20 @@
8874
8875 extern int kstack_depth_to_print;
8876
8877 -struct thread_info;
8878 +struct task_struct;
8879 struct stacktrace_ops;
8880
8881 -typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
8882 - unsigned long *stack,
8883 - unsigned long bp,
8884 - const struct stacktrace_ops *ops,
8885 - void *data,
8886 - unsigned long *end,
8887 - int *graph);
8888 -
8889 -extern unsigned long
8890 -print_context_stack(struct thread_info *tinfo,
8891 - unsigned long *stack, unsigned long bp,
8892 - const struct stacktrace_ops *ops, void *data,
8893 - unsigned long *end, int *graph);
8894 -
8895 -extern unsigned long
8896 -print_context_stack_bp(struct thread_info *tinfo,
8897 - unsigned long *stack, unsigned long bp,
8898 - const struct stacktrace_ops *ops, void *data,
8899 - unsigned long *end, int *graph);
8900 +typedef unsigned long walk_stack_t(struct task_struct *task,
8901 + void *stack_start,
8902 + unsigned long *stack,
8903 + unsigned long bp,
8904 + const struct stacktrace_ops *ops,
8905 + void *data,
8906 + unsigned long *end,
8907 + int *graph);
8908 +
8909 +extern walk_stack_t print_context_stack;
8910 +extern walk_stack_t print_context_stack_bp;
8911
8912 /* Generic stack tracer with callbacks */
8913
8914 @@ -43,7 +35,7 @@ struct stacktrace_ops {
8915 void (*address)(void *data, unsigned long address, int reliable);
8916 /* On negative return stop dumping */
8917 int (*stack)(void *data, char *name);
8918 - walk_stack_t walk_stack;
8919 + walk_stack_t *walk_stack;
8920 };
8921
8922 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
8923 diff -urNp linux-2.6.39.4/arch/x86/include/asm/system.h linux-2.6.39.4/arch/x86/include/asm/system.h
8924 --- linux-2.6.39.4/arch/x86/include/asm/system.h 2011-05-19 00:06:34.000000000 -0400
8925 +++ linux-2.6.39.4/arch/x86/include/asm/system.h 2011-08-05 19:44:33.000000000 -0400
8926 @@ -129,7 +129,7 @@ do { \
8927 "call __switch_to\n\t" \
8928 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
8929 __switch_canary \
8930 - "movq %P[thread_info](%%rsi),%%r8\n\t" \
8931 + "movq "__percpu_arg([thread_info])",%%r8\n\t" \
8932 "movq %%rax,%%rdi\n\t" \
8933 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
8934 "jnz ret_from_fork\n\t" \
8935 @@ -140,7 +140,7 @@ do { \
8936 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
8937 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
8938 [_tif_fork] "i" (_TIF_FORK), \
8939 - [thread_info] "i" (offsetof(struct task_struct, stack)), \
8940 + [thread_info] "m" (current_tinfo), \
8941 [current_task] "m" (current_task) \
8942 __switch_canary_iparam \
8943 : "memory", "cc" __EXTRA_CLOBBER)
8944 @@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
8945 {
8946 unsigned long __limit;
8947 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
8948 - return __limit + 1;
8949 + return __limit;
8950 }
8951
8952 static inline void native_clts(void)
8953 @@ -340,12 +340,12 @@ void enable_hlt(void);
8954
8955 void cpu_idle_wait(void);
8956
8957 -extern unsigned long arch_align_stack(unsigned long sp);
8958 +#define arch_align_stack(x) ((x) & ~0xfUL)
8959 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
8960
8961 void default_idle(void);
8962
8963 -void stop_this_cpu(void *dummy);
8964 +void stop_this_cpu(void *dummy) __noreturn;
8965
8966 /*
8967 * Force strict CPU ordering.
8968 diff -urNp linux-2.6.39.4/arch/x86/include/asm/thread_info.h linux-2.6.39.4/arch/x86/include/asm/thread_info.h
8969 --- linux-2.6.39.4/arch/x86/include/asm/thread_info.h 2011-05-19 00:06:34.000000000 -0400
8970 +++ linux-2.6.39.4/arch/x86/include/asm/thread_info.h 2011-08-05 19:44:33.000000000 -0400
8971 @@ -10,6 +10,7 @@
8972 #include <linux/compiler.h>
8973 #include <asm/page.h>
8974 #include <asm/types.h>
8975 +#include <asm/percpu.h>
8976
8977 /*
8978 * low level task data that entry.S needs immediate access to
8979 @@ -24,7 +25,6 @@ struct exec_domain;
8980 #include <asm/atomic.h>
8981
8982 struct thread_info {
8983 - struct task_struct *task; /* main task structure */
8984 struct exec_domain *exec_domain; /* execution domain */
8985 __u32 flags; /* low level flags */
8986 __u32 status; /* thread synchronous flags */
8987 @@ -34,18 +34,12 @@ struct thread_info {
8988 mm_segment_t addr_limit;
8989 struct restart_block restart_block;
8990 void __user *sysenter_return;
8991 -#ifdef CONFIG_X86_32
8992 - unsigned long previous_esp; /* ESP of the previous stack in
8993 - case of nested (IRQ) stacks
8994 - */
8995 - __u8 supervisor_stack[0];
8996 -#endif
8997 + unsigned long lowest_stack;
8998 int uaccess_err;
8999 };
9000
9001 -#define INIT_THREAD_INFO(tsk) \
9002 +#define INIT_THREAD_INFO \
9003 { \
9004 - .task = &tsk, \
9005 .exec_domain = &default_exec_domain, \
9006 .flags = 0, \
9007 .cpu = 0, \
9008 @@ -56,7 +50,7 @@ struct thread_info {
9009 }, \
9010 }
9011
9012 -#define init_thread_info (init_thread_union.thread_info)
9013 +#define init_thread_info (init_thread_union.stack)
9014 #define init_stack (init_thread_union.stack)
9015
9016 #else /* !__ASSEMBLY__ */
9017 @@ -170,6 +164,23 @@ struct thread_info {
9018 ret; \
9019 })
9020
9021 +#ifdef __ASSEMBLY__
9022 +/* how to get the thread information struct from ASM */
9023 +#define GET_THREAD_INFO(reg) \
9024 + mov PER_CPU_VAR(current_tinfo), reg
9025 +
9026 +/* use this one if reg already contains %esp */
9027 +#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
9028 +#else
9029 +/* how to get the thread information struct from C */
9030 +DECLARE_PER_CPU(struct thread_info *, current_tinfo);
9031 +
9032 +static __always_inline struct thread_info *current_thread_info(void)
9033 +{
9034 + return percpu_read_stable(current_tinfo);
9035 +}
9036 +#endif
9037 +
9038 #ifdef CONFIG_X86_32
9039
9040 #define STACK_WARN (THREAD_SIZE/8)
9041 @@ -180,35 +191,13 @@ struct thread_info {
9042 */
9043 #ifndef __ASSEMBLY__
9044
9045 -
9046 /* how to get the current stack pointer from C */
9047 register unsigned long current_stack_pointer asm("esp") __used;
9048
9049 -/* how to get the thread information struct from C */
9050 -static inline struct thread_info *current_thread_info(void)
9051 -{
9052 - return (struct thread_info *)
9053 - (current_stack_pointer & ~(THREAD_SIZE - 1));
9054 -}
9055 -
9056 -#else /* !__ASSEMBLY__ */
9057 -
9058 -/* how to get the thread information struct from ASM */
9059 -#define GET_THREAD_INFO(reg) \
9060 - movl $-THREAD_SIZE, reg; \
9061 - andl %esp, reg
9062 -
9063 -/* use this one if reg already contains %esp */
9064 -#define GET_THREAD_INFO_WITH_ESP(reg) \
9065 - andl $-THREAD_SIZE, reg
9066 -
9067 #endif
9068
9069 #else /* X86_32 */
9070
9071 -#include <asm/percpu.h>
9072 -#define KERNEL_STACK_OFFSET (5*8)
9073 -
9074 /*
9075 * macros/functions for gaining access to the thread information structure
9076 * preempt_count needs to be 1 initially, until the scheduler is functional.
9077 @@ -216,21 +205,8 @@ static inline struct thread_info *curren
9078 #ifndef __ASSEMBLY__
9079 DECLARE_PER_CPU(unsigned long, kernel_stack);
9080
9081 -static inline struct thread_info *current_thread_info(void)
9082 -{
9083 - struct thread_info *ti;
9084 - ti = (void *)(percpu_read_stable(kernel_stack) +
9085 - KERNEL_STACK_OFFSET - THREAD_SIZE);
9086 - return ti;
9087 -}
9088 -
9089 -#else /* !__ASSEMBLY__ */
9090 -
9091 -/* how to get the thread information struct from ASM */
9092 -#define GET_THREAD_INFO(reg) \
9093 - movq PER_CPU_VAR(kernel_stack),reg ; \
9094 - subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
9095 -
9096 +/* how to get the current stack pointer from C */
9097 +register unsigned long current_stack_pointer asm("rsp") __used;
9098 #endif
9099
9100 #endif /* !X86_32 */
9101 @@ -266,5 +242,16 @@ extern void arch_task_cache_init(void);
9102 extern void free_thread_info(struct thread_info *ti);
9103 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
9104 #define arch_task_cache_init arch_task_cache_init
9105 +
9106 +#define __HAVE_THREAD_FUNCTIONS
9107 +#define task_thread_info(task) (&(task)->tinfo)
9108 +#define task_stack_page(task) ((task)->stack)
9109 +#define setup_thread_stack(p, org) do {} while (0)
9110 +#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
9111 +
9112 +#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
9113 +extern struct task_struct *alloc_task_struct_node(int node);
9114 +extern void free_task_struct(struct task_struct *);
9115 +
9116 #endif
9117 #endif /* _ASM_X86_THREAD_INFO_H */
9118 diff -urNp linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h
9119 --- linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h 2011-05-19 00:06:34.000000000 -0400
9120 +++ linux-2.6.39.4/arch/x86/include/asm/uaccess_32.h 2011-08-05 19:44:33.000000000 -0400
9121 @@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
9122 static __always_inline unsigned long __must_check
9123 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
9124 {
9125 + pax_track_stack();
9126 +
9127 + if ((long)n < 0)
9128 + return n;
9129 +
9130 if (__builtin_constant_p(n)) {
9131 unsigned long ret;
9132
9133 @@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
9134 return ret;
9135 }
9136 }
9137 + if (!__builtin_constant_p(n))
9138 + check_object_size(from, n, true);
9139 return __copy_to_user_ll(to, from, n);
9140 }
9141
9142 @@ -83,12 +90,16 @@ static __always_inline unsigned long __m
9143 __copy_to_user(void __user *to, const void *from, unsigned long n)
9144 {
9145 might_fault();
9146 +
9147 return __copy_to_user_inatomic(to, from, n);
9148 }
9149
9150 static __always_inline unsigned long
9151 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
9152 {
9153 + if ((long)n < 0)
9154 + return n;
9155 +
9156 /* Avoid zeroing the tail if the copy fails..
9157 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
9158 * but as the zeroing behaviour is only significant when n is not
9159 @@ -138,6 +149,12 @@ static __always_inline unsigned long
9160 __copy_from_user(void *to, const void __user *from, unsigned long n)
9161 {
9162 might_fault();
9163 +
9164 + pax_track_stack();
9165 +
9166 + if ((long)n < 0)
9167 + return n;
9168 +
9169 if (__builtin_constant_p(n)) {
9170 unsigned long ret;
9171
9172 @@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
9173 return ret;
9174 }
9175 }
9176 + if (!__builtin_constant_p(n))
9177 + check_object_size(to, n, false);
9178 return __copy_from_user_ll(to, from, n);
9179 }
9180
9181 @@ -160,6 +179,10 @@ static __always_inline unsigned long __c
9182 const void __user *from, unsigned long n)
9183 {
9184 might_fault();
9185 +
9186 + if ((long)n < 0)
9187 + return n;
9188 +
9189 if (__builtin_constant_p(n)) {
9190 unsigned long ret;
9191
9192 @@ -182,15 +205,19 @@ static __always_inline unsigned long
9193 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
9194 unsigned long n)
9195 {
9196 - return __copy_from_user_ll_nocache_nozero(to, from, n);
9197 -}
9198 + if ((long)n < 0)
9199 + return n;
9200
9201 -unsigned long __must_check copy_to_user(void __user *to,
9202 - const void *from, unsigned long n);
9203 -unsigned long __must_check _copy_from_user(void *to,
9204 - const void __user *from,
9205 - unsigned long n);
9206 + return __copy_from_user_ll_nocache_nozero(to, from, n);
9207 +}
9208
9209 +extern void copy_to_user_overflow(void)
9210 +#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9211 + __compiletime_error("copy_to_user() buffer size is not provably correct")
9212 +#else
9213 + __compiletime_warning("copy_to_user() buffer size is not provably correct")
9214 +#endif
9215 +;
9216
9217 extern void copy_from_user_overflow(void)
9218 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
9219 @@ -200,17 +227,61 @@ extern void copy_from_user_overflow(void
9220 #endif
9221 ;
9222
9223 -static inline unsigned long __must_check copy_from_user(void *to,
9224 - const void __user *from,
9225 - unsigned long n)
9226 +/**
9227 + * copy_to_user: - Copy a block of data into user space.
9228 + * @to: Destination address, in user space.
9229 + * @from: Source address, in kernel space.
9230 + * @n: Number of bytes to copy.
9231 + *
9232 + * Context: User context only. This function may sleep.
9233 + *
9234 + * Copy data from kernel space to user space.
9235 + *
9236 + * Returns number of bytes that could not be copied.
9237 + * On success, this will be zero.
9238 + */
9239 +static inline unsigned long __must_check
9240 +copy_to_user(void __user *to, const void *from, unsigned long n)
9241 +{
9242 + int sz = __compiletime_object_size(from);
9243 +
9244 + if (unlikely(sz != -1 && sz < n))
9245 + copy_to_user_overflow();
9246 + else if (access_ok(VERIFY_WRITE, to, n))
9247 + n = __copy_to_user(to, from, n);
9248 + return n;
9249 +}
9250 +
9251 +/**
9252 + * copy_from_user: - Copy a block of data from user space.
9253 + * @to: Destination address, in kernel space.
9254 + * @from: Source address, in user space.
9255 + * @n: Number of bytes to copy.
9256 + *
9257 + * Context: User context only. This function may sleep.
9258 + *
9259 + * Copy data from user space to kernel space.
9260 + *
9261 + * Returns number of bytes that could not be copied.
9262 + * On success, this will be zero.
9263 + *
9264 + * If some data could not be copied, this function will pad the copied
9265 + * data to the requested size using zero bytes.
9266 + */
9267 +static inline unsigned long __must_check
9268 +copy_from_user(void *to, const void __user *from, unsigned long n)
9269 {
9270 int sz = __compiletime_object_size(to);
9271
9272 - if (likely(sz == -1 || sz >= n))
9273 - n = _copy_from_user(to, from, n);
9274 - else
9275 + if (unlikely(sz != -1 && sz < n))
9276 copy_from_user_overflow();
9277 -
9278 + else if (access_ok(VERIFY_READ, from, n))
9279 + n = __copy_from_user(to, from, n);
9280 + else if ((long)n > 0) {
9281 + if (!__builtin_constant_p(n))
9282 + check_object_size(to, n, false);
9283 + memset(to, 0, n);
9284 + }
9285 return n;
9286 }
9287
9288 diff -urNp linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h
9289 --- linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h 2011-05-19 00:06:34.000000000 -0400
9290 +++ linux-2.6.39.4/arch/x86/include/asm/uaccess_64.h 2011-08-05 19:44:33.000000000 -0400
9291 @@ -11,6 +11,9 @@
9292 #include <asm/alternative.h>
9293 #include <asm/cpufeature.h>
9294 #include <asm/page.h>
9295 +#include <asm/pgtable.h>
9296 +
9297 +#define set_fs(x) (current_thread_info()->addr_limit = (x))
9298
9299 /*
9300 * Copy To/From Userspace
9301 @@ -37,26 +40,26 @@ copy_user_generic(void *to, const void *
9302 return ret;
9303 }
9304
9305 -__must_check unsigned long
9306 -_copy_to_user(void __user *to, const void *from, unsigned len);
9307 -__must_check unsigned long
9308 -_copy_from_user(void *to, const void __user *from, unsigned len);
9309 +static __always_inline __must_check unsigned long
9310 +__copy_to_user(void __user *to, const void *from, unsigned len);
9311 +static __always_inline __must_check unsigned long
9312 +__copy_from_user(void *to, const void __user *from, unsigned len);
9313 __must_check unsigned long
9314 copy_in_user(void __user *to, const void __user *from, unsigned len);
9315
9316 static inline unsigned long __must_check copy_from_user(void *to,
9317 const void __user *from,
9318 - unsigned long n)
9319 + unsigned n)
9320 {
9321 - int sz = __compiletime_object_size(to);
9322 -
9323 might_fault();
9324 - if (likely(sz == -1 || sz >= n))
9325 - n = _copy_from_user(to, from, n);
9326 -#ifdef CONFIG_DEBUG_VM
9327 - else
9328 - WARN(1, "Buffer overflow detected!\n");
9329 -#endif
9330 +
9331 + if (access_ok(VERIFY_READ, from, n))
9332 + n = __copy_from_user(to, from, n);
9333 + else if ((int)n > 0) {
9334 + if (!__builtin_constant_p(n))
9335 + check_object_size(to, n, false);
9336 + memset(to, 0, n);
9337 + }
9338 return n;
9339 }
9340
9341 @@ -65,110 +68,198 @@ int copy_to_user(void __user *dst, const
9342 {
9343 might_fault();
9344
9345 - return _copy_to_user(dst, src, size);
9346 + if (access_ok(VERIFY_WRITE, dst, size))
9347 + size = __copy_to_user(dst, src, size);
9348 + return size;
9349 }
9350
9351 static __always_inline __must_check
9352 -int __copy_from_user(void *dst, const void __user *src, unsigned size)
9353 +unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
9354 {
9355 - int ret = 0;
9356 + int sz = __compiletime_object_size(dst);
9357 + unsigned ret = 0;
9358
9359 might_fault();
9360 - if (!__builtin_constant_p(size))
9361 - return copy_user_generic(dst, (__force void *)src, size);
9362 +
9363 + pax_track_stack();
9364 +
9365 + if ((int)size < 0)
9366 + return size;
9367 +
9368 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9369 + if (!__access_ok(VERIFY_READ, src, size))
9370 + return size;
9371 +#endif
9372 +
9373 + if (unlikely(sz != -1 && sz < size)) {
9374 +#ifdef CONFIG_DEBUG_VM
9375 + WARN(1, "Buffer overflow detected!\n");
9376 +#endif
9377 + return size;
9378 + }
9379 +
9380 + if (!__builtin_constant_p(size)) {
9381 + check_object_size(dst, size, false);
9382 +
9383 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9384 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9385 + src += PAX_USER_SHADOW_BASE;
9386 +#endif
9387 +
9388 + return copy_user_generic(dst, (__force const void *)src, size);
9389 + }
9390 switch (size) {
9391 - case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
9392 + case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
9393 ret, "b", "b", "=q", 1);
9394 return ret;
9395 - case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
9396 + case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
9397 ret, "w", "w", "=r", 2);
9398 return ret;
9399 - case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
9400 + case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
9401 ret, "l", "k", "=r", 4);
9402 return ret;
9403 - case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
9404 + case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9405 ret, "q", "", "=r", 8);
9406 return ret;
9407 case 10:
9408 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9409 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9410 ret, "q", "", "=r", 10);
9411 if (unlikely(ret))
9412 return ret;
9413 __get_user_asm(*(u16 *)(8 + (char *)dst),
9414 - (u16 __user *)(8 + (char __user *)src),
9415 + (const u16 __user *)(8 + (const char __user *)src),
9416 ret, "w", "w", "=r", 2);
9417 return ret;
9418 case 16:
9419 - __get_user_asm(*(u64 *)dst, (u64 __user *)src,
9420 + __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
9421 ret, "q", "", "=r", 16);
9422 if (unlikely(ret))
9423 return ret;
9424 __get_user_asm(*(u64 *)(8 + (char *)dst),
9425 - (u64 __user *)(8 + (char __user *)src),
9426 + (const u64 __user *)(8 + (const char __user *)src),
9427 ret, "q", "", "=r", 8);
9428 return ret;
9429 default:
9430 - return copy_user_generic(dst, (__force void *)src, size);
9431 +
9432 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9433 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9434 + src += PAX_USER_SHADOW_BASE;
9435 +#endif
9436 +
9437 + return copy_user_generic(dst, (__force const void *)src, size);
9438 }
9439 }
9440
9441 static __always_inline __must_check
9442 -int __copy_to_user(void __user *dst, const void *src, unsigned size)
9443 +unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
9444 {
9445 - int ret = 0;
9446 + int sz = __compiletime_object_size(src);
9447 + unsigned ret = 0;
9448
9449 might_fault();
9450 - if (!__builtin_constant_p(size))
9451 +
9452 + pax_track_stack();
9453 +
9454 + if ((int)size < 0)
9455 + return size;
9456 +
9457 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9458 + if (!__access_ok(VERIFY_WRITE, dst, size))
9459 + return size;
9460 +#endif
9461 +
9462 + if (unlikely(sz != -1 && sz < size)) {
9463 +#ifdef CONFIG_DEBUG_VM
9464 + WARN(1, "Buffer overflow detected!\n");
9465 +#endif
9466 + return size;
9467 + }
9468 +
9469 + if (!__builtin_constant_p(size)) {
9470 + check_object_size(src, size, true);
9471 +
9472 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9473 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9474 + dst += PAX_USER_SHADOW_BASE;
9475 +#endif
9476 +
9477 return copy_user_generic((__force void *)dst, src, size);
9478 + }
9479 switch (size) {
9480 - case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
9481 + case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
9482 ret, "b", "b", "iq", 1);
9483 return ret;
9484 - case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
9485 + case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
9486 ret, "w", "w", "ir", 2);
9487 return ret;
9488 - case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
9489 + case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
9490 ret, "l", "k", "ir", 4);
9491 return ret;
9492 - case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
9493 + case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9494 ret, "q", "", "er", 8);
9495 return ret;
9496 case 10:
9497 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9498 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9499 ret, "q", "", "er", 10);
9500 if (unlikely(ret))
9501 return ret;
9502 asm("":::"memory");
9503 - __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
9504 + __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
9505 ret, "w", "w", "ir", 2);
9506 return ret;
9507 case 16:
9508 - __put_user_asm(*(u64 *)src, (u64 __user *)dst,
9509 + __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
9510 ret, "q", "", "er", 16);
9511 if (unlikely(ret))
9512 return ret;
9513 asm("":::"memory");
9514 - __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
9515 + __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
9516 ret, "q", "", "er", 8);
9517 return ret;
9518 default:
9519 +
9520 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9521 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9522 + dst += PAX_USER_SHADOW_BASE;
9523 +#endif
9524 +
9525 return copy_user_generic((__force void *)dst, src, size);
9526 }
9527 }
9528
9529 static __always_inline __must_check
9530 -int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9531 +unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
9532 {
9533 - int ret = 0;
9534 + unsigned ret = 0;
9535
9536 might_fault();
9537 - if (!__builtin_constant_p(size))
9538 +
9539 + if ((int)size < 0)
9540 + return size;
9541 +
9542 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9543 + if (!__access_ok(VERIFY_READ, src, size))
9544 + return size;
9545 + if (!__access_ok(VERIFY_WRITE, dst, size))
9546 + return size;
9547 +#endif
9548 +
9549 + if (!__builtin_constant_p(size)) {
9550 +
9551 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9552 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9553 + src += PAX_USER_SHADOW_BASE;
9554 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9555 + dst += PAX_USER_SHADOW_BASE;
9556 +#endif
9557 +
9558 return copy_user_generic((__force void *)dst,
9559 - (__force void *)src, size);
9560 + (__force const void *)src, size);
9561 + }
9562 switch (size) {
9563 case 1: {
9564 u8 tmp;
9565 - __get_user_asm(tmp, (u8 __user *)src,
9566 + __get_user_asm(tmp, (const u8 __user *)src,
9567 ret, "b", "b", "=q", 1);
9568 if (likely(!ret))
9569 __put_user_asm(tmp, (u8 __user *)dst,
9570 @@ -177,7 +268,7 @@ int __copy_in_user(void __user *dst, con
9571 }
9572 case 2: {
9573 u16 tmp;
9574 - __get_user_asm(tmp, (u16 __user *)src,
9575 + __get_user_asm(tmp, (const u16 __user *)src,
9576 ret, "w", "w", "=r", 2);
9577 if (likely(!ret))
9578 __put_user_asm(tmp, (u16 __user *)dst,
9579 @@ -187,7 +278,7 @@ int __copy_in_user(void __user *dst, con
9580
9581 case 4: {
9582 u32 tmp;
9583 - __get_user_asm(tmp, (u32 __user *)src,
9584 + __get_user_asm(tmp, (const u32 __user *)src,
9585 ret, "l", "k", "=r", 4);
9586 if (likely(!ret))
9587 __put_user_asm(tmp, (u32 __user *)dst,
9588 @@ -196,7 +287,7 @@ int __copy_in_user(void __user *dst, con
9589 }
9590 case 8: {
9591 u64 tmp;
9592 - __get_user_asm(tmp, (u64 __user *)src,
9593 + __get_user_asm(tmp, (const u64 __user *)src,
9594 ret, "q", "", "=r", 8);
9595 if (likely(!ret))
9596 __put_user_asm(tmp, (u64 __user *)dst,
9597 @@ -204,8 +295,16 @@ int __copy_in_user(void __user *dst, con
9598 return ret;
9599 }
9600 default:
9601 +
9602 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9603 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9604 + src += PAX_USER_SHADOW_BASE;
9605 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9606 + dst += PAX_USER_SHADOW_BASE;
9607 +#endif
9608 +
9609 return copy_user_generic((__force void *)dst,
9610 - (__force void *)src, size);
9611 + (__force const void *)src, size);
9612 }
9613 }
9614
9615 @@ -222,33 +321,72 @@ __must_check unsigned long __clear_user(
9616 static __must_check __always_inline int
9617 __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
9618 {
9619 + pax_track_stack();
9620 +
9621 + if ((int)size < 0)
9622 + return size;
9623 +
9624 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9625 + if (!__access_ok(VERIFY_READ, src, size))
9626 + return size;
9627 +
9628 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
9629 + src += PAX_USER_SHADOW_BASE;
9630 +#endif
9631 +
9632 return copy_user_generic(dst, (__force const void *)src, size);
9633 }
9634
9635 -static __must_check __always_inline int
9636 +static __must_check __always_inline unsigned long
9637 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
9638 {
9639 + if ((int)size < 0)
9640 + return size;
9641 +
9642 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9643 + if (!__access_ok(VERIFY_WRITE, dst, size))
9644 + return size;
9645 +
9646 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
9647 + dst += PAX_USER_SHADOW_BASE;
9648 +#endif
9649 +
9650 return copy_user_generic((__force void *)dst, src, size);
9651 }
9652
9653 -extern long __copy_user_nocache(void *dst, const void __user *src,
9654 +extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
9655 unsigned size, int zerorest);
9656
9657 -static inline int
9658 -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9659 +static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
9660 {
9661 might_sleep();
9662 +
9663 + if ((int)size < 0)
9664 + return size;
9665 +
9666 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9667 + if (!__access_ok(VERIFY_READ, src, size))
9668 + return size;
9669 +#endif
9670 +
9671 return __copy_user_nocache(dst, src, size, 1);
9672 }
9673
9674 -static inline int
9675 -__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9676 +static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
9677 unsigned size)
9678 {
9679 + if ((int)size < 0)
9680 + return size;
9681 +
9682 +#ifdef CONFIG_PAX_MEMORY_UDEREF
9683 + if (!__access_ok(VERIFY_READ, src, size))
9684 + return size;
9685 +#endif
9686 +
9687 return __copy_user_nocache(dst, src, size, 0);
9688 }
9689
9690 -unsigned long
9691 +extern unsigned long
9692 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
9693
9694 #endif /* _ASM_X86_UACCESS_64_H */
9695 diff -urNp linux-2.6.39.4/arch/x86/include/asm/uaccess.h linux-2.6.39.4/arch/x86/include/asm/uaccess.h
9696 --- linux-2.6.39.4/arch/x86/include/asm/uaccess.h 2011-06-03 00:04:13.000000000 -0400
9697 +++ linux-2.6.39.4/arch/x86/include/asm/uaccess.h 2011-08-05 19:44:33.000000000 -0400
9698 @@ -8,12 +8,15 @@
9699 #include <linux/thread_info.h>
9700 #include <linux/prefetch.h>
9701 #include <linux/string.h>
9702 +#include <linux/sched.h>
9703 #include <asm/asm.h>
9704 #include <asm/page.h>
9705
9706 #define VERIFY_READ 0
9707 #define VERIFY_WRITE 1
9708
9709 +extern void check_object_size(const void *ptr, unsigned long n, bool to);
9710 +
9711 /*
9712 * The fs value determines whether argument validity checking should be
9713 * performed or not. If get_fs() == USER_DS, checking is performed, with
9714 @@ -29,7 +32,12 @@
9715
9716 #define get_ds() (KERNEL_DS)
9717 #define get_fs() (current_thread_info()->addr_limit)
9718 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9719 +void __set_fs(mm_segment_t x);
9720 +void set_fs(mm_segment_t x);
9721 +#else
9722 #define set_fs(x) (current_thread_info()->addr_limit = (x))
9723 +#endif
9724
9725 #define segment_eq(a, b) ((a).seg == (b).seg)
9726
9727 @@ -77,7 +85,33 @@
9728 * checks that the pointer is in the user space range - after calling
9729 * this function, memory access functions may still return -EFAULT.
9730 */
9731 -#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9732 +#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
9733 +#define access_ok(type, addr, size) \
9734 +({ \
9735 + long __size = size; \
9736 + unsigned long __addr = (unsigned long)addr; \
9737 + unsigned long __addr_ao = __addr & PAGE_MASK; \
9738 + unsigned long __end_ao = __addr + __size - 1; \
9739 + bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
9740 + if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
9741 + while(__addr_ao <= __end_ao) { \
9742 + char __c_ao; \
9743 + __addr_ao += PAGE_SIZE; \
9744 + if (__size > PAGE_SIZE) \
9745 + cond_resched(); \
9746 + if (__get_user(__c_ao, (char __user *)__addr)) \
9747 + break; \
9748 + if (type != VERIFY_WRITE) { \
9749 + __addr = __addr_ao; \
9750 + continue; \
9751 + } \
9752 + if (__put_user(__c_ao, (char __user *)__addr)) \
9753 + break; \
9754 + __addr = __addr_ao; \
9755 + } \
9756 + } \
9757 + __ret_ao; \
9758 +})
9759
9760 /*
9761 * The exception table consists of pairs of addresses: the first is the
9762 @@ -183,12 +217,20 @@ extern int __get_user_bad(void);
9763 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
9764 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
9765
9766 -
9767 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
9768 +#define __copyuser_seg "gs;"
9769 +#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
9770 +#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
9771 +#else
9772 +#define __copyuser_seg
9773 +#define __COPYUSER_SET_ES
9774 +#define __COPYUSER_RESTORE_ES
9775 +#endif
9776
9777 #ifdef CONFIG_X86_32
9778 #define __put_user_asm_u64(x, addr, err, errret) \
9779 - asm volatile("1: movl %%eax,0(%2)\n" \
9780 - "2: movl %%edx,4(%2)\n" \
9781 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
9782 + "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
9783 "3:\n" \
9784 ".section .fixup,\"ax\"\n" \
9785 "4: movl %3,%0\n" \
9786 @@ -200,8 +242,8 @@ extern int __get_user_bad(void);
9787 : "A" (x), "r" (addr), "i" (errret), "0" (err))
9788
9789 #define __put_user_asm_ex_u64(x, addr) \
9790 - asm volatile("1: movl %%eax,0(%1)\n" \
9791 - "2: movl %%edx,4(%1)\n" \
9792 + asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
9793 + "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
9794 "3:\n" \
9795 _ASM_EXTABLE(1b, 2b - 1b) \
9796 _ASM_EXTABLE(2b, 3b - 2b) \
9797 @@ -374,7 +416,7 @@ do { \
9798 } while (0)
9799
9800 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9801 - asm volatile("1: mov"itype" %2,%"rtype"1\n" \
9802 + asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
9803 "2:\n" \
9804 ".section .fixup,\"ax\"\n" \
9805 "3: mov %3,%0\n" \
9806 @@ -382,7 +424,7 @@ do { \
9807 " jmp 2b\n" \
9808 ".previous\n" \
9809 _ASM_EXTABLE(1b, 3b) \
9810 - : "=r" (err), ltype(x) \
9811 + : "=r" (err), ltype (x) \
9812 : "m" (__m(addr)), "i" (errret), "0" (err))
9813
9814 #define __get_user_size_ex(x, ptr, size) \
9815 @@ -407,7 +449,7 @@ do { \
9816 } while (0)
9817
9818 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
9819 - asm volatile("1: mov"itype" %1,%"rtype"0\n" \
9820 + asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
9821 "2:\n" \
9822 _ASM_EXTABLE(1b, 2b - 1b) \
9823 : ltype(x) : "m" (__m(addr)))
9824 @@ -424,13 +466,24 @@ do { \
9825 int __gu_err; \
9826 unsigned long __gu_val; \
9827 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
9828 - (x) = (__force __typeof__(*(ptr)))__gu_val; \
9829 + (x) = (__typeof__(*(ptr)))__gu_val; \
9830 __gu_err; \
9831 })
9832
9833 /* FIXME: this hack is definitely wrong -AK */
9834 struct __large_struct { unsigned long buf[100]; };
9835 -#define __m(x) (*(struct __large_struct __user *)(x))
9836 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9837 +#define ____m(x) \
9838 +({ \
9839 + unsigned long ____x = (unsigned long)(x); \
9840 + if (____x < PAX_USER_SHADOW_BASE) \
9841 + ____x += PAX_USER_SHADOW_BASE; \
9842 + (void __user *)____x; \
9843 +})
9844 +#else
9845 +#define ____m(x) (x)
9846 +#endif
9847 +#define __m(x) (*(struct __large_struct __user *)____m(x))
9848
9849 /*
9850 * Tell gcc we read from memory instead of writing: this is because
9851 @@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
9852 * aliasing issues.
9853 */
9854 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
9855 - asm volatile("1: mov"itype" %"rtype"1,%2\n" \
9856 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
9857 "2:\n" \
9858 ".section .fixup,\"ax\"\n" \
9859 "3: mov %3,%0\n" \
9860 @@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
9861 ".previous\n" \
9862 _ASM_EXTABLE(1b, 3b) \
9863 : "=r"(err) \
9864 - : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
9865 + : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
9866
9867 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
9868 - asm volatile("1: mov"itype" %"rtype"0,%1\n" \
9869 + asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
9870 "2:\n" \
9871 _ASM_EXTABLE(1b, 2b - 1b) \
9872 : : ltype(x), "m" (__m(addr)))
9873 @@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
9874 * On error, the variable @x is set to zero.
9875 */
9876
9877 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9878 +#define __get_user(x, ptr) get_user((x), (ptr))
9879 +#else
9880 #define __get_user(x, ptr) \
9881 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
9882 +#endif
9883
9884 /**
9885 * __put_user: - Write a simple value into user space, with less checking.
9886 @@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
9887 * Returns zero on success, or -EFAULT on error.
9888 */
9889
9890 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9891 +#define __put_user(x, ptr) put_user((x), (ptr))
9892 +#else
9893 #define __put_user(x, ptr) \
9894 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
9895 +#endif
9896
9897 #define __get_user_unaligned __get_user
9898 #define __put_user_unaligned __put_user
9899 @@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
9900 #define get_user_ex(x, ptr) do { \
9901 unsigned long __gue_val; \
9902 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
9903 - (x) = (__force __typeof__(*(ptr)))__gue_val; \
9904 + (x) = (__typeof__(*(ptr)))__gue_val; \
9905 } while (0)
9906
9907 #ifdef CONFIG_X86_WP_WORKS_OK
9908 @@ -567,6 +628,7 @@ extern struct movsl_mask {
9909
9910 #define ARCH_HAS_NOCACHE_UACCESS 1
9911
9912 +#define ARCH_HAS_SORT_EXTABLE
9913 #ifdef CONFIG_X86_32
9914 # include "uaccess_32.h"
9915 #else
9916 diff -urNp linux-2.6.39.4/arch/x86/include/asm/vgtod.h linux-2.6.39.4/arch/x86/include/asm/vgtod.h
9917 --- linux-2.6.39.4/arch/x86/include/asm/vgtod.h 2011-05-19 00:06:34.000000000 -0400
9918 +++ linux-2.6.39.4/arch/x86/include/asm/vgtod.h 2011-08-05 19:44:33.000000000 -0400
9919 @@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
9920 int sysctl_enabled;
9921 struct timezone sys_tz;
9922 struct { /* extract of a clocksource struct */
9923 + char name[8];
9924 cycle_t (*vread)(void);
9925 cycle_t cycle_last;
9926 cycle_t mask;
9927 diff -urNp linux-2.6.39.4/arch/x86/include/asm/vsyscall.h linux-2.6.39.4/arch/x86/include/asm/vsyscall.h
9928 --- linux-2.6.39.4/arch/x86/include/asm/vsyscall.h 2011-05-19 00:06:34.000000000 -0400
9929 +++ linux-2.6.39.4/arch/x86/include/asm/vsyscall.h 2011-08-05 19:44:33.000000000 -0400
9930 @@ -15,9 +15,10 @@ enum vsyscall_num {
9931
9932 #ifdef __KERNEL__
9933 #include <linux/seqlock.h>
9934 +#include <linux/getcpu.h>
9935 +#include <linux/time.h>
9936
9937 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
9938 -#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
9939
9940 /* Definitions for CONFIG_GENERIC_TIME definitions */
9941 #define __section_vsyscall_gtod_data __attribute__ \
9942 @@ -31,7 +32,6 @@ enum vsyscall_num {
9943 #define VGETCPU_LSL 2
9944
9945 extern int __vgetcpu_mode;
9946 -extern volatile unsigned long __jiffies;
9947
9948 /* kernel space (writeable) */
9949 extern int vgetcpu_mode;
9950 @@ -39,6 +39,9 @@ extern struct timezone sys_tz;
9951
9952 extern void map_vsyscall(void);
9953
9954 +extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
9955 +extern time_t vtime(time_t *t);
9956 +extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
9957 #endif /* __KERNEL__ */
9958
9959 #endif /* _ASM_X86_VSYSCALL_H */
9960 diff -urNp linux-2.6.39.4/arch/x86/include/asm/x86_init.h linux-2.6.39.4/arch/x86/include/asm/x86_init.h
9961 --- linux-2.6.39.4/arch/x86/include/asm/x86_init.h 2011-05-19 00:06:34.000000000 -0400
9962 +++ linux-2.6.39.4/arch/x86/include/asm/x86_init.h 2011-08-05 20:34:06.000000000 -0400
9963 @@ -28,7 +28,7 @@ struct x86_init_mpparse {
9964 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
9965 void (*find_smp_config)(void);
9966 void (*get_smp_config)(unsigned int early);
9967 -};
9968 +} __no_const;
9969
9970 /**
9971 * struct x86_init_resources - platform specific resource related ops
9972 @@ -42,7 +42,7 @@ struct x86_init_resources {
9973 void (*probe_roms)(void);
9974 void (*reserve_resources)(void);
9975 char *(*memory_setup)(void);
9976 -};
9977 +} __no_const;
9978
9979 /**
9980 * struct x86_init_irqs - platform specific interrupt setup
9981 @@ -55,7 +55,7 @@ struct x86_init_irqs {
9982 void (*pre_vector_init)(void);
9983 void (*intr_init)(void);
9984 void (*trap_init)(void);
9985 -};
9986 +} __no_const;
9987
9988 /**
9989 * struct x86_init_oem - oem platform specific customizing functions
9990 @@ -65,7 +65,7 @@ struct x86_init_irqs {
9991 struct x86_init_oem {
9992 void (*arch_setup)(void);
9993 void (*banner)(void);
9994 -};
9995 +} __no_const;
9996
9997 /**
9998 * struct x86_init_mapping - platform specific initial kernel pagetable setup
9999 @@ -76,7 +76,7 @@ struct x86_init_oem {
10000 */
10001 struct x86_init_mapping {
10002 void (*pagetable_reserve)(u64 start, u64 end);
10003 -};
10004 +} __no_const;
10005
10006 /**
10007 * struct x86_init_paging - platform specific paging functions
10008 @@ -86,7 +86,7 @@ struct x86_init_mapping {
10009 struct x86_init_paging {
10010 void (*pagetable_setup_start)(pgd_t *base);
10011 void (*pagetable_setup_done)(pgd_t *base);
10012 -};
10013 +} __no_const;
10014
10015 /**
10016 * struct x86_init_timers - platform specific timer setup
10017 @@ -101,7 +101,7 @@ struct x86_init_timers {
10018 void (*tsc_pre_init)(void);
10019 void (*timer_init)(void);
10020 void (*wallclock_init)(void);
10021 -};
10022 +} __no_const;
10023
10024 /**
10025 * struct x86_init_iommu - platform specific iommu setup
10026 @@ -109,7 +109,7 @@ struct x86_init_timers {
10027 */
10028 struct x86_init_iommu {
10029 int (*iommu_init)(void);
10030 -};
10031 +} __no_const;
10032
10033 /**
10034 * struct x86_init_pci - platform specific pci init functions
10035 @@ -123,7 +123,7 @@ struct x86_init_pci {
10036 int (*init)(void);
10037 void (*init_irq)(void);
10038 void (*fixup_irqs)(void);
10039 -};
10040 +} __no_const;
10041
10042 /**
10043 * struct x86_init_ops - functions for platform specific setup
10044 @@ -139,7 +139,7 @@ struct x86_init_ops {
10045 struct x86_init_timers timers;
10046 struct x86_init_iommu iommu;
10047 struct x86_init_pci pci;
10048 -};
10049 +} __no_const;
10050
10051 /**
10052 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
10053 @@ -147,7 +147,7 @@ struct x86_init_ops {
10054 */
10055 struct x86_cpuinit_ops {
10056 void (*setup_percpu_clockev)(void);
10057 -};
10058 +} __no_const;
10059
10060 /**
10061 * struct x86_platform_ops - platform specific runtime functions
10062 @@ -166,7 +166,7 @@ struct x86_platform_ops {
10063 bool (*is_untracked_pat_range)(u64 start, u64 end);
10064 void (*nmi_init)(void);
10065 int (*i8042_detect)(void);
10066 -};
10067 +} __no_const;
10068
10069 struct pci_dev;
10070
10071 @@ -174,7 +174,7 @@ struct x86_msi_ops {
10072 int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
10073 void (*teardown_msi_irq)(unsigned int irq);
10074 void (*teardown_msi_irqs)(struct pci_dev *dev);
10075 -};
10076 +} __no_const;
10077
10078 extern struct x86_init_ops x86_init;
10079 extern struct x86_cpuinit_ops x86_cpuinit;
10080 diff -urNp linux-2.6.39.4/arch/x86/include/asm/xsave.h linux-2.6.39.4/arch/x86/include/asm/xsave.h
10081 --- linux-2.6.39.4/arch/x86/include/asm/xsave.h 2011-05-19 00:06:34.000000000 -0400
10082 +++ linux-2.6.39.4/arch/x86/include/asm/xsave.h 2011-08-05 19:44:33.000000000 -0400
10083 @@ -65,6 +65,11 @@ static inline int xsave_user(struct xsav
10084 {
10085 int err;
10086
10087 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10088 + if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
10089 + buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
10090 +#endif
10091 +
10092 /*
10093 * Clear the xsave header first, so that reserved fields are
10094 * initialized to zero.
10095 @@ -100,6 +105,11 @@ static inline int xrestore_user(struct x
10096 u32 lmask = mask;
10097 u32 hmask = mask >> 32;
10098
10099 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10100 + if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
10101 + xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
10102 +#endif
10103 +
10104 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
10105 "2:\n"
10106 ".section .fixup,\"ax\"\n"
10107 diff -urNp linux-2.6.39.4/arch/x86/Kconfig linux-2.6.39.4/arch/x86/Kconfig
10108 --- linux-2.6.39.4/arch/x86/Kconfig 2011-05-19 00:06:34.000000000 -0400
10109 +++ linux-2.6.39.4/arch/x86/Kconfig 2011-08-05 19:44:33.000000000 -0400
10110 @@ -224,7 +224,7 @@ config X86_HT
10111
10112 config X86_32_LAZY_GS
10113 def_bool y
10114 - depends on X86_32 && !CC_STACKPROTECTOR
10115 + depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10116
10117 config ARCH_HWEIGHT_CFLAGS
10118 string
10119 @@ -1022,7 +1022,7 @@ choice
10120
10121 config NOHIGHMEM
10122 bool "off"
10123 - depends on !X86_NUMAQ
10124 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10125 ---help---
10126 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10127 However, the address space of 32-bit x86 processors is only 4
10128 @@ -1059,7 +1059,7 @@ config NOHIGHMEM
10129
10130 config HIGHMEM4G
10131 bool "4GB"
10132 - depends on !X86_NUMAQ
10133 + depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10134 ---help---
10135 Select this if you have a 32-bit processor and between 1 and 4
10136 gigabytes of physical RAM.
10137 @@ -1113,7 +1113,7 @@ config PAGE_OFFSET
10138 hex
10139 default 0xB0000000 if VMSPLIT_3G_OPT
10140 default 0x80000000 if VMSPLIT_2G
10141 - default 0x78000000 if VMSPLIT_2G_OPT
10142 + default 0x70000000 if VMSPLIT_2G_OPT
10143 default 0x40000000 if VMSPLIT_1G
10144 default 0xC0000000
10145 depends on X86_32
10146 @@ -1457,7 +1457,7 @@ config ARCH_USES_PG_UNCACHED
10147
10148 config EFI
10149 bool "EFI runtime service support"
10150 - depends on ACPI
10151 + depends on ACPI && !PAX_KERNEXEC
10152 ---help---
10153 This enables the kernel to use EFI runtime services that are
10154 available (such as the EFI variable services).
10155 @@ -1487,6 +1487,7 @@ config SECCOMP
10156
10157 config CC_STACKPROTECTOR
10158 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
10159 + depends on X86_64 || !PAX_MEMORY_UDEREF
10160 ---help---
10161 This option turns on the -fstack-protector GCC feature. This
10162 feature puts, at the beginning of functions, a canary value on
10163 @@ -1544,6 +1545,7 @@ config KEXEC_JUMP
10164 config PHYSICAL_START
10165 hex "Physical address where the kernel is loaded" if (EXPERT || CRASH_DUMP)
10166 default "0x1000000"
10167 + range 0x400000 0x40000000
10168 ---help---
10169 This gives the physical address where the kernel is loaded.
10170
10171 @@ -1607,6 +1609,7 @@ config X86_NEED_RELOCS
10172 config PHYSICAL_ALIGN
10173 hex "Alignment value to which kernel should be aligned" if X86_32
10174 default "0x1000000"
10175 + range 0x400000 0x1000000 if PAX_KERNEXEC
10176 range 0x2000 0x1000000
10177 ---help---
10178 This value puts the alignment restrictions on physical address
10179 @@ -1638,9 +1641,10 @@ config HOTPLUG_CPU
10180 Say N if you want to disable CPU hotplug.
10181
10182 config COMPAT_VDSO
10183 - def_bool y
10184 + def_bool n
10185 prompt "Compat VDSO support"
10186 depends on X86_32 || IA32_EMULATION
10187 + depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
10188 ---help---
10189 Map the 32-bit VDSO to the predictable old-style address too.
10190
10191 diff -urNp linux-2.6.39.4/arch/x86/Kconfig.cpu linux-2.6.39.4/arch/x86/Kconfig.cpu
10192 --- linux-2.6.39.4/arch/x86/Kconfig.cpu 2011-05-19 00:06:34.000000000 -0400
10193 +++ linux-2.6.39.4/arch/x86/Kconfig.cpu 2011-08-05 19:44:33.000000000 -0400
10194 @@ -334,7 +334,7 @@ config X86_PPRO_FENCE
10195
10196 config X86_F00F_BUG
10197 def_bool y
10198 - depends on M586MMX || M586TSC || M586 || M486 || M386
10199 + depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
10200
10201 config X86_INVD_BUG
10202 def_bool y
10203 @@ -358,7 +358,7 @@ config X86_POPAD_OK
10204
10205 config X86_ALIGNMENT_16
10206 def_bool y
10207 - depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10208 + depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10209
10210 config X86_INTEL_USERCOPY
10211 def_bool y
10212 @@ -404,7 +404,7 @@ config X86_CMPXCHG64
10213 # generates cmov.
10214 config X86_CMOV
10215 def_bool y
10216 - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10217 + depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10218
10219 config X86_MINIMUM_CPU_FAMILY
10220 int
10221 diff -urNp linux-2.6.39.4/arch/x86/Kconfig.debug linux-2.6.39.4/arch/x86/Kconfig.debug
10222 --- linux-2.6.39.4/arch/x86/Kconfig.debug 2011-05-19 00:06:34.000000000 -0400
10223 +++ linux-2.6.39.4/arch/x86/Kconfig.debug 2011-08-05 19:44:33.000000000 -0400
10224 @@ -101,7 +101,7 @@ config X86_PTDUMP
10225 config DEBUG_RODATA
10226 bool "Write protect kernel read-only data structures"
10227 default y
10228 - depends on DEBUG_KERNEL
10229 + depends on DEBUG_KERNEL && BROKEN
10230 ---help---
10231 Mark the kernel read-only data as write-protected in the pagetables,
10232 in order to catch accidental (and incorrect) writes to such const
10233 @@ -119,7 +119,7 @@ config DEBUG_RODATA_TEST
10234
10235 config DEBUG_SET_MODULE_RONX
10236 bool "Set loadable kernel module data as NX and text as RO"
10237 - depends on MODULES
10238 + depends on MODULES && BROKEN
10239 ---help---
10240 This option helps catch unintended modifications to loadable
10241 kernel module's text and read-only data. It also prevents execution
10242 diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile
10243 --- linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile 2011-05-19 00:06:34.000000000 -0400
10244 +++ linux-2.6.39.4/arch/x86/kernel/acpi/realmode/Makefile 2011-08-05 20:34:06.000000000 -0400
10245 @@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
10246 $(call cc-option, -fno-stack-protector) \
10247 $(call cc-option, -mpreferred-stack-boundary=2)
10248 KBUILD_CFLAGS += $(call cc-option, -m32)
10249 +ifdef CONSTIFY_PLUGIN
10250 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
10251 +endif
10252 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10253 GCOV_PROFILE := n
10254
10255 diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S
10256 --- linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-09 09:18:51.000000000 -0400
10257 +++ linux-2.6.39.4/arch/x86/kernel/acpi/realmode/wakeup.S 2011-08-05 19:44:33.000000000 -0400
10258 @@ -108,6 +108,9 @@ wakeup_code:
10259 /* Do any other stuff... */
10260
10261 #ifndef CONFIG_64BIT
10262 + /* Recheck NX bit overrides (64bit path does this in trampoline */
10263 + call verify_cpu
10264 +
10265 /* This could also be done in C code... */
10266 movl pmode_cr3, %eax
10267 movl %eax, %cr3
10268 @@ -131,6 +134,7 @@ wakeup_code:
10269 movl pmode_cr0, %eax
10270 movl %eax, %cr0
10271 jmp pmode_return
10272 +# include "../../verify_cpu.S"
10273 #else
10274 pushw $0
10275 pushw trampoline_segment
10276 diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c
10277 --- linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c 2011-07-09 09:18:51.000000000 -0400
10278 +++ linux-2.6.39.4/arch/x86/kernel/acpi/sleep.c 2011-08-05 19:44:33.000000000 -0400
10279 @@ -94,8 +94,12 @@ int acpi_suspend_lowlevel(void)
10280 header->trampoline_segment = trampoline_address() >> 4;
10281 #ifdef CONFIG_SMP
10282 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
10283 +
10284 + pax_open_kernel();
10285 early_gdt_descr.address =
10286 (unsigned long)get_cpu_gdt_table(smp_processor_id());
10287 + pax_close_kernel();
10288 +
10289 initial_gs = per_cpu_offset(smp_processor_id());
10290 #endif
10291 initial_code = (unsigned long)wakeup_long64;
10292 diff -urNp linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S
10293 --- linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S 2011-05-19 00:06:34.000000000 -0400
10294 +++ linux-2.6.39.4/arch/x86/kernel/acpi/wakeup_32.S 2011-08-05 19:44:33.000000000 -0400
10295 @@ -30,13 +30,11 @@ wakeup_pmode_return:
10296 # and restore the stack ... but you need gdt for this to work
10297 movl saved_context_esp, %esp
10298
10299 - movl %cs:saved_magic, %eax
10300 - cmpl $0x12345678, %eax
10301 + cmpl $0x12345678, saved_magic
10302 jne bogus_magic
10303
10304 # jump to place where we left off
10305 - movl saved_eip, %eax
10306 - jmp *%eax
10307 + jmp *(saved_eip)
10308
10309 bogus_magic:
10310 jmp bogus_magic
10311 diff -urNp linux-2.6.39.4/arch/x86/kernel/alternative.c linux-2.6.39.4/arch/x86/kernel/alternative.c
10312 --- linux-2.6.39.4/arch/x86/kernel/alternative.c 2011-05-19 00:06:34.000000000 -0400
10313 +++ linux-2.6.39.4/arch/x86/kernel/alternative.c 2011-08-05 19:44:33.000000000 -0400
10314 @@ -248,7 +248,7 @@ static void alternatives_smp_lock(const
10315 if (!*poff || ptr < text || ptr >= text_end)
10316 continue;
10317 /* turn DS segment override prefix into lock prefix */
10318 - if (*ptr == 0x3e)
10319 + if (*ktla_ktva(ptr) == 0x3e)
10320 text_poke(ptr, ((unsigned char []){0xf0}), 1);
10321 };
10322 mutex_unlock(&text_mutex);
10323 @@ -269,7 +269,7 @@ static void alternatives_smp_unlock(cons
10324 if (!*poff || ptr < text || ptr >= text_end)
10325 continue;
10326 /* turn lock prefix into DS segment override prefix */
10327 - if (*ptr == 0xf0)
10328 + if (*ktla_ktva(ptr) == 0xf0)
10329 text_poke(ptr, ((unsigned char []){0x3E}), 1);
10330 };
10331 mutex_unlock(&text_mutex);
10332 @@ -438,7 +438,7 @@ void __init_or_module apply_paravirt(str
10333
10334 BUG_ON(p->len > MAX_PATCH_LEN);
10335 /* prep the buffer with the original instructions */
10336 - memcpy(insnbuf, p->instr, p->len);
10337 + memcpy(insnbuf, ktla_ktva(p->instr), p->len);
10338 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
10339 (unsigned long)p->instr, p->len);
10340
10341 @@ -506,7 +506,7 @@ void __init alternative_instructions(voi
10342 if (smp_alt_once)
10343 free_init_pages("SMP alternatives",
10344 (unsigned long)__smp_locks,
10345 - (unsigned long)__smp_locks_end);
10346 + PAGE_ALIGN((unsigned long)__smp_locks_end));
10347
10348 restart_nmi();
10349 }
10350 @@ -523,13 +523,17 @@ void __init alternative_instructions(voi
10351 * instructions. And on the local CPU you need to be protected again NMI or MCE
10352 * handlers seeing an inconsistent instruction while you patch.
10353 */
10354 -void *__init_or_module text_poke_early(void *addr, const void *opcode,
10355 +void *__kprobes text_poke_early(void *addr, const void *opcode,
10356 size_t len)
10357 {
10358 unsigned long flags;
10359 local_irq_save(flags);
10360 - memcpy(addr, opcode, len);
10361 +
10362 + pax_open_kernel();
10363 + memcpy(ktla_ktva(addr), opcode, len);
10364 sync_core();
10365 + pax_close_kernel();
10366 +
10367 local_irq_restore(flags);
10368 /* Could also do a CLFLUSH here to speed up CPU recovery; but
10369 that causes hangs on some VIA CPUs. */
10370 @@ -551,36 +555,22 @@ void *__init_or_module text_poke_early(v
10371 */
10372 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
10373 {
10374 - unsigned long flags;
10375 - char *vaddr;
10376 + unsigned char *vaddr = ktla_ktva(addr);
10377 struct page *pages[2];
10378 - int i;
10379 + size_t i;
10380
10381 if (!core_kernel_text((unsigned long)addr)) {
10382 - pages[0] = vmalloc_to_page(addr);
10383 - pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
10384 + pages[0] = vmalloc_to_page(vaddr);
10385 + pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
10386 } else {
10387 - pages[0] = virt_to_page(addr);
10388 + pages[0] = virt_to_page(vaddr);
10389 WARN_ON(!PageReserved(pages[0]));
10390 - pages[1] = virt_to_page(addr + PAGE_SIZE);
10391 + pages[1] = virt_to_page(vaddr + PAGE_SIZE);
10392 }
10393 BUG_ON(!pages[0]);
10394 - local_irq_save(flags);
10395 - set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
10396 - if (pages[1])
10397 - set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
10398 - vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
10399 - memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
10400 - clear_fixmap(FIX_TEXT_POKE0);
10401 - if (pages[1])
10402 - clear_fixmap(FIX_TEXT_POKE1);
10403 - local_flush_tlb();
10404 - sync_core();
10405 - /* Could also do a CLFLUSH here to speed up CPU recovery; but
10406 - that causes hangs on some VIA CPUs. */
10407 + text_poke_early(addr, opcode, len);
10408 for (i = 0; i < len; i++)
10409 - BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
10410 - local_irq_restore(flags);
10411 + BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
10412 return addr;
10413 }
10414
10415 @@ -682,9 +672,9 @@ void __kprobes text_poke_smp_batch(struc
10416 #if defined(CONFIG_DYNAMIC_FTRACE) || defined(HAVE_JUMP_LABEL)
10417
10418 #ifdef CONFIG_X86_64
10419 -unsigned char ideal_nop5[5] = { 0x66, 0x66, 0x66, 0x66, 0x90 };
10420 +unsigned char ideal_nop5[5] __read_only = { 0x66, 0x66, 0x66, 0x66, 0x90 };
10421 #else
10422 -unsigned char ideal_nop5[5] = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
10423 +unsigned char ideal_nop5[5] __read_only = { 0x3e, 0x8d, 0x74, 0x26, 0x00 };
10424 #endif
10425
10426 void __init arch_init_ideal_nop5(void)
10427 diff -urNp linux-2.6.39.4/arch/x86/kernel/apic/apic.c linux-2.6.39.4/arch/x86/kernel/apic/apic.c
10428 --- linux-2.6.39.4/arch/x86/kernel/apic/apic.c 2011-05-19 00:06:34.000000000 -0400
10429 +++ linux-2.6.39.4/arch/x86/kernel/apic/apic.c 2011-08-17 20:01:50.000000000 -0400
10430 @@ -173,7 +173,7 @@ int first_system_vector = 0xfe;
10431 /*
10432 * Debug level, exported for io_apic.c
10433 */
10434 -unsigned int apic_verbosity;
10435 +int apic_verbosity;
10436
10437 int pic_mode;
10438
10439 @@ -1821,7 +1821,7 @@ void smp_error_interrupt(struct pt_regs
10440 apic_write(APIC_ESR, 0);
10441 v1 = apic_read(APIC_ESR);
10442 ack_APIC_irq();
10443 - atomic_inc(&irq_err_count);
10444 + atomic_inc_unchecked(&irq_err_count);
10445
10446 /*
10447 * Here is what the APIC error bits mean:
10448 @@ -2204,6 +2204,8 @@ static int __cpuinit apic_cluster_num(vo
10449 u16 *bios_cpu_apicid;
10450 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
10451
10452 + pax_track_stack();
10453 +
10454 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
10455 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
10456
10457 diff -urNp linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c
10458 --- linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c 2011-06-03 00:04:13.000000000 -0400
10459 +++ linux-2.6.39.4/arch/x86/kernel/apic/io_apic.c 2011-08-05 19:44:33.000000000 -0400
10460 @@ -623,7 +623,7 @@ struct IO_APIC_route_entry **alloc_ioapi
10461 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
10462 GFP_ATOMIC);
10463 if (!ioapic_entries)
10464 - return 0;
10465 + return NULL;
10466
10467 for (apic = 0; apic < nr_ioapics; apic++) {
10468 ioapic_entries[apic] =
10469 @@ -640,7 +640,7 @@ nomem:
10470 kfree(ioapic_entries[apic]);
10471 kfree(ioapic_entries);
10472
10473 - return 0;
10474 + return NULL;
10475 }
10476
10477 /*
10478 @@ -1040,7 +1040,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
10479 }
10480 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
10481
10482 -void lock_vector_lock(void)
10483 +void lock_vector_lock(void) __acquires(vector_lock)
10484 {
10485 /* Used to the online set of cpus does not change
10486 * during assign_irq_vector.
10487 @@ -1048,7 +1048,7 @@ void lock_vector_lock(void)
10488 raw_spin_lock(&vector_lock);
10489 }
10490
10491 -void unlock_vector_lock(void)
10492 +void unlock_vector_lock(void) __releases(vector_lock)
10493 {
10494 raw_spin_unlock(&vector_lock);
10495 }
10496 @@ -2379,7 +2379,7 @@ static void ack_apic_edge(struct irq_dat
10497 ack_APIC_irq();
10498 }
10499
10500 -atomic_t irq_mis_count;
10501 +atomic_unchecked_t irq_mis_count;
10502
10503 /*
10504 * IO-APIC versions below 0x20 don't support EOI register.
10505 @@ -2487,7 +2487,7 @@ static void ack_apic_level(struct irq_da
10506 * at the cpu.
10507 */
10508 if (!(v & (1 << (i & 0x1f)))) {
10509 - atomic_inc(&irq_mis_count);
10510 + atomic_inc_unchecked(&irq_mis_count);
10511
10512 eoi_ioapic_irq(irq, cfg);
10513 }
10514 diff -urNp linux-2.6.39.4/arch/x86/kernel/apm_32.c linux-2.6.39.4/arch/x86/kernel/apm_32.c
10515 --- linux-2.6.39.4/arch/x86/kernel/apm_32.c 2011-05-19 00:06:34.000000000 -0400
10516 +++ linux-2.6.39.4/arch/x86/kernel/apm_32.c 2011-08-05 19:44:33.000000000 -0400
10517 @@ -412,7 +412,7 @@ static DEFINE_MUTEX(apm_mutex);
10518 * This is for buggy BIOS's that refer to (real mode) segment 0x40
10519 * even though they are called in protected mode.
10520 */
10521 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
10522 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
10523 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
10524
10525 static const char driver_version[] = "1.16ac"; /* no spaces */
10526 @@ -590,7 +590,10 @@ static long __apm_bios_call(void *_call)
10527 BUG_ON(cpu != 0);
10528 gdt = get_cpu_gdt_table(cpu);
10529 save_desc_40 = gdt[0x40 / 8];
10530 +
10531 + pax_open_kernel();
10532 gdt[0x40 / 8] = bad_bios_desc;
10533 + pax_close_kernel();
10534
10535 apm_irq_save(flags);
10536 APM_DO_SAVE_SEGS;
10537 @@ -599,7 +602,11 @@ static long __apm_bios_call(void *_call)
10538 &call->esi);
10539 APM_DO_RESTORE_SEGS;
10540 apm_irq_restore(flags);
10541 +
10542 + pax_open_kernel();
10543 gdt[0x40 / 8] = save_desc_40;
10544 + pax_close_kernel();
10545 +
10546 put_cpu();
10547
10548 return call->eax & 0xff;
10549 @@ -666,7 +673,10 @@ static long __apm_bios_call_simple(void
10550 BUG_ON(cpu != 0);
10551 gdt = get_cpu_gdt_table(cpu);
10552 save_desc_40 = gdt[0x40 / 8];
10553 +
10554 + pax_open_kernel();
10555 gdt[0x40 / 8] = bad_bios_desc;
10556 + pax_close_kernel();
10557
10558 apm_irq_save(flags);
10559 APM_DO_SAVE_SEGS;
10560 @@ -674,7 +684,11 @@ static long __apm_bios_call_simple(void
10561 &call->eax);
10562 APM_DO_RESTORE_SEGS;
10563 apm_irq_restore(flags);
10564 +
10565 + pax_open_kernel();
10566 gdt[0x40 / 8] = save_desc_40;
10567 + pax_close_kernel();
10568 +
10569 put_cpu();
10570 return error;
10571 }
10572 @@ -2351,12 +2365,15 @@ static int __init apm_init(void)
10573 * code to that CPU.
10574 */
10575 gdt = get_cpu_gdt_table(0);
10576 +
10577 + pax_open_kernel();
10578 set_desc_base(&gdt[APM_CS >> 3],
10579 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
10580 set_desc_base(&gdt[APM_CS_16 >> 3],
10581 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
10582 set_desc_base(&gdt[APM_DS >> 3],
10583 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
10584 + pax_close_kernel();
10585
10586 proc_create("apm", 0, NULL, &apm_file_ops);
10587
10588 diff -urNp linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c
10589 --- linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c 2011-05-19 00:06:34.000000000 -0400
10590 +++ linux-2.6.39.4/arch/x86/kernel/asm-offsets_64.c 2011-08-05 19:44:33.000000000 -0400
10591 @@ -69,6 +69,7 @@ int main(void)
10592 BLANK();
10593 #undef ENTRY
10594
10595 + DEFINE(TSS_size, sizeof(struct tss_struct));
10596 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
10597 BLANK();
10598
10599 diff -urNp linux-2.6.39.4/arch/x86/kernel/asm-offsets.c linux-2.6.39.4/arch/x86/kernel/asm-offsets.c
10600 --- linux-2.6.39.4/arch/x86/kernel/asm-offsets.c 2011-05-19 00:06:34.000000000 -0400
10601 +++ linux-2.6.39.4/arch/x86/kernel/asm-offsets.c 2011-08-05 19:44:33.000000000 -0400
10602 @@ -33,6 +33,8 @@ void common(void) {
10603 OFFSET(TI_status, thread_info, status);
10604 OFFSET(TI_addr_limit, thread_info, addr_limit);
10605 OFFSET(TI_preempt_count, thread_info, preempt_count);
10606 + OFFSET(TI_lowest_stack, thread_info, lowest_stack);
10607 + DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
10608
10609 BLANK();
10610 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
10611 @@ -53,8 +55,26 @@ void common(void) {
10612 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
10613 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
10614 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
10615 +
10616 +#ifdef CONFIG_PAX_KERNEXEC
10617 + OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
10618 +#endif
10619 +
10620 +#ifdef CONFIG_PAX_MEMORY_UDEREF
10621 + OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
10622 + OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
10623 +#ifdef CONFIG_X86_64
10624 + OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
10625 +#endif
10626 #endif
10627
10628 +#endif
10629 +
10630 + BLANK();
10631 + DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
10632 + DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
10633 + DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
10634 +
10635 #ifdef CONFIG_XEN
10636 BLANK();
10637 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
10638 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/amd.c linux-2.6.39.4/arch/x86/kernel/cpu/amd.c
10639 --- linux-2.6.39.4/arch/x86/kernel/cpu/amd.c 2011-06-03 00:04:13.000000000 -0400
10640 +++ linux-2.6.39.4/arch/x86/kernel/cpu/amd.c 2011-08-05 19:44:33.000000000 -0400
10641 @@ -647,7 +647,7 @@ static unsigned int __cpuinit amd_size_c
10642 unsigned int size)
10643 {
10644 /* AMD errata T13 (order #21922) */
10645 - if ((c->x86 == 6)) {
10646 + if (c->x86 == 6) {
10647 /* Duron Rev A0 */
10648 if (c->x86_model == 3 && c->x86_mask == 0)
10649 size = 64;
10650 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/common.c linux-2.6.39.4/arch/x86/kernel/cpu/common.c
10651 --- linux-2.6.39.4/arch/x86/kernel/cpu/common.c 2011-06-03 00:04:13.000000000 -0400
10652 +++ linux-2.6.39.4/arch/x86/kernel/cpu/common.c 2011-08-05 19:44:33.000000000 -0400
10653 @@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
10654
10655 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
10656
10657 -DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
10658 -#ifdef CONFIG_X86_64
10659 - /*
10660 - * We need valid kernel segments for data and code in long mode too
10661 - * IRET will check the segment types kkeil 2000/10/28
10662 - * Also sysret mandates a special GDT layout
10663 - *
10664 - * TLS descriptors are currently at a different place compared to i386.
10665 - * Hopefully nobody expects them at a fixed place (Wine?)
10666 - */
10667 - [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
10668 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
10669 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
10670 - [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
10671 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
10672 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
10673 -#else
10674 - [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
10675 - [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10676 - [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
10677 - [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
10678 - /*
10679 - * Segments used for calling PnP BIOS have byte granularity.
10680 - * They code segments and data segments have fixed 64k limits,
10681 - * the transfer segment sizes are set at run time.
10682 - */
10683 - /* 32-bit code */
10684 - [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10685 - /* 16-bit code */
10686 - [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10687 - /* 16-bit data */
10688 - [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
10689 - /* 16-bit data */
10690 - [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
10691 - /* 16-bit data */
10692 - [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
10693 - /*
10694 - * The APM segments have byte granularity and their bases
10695 - * are set at run time. All have 64k limits.
10696 - */
10697 - /* 32-bit code */
10698 - [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
10699 - /* 16-bit code */
10700 - [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
10701 - /* data */
10702 - [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
10703 -
10704 - [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10705 - [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
10706 - GDT_STACK_CANARY_INIT
10707 -#endif
10708 -} };
10709 -EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
10710 -
10711 static int __init x86_xsave_setup(char *s)
10712 {
10713 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
10714 @@ -352,7 +298,7 @@ void switch_to_new_gdt(int cpu)
10715 {
10716 struct desc_ptr gdt_descr;
10717
10718 - gdt_descr.address = (long)get_cpu_gdt_table(cpu);
10719 + gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
10720 gdt_descr.size = GDT_SIZE - 1;
10721 load_gdt(&gdt_descr);
10722 /* Reload the per-cpu base */
10723 @@ -824,6 +770,10 @@ static void __cpuinit identify_cpu(struc
10724 /* Filter out anything that depends on CPUID levels we don't have */
10725 filter_cpuid_features(c, true);
10726
10727 +#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
10728 + setup_clear_cpu_cap(X86_FEATURE_SEP);
10729 +#endif
10730 +
10731 /* If the model name is still unset, do table lookup. */
10732 if (!c->x86_model_id[0]) {
10733 const char *p;
10734 @@ -1003,6 +953,9 @@ static __init int setup_disablecpuid(cha
10735 }
10736 __setup("clearcpuid=", setup_disablecpuid);
10737
10738 +DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
10739 +EXPORT_PER_CPU_SYMBOL(current_tinfo);
10740 +
10741 #ifdef CONFIG_X86_64
10742 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
10743
10744 @@ -1018,7 +971,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
10745 EXPORT_PER_CPU_SYMBOL(current_task);
10746
10747 DEFINE_PER_CPU(unsigned long, kernel_stack) =
10748 - (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
10749 + (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
10750 EXPORT_PER_CPU_SYMBOL(kernel_stack);
10751
10752 DEFINE_PER_CPU(char *, irq_stack_ptr) =
10753 @@ -1083,7 +1036,7 @@ struct pt_regs * __cpuinit idle_regs(str
10754 {
10755 memset(regs, 0, sizeof(struct pt_regs));
10756 regs->fs = __KERNEL_PERCPU;
10757 - regs->gs = __KERNEL_STACK_CANARY;
10758 + savesegment(gs, regs->gs);
10759
10760 return regs;
10761 }
10762 @@ -1138,7 +1091,7 @@ void __cpuinit cpu_init(void)
10763 int i;
10764
10765 cpu = stack_smp_processor_id();
10766 - t = &per_cpu(init_tss, cpu);
10767 + t = init_tss + cpu;
10768 oist = &per_cpu(orig_ist, cpu);
10769
10770 #ifdef CONFIG_NUMA
10771 @@ -1164,7 +1117,7 @@ void __cpuinit cpu_init(void)
10772 switch_to_new_gdt(cpu);
10773 loadsegment(fs, 0);
10774
10775 - load_idt((const struct desc_ptr *)&idt_descr);
10776 + load_idt(&idt_descr);
10777
10778 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
10779 syscall_init();
10780 @@ -1173,7 +1126,6 @@ void __cpuinit cpu_init(void)
10781 wrmsrl(MSR_KERNEL_GS_BASE, 0);
10782 barrier();
10783
10784 - x86_configure_nx();
10785 if (cpu != 0)
10786 enable_x2apic();
10787
10788 @@ -1227,7 +1179,7 @@ void __cpuinit cpu_init(void)
10789 {
10790 int cpu = smp_processor_id();
10791 struct task_struct *curr = current;
10792 - struct tss_struct *t = &per_cpu(init_tss, cpu);
10793 + struct tss_struct *t = init_tss + cpu;
10794 struct thread_struct *thread = &curr->thread;
10795
10796 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
10797 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/intel.c linux-2.6.39.4/arch/x86/kernel/cpu/intel.c
10798 --- linux-2.6.39.4/arch/x86/kernel/cpu/intel.c 2011-05-19 00:06:34.000000000 -0400
10799 +++ linux-2.6.39.4/arch/x86/kernel/cpu/intel.c 2011-08-05 19:44:33.000000000 -0400
10800 @@ -161,7 +161,7 @@ static void __cpuinit trap_init_f00f_bug
10801 * Update the IDT descriptor and reload the IDT so that
10802 * it uses the read-only mapped virtual address.
10803 */
10804 - idt_descr.address = fix_to_virt(FIX_F00F_IDT);
10805 + idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
10806 load_idt(&idt_descr);
10807 }
10808 #endif
10809 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/Makefile linux-2.6.39.4/arch/x86/kernel/cpu/Makefile
10810 --- linux-2.6.39.4/arch/x86/kernel/cpu/Makefile 2011-05-19 00:06:34.000000000 -0400
10811 +++ linux-2.6.39.4/arch/x86/kernel/cpu/Makefile 2011-08-05 19:44:33.000000000 -0400
10812 @@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
10813 CFLAGS_REMOVE_perf_event.o = -pg
10814 endif
10815
10816 -# Make sure load_percpu_segment has no stackprotector
10817 -nostackp := $(call cc-option, -fno-stack-protector)
10818 -CFLAGS_common.o := $(nostackp)
10819 -
10820 obj-y := intel_cacheinfo.o scattered.o topology.o
10821 obj-y += proc.o capflags.o powerflags.o common.o
10822 obj-y += vmware.o hypervisor.o sched.o mshyperv.o
10823 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c
10824 --- linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-19 00:06:34.000000000 -0400
10825 +++ linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce.c 2011-08-05 19:44:33.000000000 -0400
10826 @@ -46,6 +46,7 @@
10827 #include <asm/ipi.h>
10828 #include <asm/mce.h>
10829 #include <asm/msr.h>
10830 +#include <asm/local.h>
10831
10832 #include "mce-internal.h"
10833
10834 @@ -220,7 +221,7 @@ static void print_mce(struct mce *m)
10835 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
10836 m->cs, m->ip);
10837
10838 - if (m->cs == __KERNEL_CS)
10839 + if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
10840 print_symbol("{%s}", m->ip);
10841 pr_cont("\n");
10842 }
10843 @@ -244,10 +245,10 @@ static void print_mce(struct mce *m)
10844
10845 #define PANIC_TIMEOUT 5 /* 5 seconds */
10846
10847 -static atomic_t mce_paniced;
10848 +static atomic_unchecked_t mce_paniced;
10849
10850 static int fake_panic;
10851 -static atomic_t mce_fake_paniced;
10852 +static atomic_unchecked_t mce_fake_paniced;
10853
10854 /* Panic in progress. Enable interrupts and wait for final IPI */
10855 static void wait_for_panic(void)
10856 @@ -271,7 +272,7 @@ static void mce_panic(char *msg, struct
10857 /*
10858 * Make sure only one CPU runs in machine check panic
10859 */
10860 - if (atomic_inc_return(&mce_paniced) > 1)
10861 + if (atomic_inc_return_unchecked(&mce_paniced) > 1)
10862 wait_for_panic();
10863 barrier();
10864
10865 @@ -279,7 +280,7 @@ static void mce_panic(char *msg, struct
10866 console_verbose();
10867 } else {
10868 /* Don't log too much for fake panic */
10869 - if (atomic_inc_return(&mce_fake_paniced) > 1)
10870 + if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
10871 return;
10872 }
10873 /* First print corrected ones that are still unlogged */
10874 @@ -647,7 +648,7 @@ static int mce_timed_out(u64 *t)
10875 * might have been modified by someone else.
10876 */
10877 rmb();
10878 - if (atomic_read(&mce_paniced))
10879 + if (atomic_read_unchecked(&mce_paniced))
10880 wait_for_panic();
10881 if (!monarch_timeout)
10882 goto out;
10883 @@ -1461,14 +1462,14 @@ void __cpuinit mcheck_cpu_init(struct cp
10884 */
10885
10886 static DEFINE_SPINLOCK(mce_state_lock);
10887 -static int open_count; /* #times opened */
10888 +static local_t open_count; /* #times opened */
10889 static int open_exclu; /* already open exclusive? */
10890
10891 static int mce_open(struct inode *inode, struct file *file)
10892 {
10893 spin_lock(&mce_state_lock);
10894
10895 - if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
10896 + if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
10897 spin_unlock(&mce_state_lock);
10898
10899 return -EBUSY;
10900 @@ -1476,7 +1477,7 @@ static int mce_open(struct inode *inode,
10901
10902 if (file->f_flags & O_EXCL)
10903 open_exclu = 1;
10904 - open_count++;
10905 + local_inc(&open_count);
10906
10907 spin_unlock(&mce_state_lock);
10908
10909 @@ -1487,7 +1488,7 @@ static int mce_release(struct inode *ino
10910 {
10911 spin_lock(&mce_state_lock);
10912
10913 - open_count--;
10914 + local_dec(&open_count);
10915 open_exclu = 0;
10916
10917 spin_unlock(&mce_state_lock);
10918 @@ -2174,7 +2175,7 @@ struct dentry *mce_get_debugfs_dir(void)
10919 static void mce_reset(void)
10920 {
10921 cpu_missing = 0;
10922 - atomic_set(&mce_fake_paniced, 0);
10923 + atomic_set_unchecked(&mce_fake_paniced, 0);
10924 atomic_set(&mce_executing, 0);
10925 atomic_set(&mce_callin, 0);
10926 atomic_set(&global_nwo, 0);
10927 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c
10928 --- linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-05-19 00:06:34.000000000 -0400
10929 +++ linux-2.6.39.4/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-05 20:34:06.000000000 -0400
10930 @@ -215,7 +215,9 @@ static int inject_init(void)
10931 if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL))
10932 return -ENOMEM;
10933 printk(KERN_INFO "Machine check injector initialized\n");
10934 - mce_chrdev_ops.write = mce_write;
10935 + pax_open_kernel();
10936 + *(void **)&mce_chrdev_ops.write = mce_write;
10937 + pax_close_kernel();
10938 register_die_notifier(&mce_raise_nb);
10939 return 0;
10940 }
10941 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c
10942 --- linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c 2011-05-19 00:06:34.000000000 -0400
10943 +++ linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/main.c 2011-08-05 19:44:33.000000000 -0400
10944 @@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
10945 u64 size_or_mask, size_and_mask;
10946 static bool mtrr_aps_delayed_init;
10947
10948 -static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
10949 +static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
10950
10951 const struct mtrr_ops *mtrr_if;
10952
10953 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h
10954 --- linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-05-19 00:06:34.000000000 -0400
10955 +++ linux-2.6.39.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-08-05 20:34:06.000000000 -0400
10956 @@ -12,8 +12,8 @@
10957 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
10958
10959 struct mtrr_ops {
10960 - u32 vendor;
10961 - u32 use_intel_if;
10962 + const u32 vendor;
10963 + const u32 use_intel_if;
10964 void (*set)(unsigned int reg, unsigned long base,
10965 unsigned long size, mtrr_type type);
10966 void (*set_all)(void);
10967 diff -urNp linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c
10968 --- linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c 2011-05-19 00:06:34.000000000 -0400
10969 +++ linux-2.6.39.4/arch/x86/kernel/cpu/perf_event.c 2011-08-05 19:44:33.000000000 -0400
10970 @@ -774,6 +774,8 @@ static int x86_schedule_events(struct cp
10971 int i, j, w, wmax, num = 0;
10972 struct hw_perf_event *hwc;
10973
10974 + pax_track_stack();
10975 +
10976 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
10977
10978 for (i = 0; i < n; i++) {
10979 @@ -1878,7 +1880,7 @@ perf_callchain_user(struct perf_callchai
10980 break;
10981
10982 perf_callchain_store(entry, frame.return_address);
10983 - fp = frame.next_frame;
10984 + fp = (__force const void __user *)frame.next_frame;
10985 }
10986 }
10987
10988 diff -urNp linux-2.6.39.4/arch/x86/kernel/crash.c linux-2.6.39.4/arch/x86/kernel/crash.c
10989 --- linux-2.6.39.4/arch/x86/kernel/crash.c 2011-05-19 00:06:34.000000000 -0400
10990 +++ linux-2.6.39.4/arch/x86/kernel/crash.c 2011-08-05 19:44:33.000000000 -0400
10991 @@ -42,7 +42,7 @@ static void kdump_nmi_callback(int cpu,
10992 regs = args->regs;
10993
10994 #ifdef CONFIG_X86_32
10995 - if (!user_mode_vm(regs)) {
10996 + if (!user_mode(regs)) {
10997 crash_fixup_ss_esp(&fixed_regs, regs);
10998 regs = &fixed_regs;
10999 }
11000 diff -urNp linux-2.6.39.4/arch/x86/kernel/doublefault_32.c linux-2.6.39.4/arch/x86/kernel/doublefault_32.c
11001 --- linux-2.6.39.4/arch/x86/kernel/doublefault_32.c 2011-05-19 00:06:34.000000000 -0400
11002 +++ linux-2.6.39.4/arch/x86/kernel/doublefault_32.c 2011-08-05 19:44:33.000000000 -0400
11003 @@ -11,7 +11,7 @@
11004
11005 #define DOUBLEFAULT_STACKSIZE (1024)
11006 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
11007 -#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
11008 +#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
11009
11010 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
11011
11012 @@ -21,7 +21,7 @@ static void doublefault_fn(void)
11013 unsigned long gdt, tss;
11014
11015 store_gdt(&gdt_desc);
11016 - gdt = gdt_desc.address;
11017 + gdt = (unsigned long)gdt_desc.address;
11018
11019 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
11020
11021 @@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
11022 /* 0x2 bit is always set */
11023 .flags = X86_EFLAGS_SF | 0x2,
11024 .sp = STACK_START,
11025 - .es = __USER_DS,
11026 + .es = __KERNEL_DS,
11027 .cs = __KERNEL_CS,
11028 .ss = __KERNEL_DS,
11029 - .ds = __USER_DS,
11030 + .ds = __KERNEL_DS,
11031 .fs = __KERNEL_PERCPU,
11032
11033 .__cr3 = __pa_nodebug(swapper_pg_dir),
11034 diff -urNp linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c
11035 --- linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c 2011-05-19 00:06:34.000000000 -0400
11036 +++ linux-2.6.39.4/arch/x86/kernel/dumpstack_32.c 2011-08-05 19:44:33.000000000 -0400
11037 @@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task
11038 bp = stack_frame(task, regs);
11039
11040 for (;;) {
11041 - struct thread_info *context;
11042 + void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11043
11044 - context = (struct thread_info *)
11045 - ((unsigned long)stack & (~(THREAD_SIZE - 1)));
11046 - bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
11047 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11048
11049 - stack = (unsigned long *)context->previous_esp;
11050 - if (!stack)
11051 + if (stack_start == task_stack_page(task))
11052 break;
11053 + stack = *(unsigned long **)stack_start;
11054 if (ops->stack(data, "IRQ") < 0)
11055 break;
11056 touch_nmi_watchdog();
11057 @@ -96,21 +94,22 @@ void show_registers(struct pt_regs *regs
11058 * When in-kernel, we also print out the stack and code at the
11059 * time of the fault..
11060 */
11061 - if (!user_mode_vm(regs)) {
11062 + if (!user_mode(regs)) {
11063 unsigned int code_prologue = code_bytes * 43 / 64;
11064 unsigned int code_len = code_bytes;
11065 unsigned char c;
11066 u8 *ip;
11067 + unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
11068
11069 printk(KERN_EMERG "Stack:\n");
11070 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
11071
11072 printk(KERN_EMERG "Code: ");
11073
11074 - ip = (u8 *)regs->ip - code_prologue;
11075 + ip = (u8 *)regs->ip - code_prologue + cs_base;
11076 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
11077 /* try starting at IP */
11078 - ip = (u8 *)regs->ip;
11079 + ip = (u8 *)regs->ip + cs_base;
11080 code_len = code_len - code_prologue + 1;
11081 }
11082 for (i = 0; i < code_len; i++, ip++) {
11083 @@ -119,7 +118,7 @@ void show_registers(struct pt_regs *regs
11084 printk(" Bad EIP value.");
11085 break;
11086 }
11087 - if (ip == (u8 *)regs->ip)
11088 + if (ip == (u8 *)regs->ip + cs_base)
11089 printk("<%02x> ", c);
11090 else
11091 printk("%02x ", c);
11092 @@ -132,6 +131,7 @@ int is_valid_bugaddr(unsigned long ip)
11093 {
11094 unsigned short ud2;
11095
11096 + ip = ktla_ktva(ip);
11097 if (ip < PAGE_OFFSET)
11098 return 0;
11099 if (probe_kernel_address((unsigned short *)ip, ud2))
11100 diff -urNp linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c
11101 --- linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c 2011-05-19 00:06:34.000000000 -0400
11102 +++ linux-2.6.39.4/arch/x86/kernel/dumpstack_64.c 2011-08-05 19:44:33.000000000 -0400
11103 @@ -147,9 +147,9 @@ void dump_trace(struct task_struct *task
11104 unsigned long *irq_stack_end =
11105 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
11106 unsigned used = 0;
11107 - struct thread_info *tinfo;
11108 int graph = 0;
11109 unsigned long dummy;
11110 + void *stack_start;
11111
11112 if (!task)
11113 task = current;
11114 @@ -167,10 +167,10 @@ void dump_trace(struct task_struct *task
11115 * current stack address. If the stacks consist of nested
11116 * exceptions
11117 */
11118 - tinfo = task_thread_info(task);
11119 for (;;) {
11120 char *id;
11121 unsigned long *estack_end;
11122 +
11123 estack_end = in_exception_stack(cpu, (unsigned long)stack,
11124 &used, &id);
11125
11126 @@ -178,7 +178,7 @@ void dump_trace(struct task_struct *task
11127 if (ops->stack(data, id) < 0)
11128 break;
11129
11130 - bp = ops->walk_stack(tinfo, stack, bp, ops,
11131 + bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
11132 data, estack_end, &graph);
11133 ops->stack(data, "<EOE>");
11134 /*
11135 @@ -197,7 +197,7 @@ void dump_trace(struct task_struct *task
11136 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
11137 if (ops->stack(data, "IRQ") < 0)
11138 break;
11139 - bp = ops->walk_stack(tinfo, stack, bp,
11140 + bp = ops->walk_stack(task, irq_stack, stack, bp,
11141 ops, data, irq_stack_end, &graph);
11142 /*
11143 * We link to the next stack (which would be
11144 @@ -218,7 +218,8 @@ void dump_trace(struct task_struct *task
11145 /*
11146 * This handles the process stack:
11147 */
11148 - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
11149 + stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
11150 + bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
11151 put_cpu();
11152 }
11153 EXPORT_SYMBOL(dump_trace);
11154 diff -urNp linux-2.6.39.4/arch/x86/kernel/dumpstack.c linux-2.6.39.4/arch/x86/kernel/dumpstack.c
11155 --- linux-2.6.39.4/arch/x86/kernel/dumpstack.c 2011-05-19 00:06:34.000000000 -0400
11156 +++ linux-2.6.39.4/arch/x86/kernel/dumpstack.c 2011-08-05 19:44:33.000000000 -0400
11157 @@ -2,6 +2,9 @@
11158 * Copyright (C) 1991, 1992 Linus Torvalds
11159 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
11160 */
11161 +#ifdef CONFIG_GRKERNSEC_HIDESYM
11162 +#define __INCLUDED_BY_HIDESYM 1
11163 +#endif
11164 #include <linux/kallsyms.h>
11165 #include <linux/kprobes.h>
11166 #include <linux/uaccess.h>
11167 @@ -35,9 +38,8 @@ void printk_address(unsigned long addres
11168 static void
11169 print_ftrace_graph_addr(unsigned long addr, void *data,
11170 const struct stacktrace_ops *ops,
11171 - struct thread_info *tinfo, int *graph)
11172 + struct task_struct *task, int *graph)
11173 {
11174 - struct task_struct *task = tinfo->task;
11175 unsigned long ret_addr;
11176 int index = task->curr_ret_stack;
11177
11178 @@ -58,7 +60,7 @@ print_ftrace_graph_addr(unsigned long ad
11179 static inline void
11180 print_ftrace_graph_addr(unsigned long addr, void *data,
11181 const struct stacktrace_ops *ops,
11182 - struct thread_info *tinfo, int *graph)
11183 + struct task_struct *task, int *graph)
11184 { }
11185 #endif
11186
11187 @@ -69,10 +71,8 @@ print_ftrace_graph_addr(unsigned long ad
11188 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
11189 */
11190
11191 -static inline int valid_stack_ptr(struct thread_info *tinfo,
11192 - void *p, unsigned int size, void *end)
11193 +static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
11194 {
11195 - void *t = tinfo;
11196 if (end) {
11197 if (p < end && p >= (end-THREAD_SIZE))
11198 return 1;
11199 @@ -83,14 +83,14 @@ static inline int valid_stack_ptr(struct
11200 }
11201
11202 unsigned long
11203 -print_context_stack(struct thread_info *tinfo,
11204 +print_context_stack(struct task_struct *task, void *stack_start,
11205 unsigned long *stack, unsigned long bp,
11206 const struct stacktrace_ops *ops, void *data,
11207 unsigned long *end, int *graph)
11208 {
11209 struct stack_frame *frame = (struct stack_frame *)bp;
11210
11211 - while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
11212 + while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
11213 unsigned long addr;
11214
11215 addr = *stack;
11216 @@ -102,7 +102,7 @@ print_context_stack(struct thread_info *
11217 } else {
11218 ops->address(data, addr, 0);
11219 }
11220 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11221 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11222 }
11223 stack++;
11224 }
11225 @@ -111,7 +111,7 @@ print_context_stack(struct thread_info *
11226 EXPORT_SYMBOL_GPL(print_context_stack);
11227
11228 unsigned long
11229 -print_context_stack_bp(struct thread_info *tinfo,
11230 +print_context_stack_bp(struct task_struct *task, void *stack_start,
11231 unsigned long *stack, unsigned long bp,
11232 const struct stacktrace_ops *ops, void *data,
11233 unsigned long *end, int *graph)
11234 @@ -119,7 +119,7 @@ print_context_stack_bp(struct thread_inf
11235 struct stack_frame *frame = (struct stack_frame *)bp;
11236 unsigned long *ret_addr = &frame->return_address;
11237
11238 - while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
11239 + while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
11240 unsigned long addr = *ret_addr;
11241
11242 if (!__kernel_text_address(addr))
11243 @@ -128,7 +128,7 @@ print_context_stack_bp(struct thread_inf
11244 ops->address(data, addr, 1);
11245 frame = frame->next_frame;
11246 ret_addr = &frame->return_address;
11247 - print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
11248 + print_ftrace_graph_addr(addr, data, ops, task, graph);
11249 }
11250
11251 return (unsigned long)frame;
11252 @@ -202,7 +202,7 @@ void dump_stack(void)
11253
11254 bp = stack_frame(current, NULL);
11255 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
11256 - current->pid, current->comm, print_tainted(),
11257 + task_pid_nr(current), current->comm, print_tainted(),
11258 init_utsname()->release,
11259 (int)strcspn(init_utsname()->version, " "),
11260 init_utsname()->version);
11261 @@ -238,6 +238,8 @@ unsigned __kprobes long oops_begin(void)
11262 }
11263 EXPORT_SYMBOL_GPL(oops_begin);
11264
11265 +extern void gr_handle_kernel_exploit(void);
11266 +
11267 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
11268 {
11269 if (regs && kexec_should_crash(current))
11270 @@ -259,7 +261,10 @@ void __kprobes oops_end(unsigned long fl
11271 panic("Fatal exception in interrupt");
11272 if (panic_on_oops)
11273 panic("Fatal exception");
11274 - do_exit(signr);
11275 +
11276 + gr_handle_kernel_exploit();
11277 +
11278 + do_group_exit(signr);
11279 }
11280
11281 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
11282 @@ -286,7 +291,7 @@ int __kprobes __die(const char *str, str
11283
11284 show_registers(regs);
11285 #ifdef CONFIG_X86_32
11286 - if (user_mode_vm(regs)) {
11287 + if (user_mode(regs)) {
11288 sp = regs->sp;
11289 ss = regs->ss & 0xffff;
11290 } else {
11291 @@ -314,7 +319,7 @@ void die(const char *str, struct pt_regs
11292 unsigned long flags = oops_begin();
11293 int sig = SIGSEGV;
11294
11295 - if (!user_mode_vm(regs))
11296 + if (!user_mode(regs))
11297 report_bug(regs->ip, regs);
11298
11299 if (__die(str, regs, err))
11300 diff -urNp linux-2.6.39.4/arch/x86/kernel/early_printk.c linux-2.6.39.4/arch/x86/kernel/early_printk.c
11301 --- linux-2.6.39.4/arch/x86/kernel/early_printk.c 2011-05-19 00:06:34.000000000 -0400
11302 +++ linux-2.6.39.4/arch/x86/kernel/early_printk.c 2011-08-05 19:44:33.000000000 -0400
11303 @@ -7,6 +7,7 @@
11304 #include <linux/pci_regs.h>
11305 #include <linux/pci_ids.h>
11306 #include <linux/errno.h>
11307 +#include <linux/sched.h>
11308 #include <asm/io.h>
11309 #include <asm/processor.h>
11310 #include <asm/fcntl.h>
11311 @@ -179,6 +180,8 @@ asmlinkage void early_printk(const char
11312 int n;
11313 va_list ap;
11314
11315 + pax_track_stack();
11316 +
11317 va_start(ap, fmt);
11318 n = vscnprintf(buf, sizeof(buf), fmt, ap);
11319 early_console->write(early_console, buf, n);
11320 diff -urNp linux-2.6.39.4/arch/x86/kernel/entry_32.S linux-2.6.39.4/arch/x86/kernel/entry_32.S
11321 --- linux-2.6.39.4/arch/x86/kernel/entry_32.S 2011-05-19 00:06:34.000000000 -0400
11322 +++ linux-2.6.39.4/arch/x86/kernel/entry_32.S 2011-08-05 19:44:33.000000000 -0400
11323 @@ -185,13 +185,146 @@
11324 /*CFI_REL_OFFSET gs, PT_GS*/
11325 .endm
11326 .macro SET_KERNEL_GS reg
11327 +
11328 +#ifdef CONFIG_CC_STACKPROTECTOR
11329 movl $(__KERNEL_STACK_CANARY), \reg
11330 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
11331 + movl $(__USER_DS), \reg
11332 +#else
11333 + xorl \reg, \reg
11334 +#endif
11335 +
11336 movl \reg, %gs
11337 .endm
11338
11339 #endif /* CONFIG_X86_32_LAZY_GS */
11340
11341 -.macro SAVE_ALL
11342 +.macro pax_enter_kernel
11343 +#ifdef CONFIG_PAX_KERNEXEC
11344 + call pax_enter_kernel
11345 +#endif
11346 +.endm
11347 +
11348 +.macro pax_exit_kernel
11349 +#ifdef CONFIG_PAX_KERNEXEC
11350 + call pax_exit_kernel
11351 +#endif
11352 +.endm
11353 +
11354 +#ifdef CONFIG_PAX_KERNEXEC
11355 +ENTRY(pax_enter_kernel)
11356 +#ifdef CONFIG_PARAVIRT
11357 + pushl %eax
11358 + pushl %ecx
11359 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
11360 + mov %eax, %esi
11361 +#else
11362 + mov %cr0, %esi
11363 +#endif
11364 + bts $16, %esi
11365 + jnc 1f
11366 + mov %cs, %esi
11367 + cmp $__KERNEL_CS, %esi
11368 + jz 3f
11369 + ljmp $__KERNEL_CS, $3f
11370 +1: ljmp $__KERNEXEC_KERNEL_CS, $2f
11371 +2:
11372 +#ifdef CONFIG_PARAVIRT
11373 + mov %esi, %eax
11374 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
11375 +#else
11376 + mov %esi, %cr0
11377 +#endif
11378 +3:
11379 +#ifdef CONFIG_PARAVIRT
11380 + popl %ecx
11381 + popl %eax
11382 +#endif
11383 + ret
11384 +ENDPROC(pax_enter_kernel)
11385 +
11386 +ENTRY(pax_exit_kernel)
11387 +#ifdef CONFIG_PARAVIRT
11388 + pushl %eax
11389 + pushl %ecx
11390 +#endif
11391 + mov %cs, %esi
11392 + cmp $__KERNEXEC_KERNEL_CS, %esi
11393 + jnz 2f
11394 +#ifdef CONFIG_PARAVIRT
11395 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
11396 + mov %eax, %esi
11397 +#else
11398 + mov %cr0, %esi
11399 +#endif
11400 + btr $16, %esi
11401 + ljmp $__KERNEL_CS, $1f
11402 +1:
11403 +#ifdef CONFIG_PARAVIRT
11404 + mov %esi, %eax
11405 + call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
11406 +#else
11407 + mov %esi, %cr0
11408 +#endif
11409 +2:
11410 +#ifdef CONFIG_PARAVIRT
11411 + popl %ecx
11412 + popl %eax
11413 +#endif
11414 + ret
11415 +ENDPROC(pax_exit_kernel)
11416 +#endif
11417 +
11418 +.macro pax_erase_kstack
11419 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11420 + call pax_erase_kstack
11421 +#endif
11422 +.endm
11423 +
11424 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11425 +/*
11426 + * ebp: thread_info
11427 + * ecx, edx: can be clobbered
11428 + */
11429 +ENTRY(pax_erase_kstack)
11430 + pushl %edi
11431 + pushl %eax
11432 +
11433 + mov TI_lowest_stack(%ebp), %edi
11434 + mov $-0xBEEF, %eax
11435 + std
11436 +
11437 +1: mov %edi, %ecx
11438 + and $THREAD_SIZE_asm - 1, %ecx
11439 + shr $2, %ecx
11440 + repne scasl
11441 + jecxz 2f
11442 +
11443 + cmp $2*16, %ecx
11444 + jc 2f
11445 +
11446 + mov $2*16, %ecx
11447 + repe scasl
11448 + jecxz 2f
11449 + jne 1b
11450 +
11451 +2: cld
11452 + mov %esp, %ecx
11453 + sub %edi, %ecx
11454 + shr $2, %ecx
11455 + rep stosl
11456 +
11457 + mov TI_task_thread_sp0(%ebp), %edi
11458 + sub $128, %edi
11459 + mov %edi, TI_lowest_stack(%ebp)
11460 +
11461 + popl %eax
11462 + popl %edi
11463 + ret
11464 +ENDPROC(pax_erase_kstack)
11465 +#endif
11466 +
11467 +.macro __SAVE_ALL _DS
11468 cld
11469 PUSH_GS
11470 pushl_cfi %fs
11471 @@ -214,7 +347,7 @@
11472 CFI_REL_OFFSET ecx, 0
11473 pushl_cfi %ebx
11474 CFI_REL_OFFSET ebx, 0
11475 - movl $(__USER_DS), %edx
11476 + movl $\_DS, %edx
11477 movl %edx, %ds
11478 movl %edx, %es
11479 movl $(__KERNEL_PERCPU), %edx
11480 @@ -222,6 +355,15 @@
11481 SET_KERNEL_GS %edx
11482 .endm
11483
11484 +.macro SAVE_ALL
11485 +#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
11486 + __SAVE_ALL __KERNEL_DS
11487 + pax_enter_kernel
11488 +#else
11489 + __SAVE_ALL __USER_DS
11490 +#endif
11491 +.endm
11492 +
11493 .macro RESTORE_INT_REGS
11494 popl_cfi %ebx
11495 CFI_RESTORE ebx
11496 @@ -332,7 +474,15 @@ check_userspace:
11497 movb PT_CS(%esp), %al
11498 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
11499 cmpl $USER_RPL, %eax
11500 +
11501 +#ifdef CONFIG_PAX_KERNEXEC
11502 + jae resume_userspace
11503 +
11504 + PAX_EXIT_KERNEL
11505 + jmp resume_kernel
11506 +#else
11507 jb resume_kernel # not returning to v8086 or userspace
11508 +#endif
11509
11510 ENTRY(resume_userspace)
11511 LOCKDEP_SYS_EXIT
11512 @@ -344,7 +494,7 @@ ENTRY(resume_userspace)
11513 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
11514 # int/exception return?
11515 jne work_pending
11516 - jmp restore_all
11517 + jmp restore_all_pax
11518 END(ret_from_exception)
11519
11520 #ifdef CONFIG_PREEMPT
11521 @@ -394,23 +544,34 @@ sysenter_past_esp:
11522 /*CFI_REL_OFFSET cs, 0*/
11523 /*
11524 * Push current_thread_info()->sysenter_return to the stack.
11525 - * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
11526 - * pushed above; +8 corresponds to copy_thread's esp0 setting.
11527 */
11528 - pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
11529 + pushl_cfi $0
11530 CFI_REL_OFFSET eip, 0
11531
11532 pushl_cfi %eax
11533 SAVE_ALL
11534 + GET_THREAD_INFO(%ebp)
11535 + movl TI_sysenter_return(%ebp),%ebp
11536 + movl %ebp,PT_EIP(%esp)
11537 ENABLE_INTERRUPTS(CLBR_NONE)
11538
11539 /*
11540 * Load the potential sixth argument from user stack.
11541 * Careful about security.
11542 */
11543 + movl PT_OLDESP(%esp),%ebp
11544 +
11545 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11546 + mov PT_OLDSS(%esp),%ds
11547 +1: movl %ds:(%ebp),%ebp
11548 + push %ss
11549 + pop %ds
11550 +#else
11551 cmpl $__PAGE_OFFSET-3,%ebp
11552 jae syscall_fault
11553 1: movl (%ebp),%ebp
11554 +#endif
11555 +
11556 movl %ebp,PT_EBP(%esp)
11557 .section __ex_table,"a"
11558 .align 4
11559 @@ -433,12 +594,23 @@ sysenter_do_call:
11560 testl $_TIF_ALLWORK_MASK, %ecx
11561 jne sysexit_audit
11562 sysenter_exit:
11563 +
11564 +#ifdef CONFIG_PAX_RANDKSTACK
11565 + pushl_cfi %eax
11566 + call pax_randomize_kstack
11567 + popl_cfi %eax
11568 +#endif
11569 +
11570 + pax_erase_kstack
11571 +
11572 /* if something modifies registers it must also disable sysexit */
11573 movl PT_EIP(%esp), %edx
11574 movl PT_OLDESP(%esp), %ecx
11575 xorl %ebp,%ebp
11576 TRACE_IRQS_ON
11577 1: mov PT_FS(%esp), %fs
11578 +2: mov PT_DS(%esp), %ds
11579 +3: mov PT_ES(%esp), %es
11580 PTGS_TO_GS
11581 ENABLE_INTERRUPTS_SYSEXIT
11582
11583 @@ -455,6 +627,9 @@ sysenter_audit:
11584 movl %eax,%edx /* 2nd arg: syscall number */
11585 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
11586 call audit_syscall_entry
11587 +
11588 + pax_erase_kstack
11589 +
11590 pushl_cfi %ebx
11591 movl PT_EAX(%esp),%eax /* reload syscall number */
11592 jmp sysenter_do_call
11593 @@ -481,11 +656,17 @@ sysexit_audit:
11594
11595 CFI_ENDPROC
11596 .pushsection .fixup,"ax"
11597 -2: movl $0,PT_FS(%esp)
11598 +4: movl $0,PT_FS(%esp)
11599 + jmp 1b
11600 +5: movl $0,PT_DS(%esp)
11601 + jmp 1b
11602 +6: movl $0,PT_ES(%esp)
11603 jmp 1b
11604 .section __ex_table,"a"
11605 .align 4
11606 - .long 1b,2b
11607 + .long 1b,4b
11608 + .long 2b,5b
11609 + .long 3b,6b
11610 .popsection
11611 PTGS_TO_GS_EX
11612 ENDPROC(ia32_sysenter_target)
11613 @@ -518,6 +699,14 @@ syscall_exit:
11614 testl $_TIF_ALLWORK_MASK, %ecx # current->work
11615 jne syscall_exit_work
11616
11617 +restore_all_pax:
11618 +
11619 +#ifdef CONFIG_PAX_RANDKSTACK
11620 + call pax_randomize_kstack
11621 +#endif
11622 +
11623 + pax_erase_kstack
11624 +
11625 restore_all:
11626 TRACE_IRQS_IRET
11627 restore_all_notrace:
11628 @@ -577,14 +766,21 @@ ldt_ss:
11629 * compensating for the offset by changing to the ESPFIX segment with
11630 * a base address that matches for the difference.
11631 */
11632 -#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
11633 +#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
11634 mov %esp, %edx /* load kernel esp */
11635 mov PT_OLDESP(%esp), %eax /* load userspace esp */
11636 mov %dx, %ax /* eax: new kernel esp */
11637 sub %eax, %edx /* offset (low word is 0) */
11638 +#ifdef CONFIG_SMP
11639 + movl PER_CPU_VAR(cpu_number), %ebx
11640 + shll $PAGE_SHIFT_asm, %ebx
11641 + addl $cpu_gdt_table, %ebx
11642 +#else
11643 + movl $cpu_gdt_table, %ebx
11644 +#endif
11645 shr $16, %edx
11646 - mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
11647 - mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
11648 + mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
11649 + mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
11650 pushl_cfi $__ESPFIX_SS
11651 pushl_cfi %eax /* new kernel esp */
11652 /* Disable interrupts, but do not irqtrace this section: we
11653 @@ -613,29 +809,23 @@ work_resched:
11654 movl TI_flags(%ebp), %ecx
11655 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
11656 # than syscall tracing?
11657 - jz restore_all
11658 + jz restore_all_pax
11659 testb $_TIF_NEED_RESCHED, %cl
11660 jnz work_resched
11661
11662 work_notifysig: # deal with pending signals and
11663 # notify-resume requests
11664 + movl %esp, %eax
11665 #ifdef CONFIG_VM86
11666 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
11667 - movl %esp, %eax
11668 - jne work_notifysig_v86 # returning to kernel-space or
11669 + jz 1f # returning to kernel-space or
11670 # vm86-space
11671 - xorl %edx, %edx
11672 - call do_notify_resume
11673 - jmp resume_userspace_sig
11674
11675 - ALIGN
11676 -work_notifysig_v86:
11677 pushl_cfi %ecx # save ti_flags for do_notify_resume
11678 call save_v86_state # %eax contains pt_regs pointer
11679 popl_cfi %ecx
11680 movl %eax, %esp
11681 -#else
11682 - movl %esp, %eax
11683 +1:
11684 #endif
11685 xorl %edx, %edx
11686 call do_notify_resume
11687 @@ -648,6 +838,9 @@ syscall_trace_entry:
11688 movl $-ENOSYS,PT_EAX(%esp)
11689 movl %esp, %eax
11690 call syscall_trace_enter
11691 +
11692 + pax_erase_kstack
11693 +
11694 /* What it returned is what we'll actually use. */
11695 cmpl $(nr_syscalls), %eax
11696 jnae syscall_call
11697 @@ -670,6 +863,10 @@ END(syscall_exit_work)
11698
11699 RING0_INT_FRAME # can't unwind into user space anyway
11700 syscall_fault:
11701 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11702 + push %ss
11703 + pop %ds
11704 +#endif
11705 GET_THREAD_INFO(%ebp)
11706 movl $-EFAULT,PT_EAX(%esp)
11707 jmp resume_userspace
11708 @@ -752,6 +949,36 @@ ptregs_clone:
11709 CFI_ENDPROC
11710 ENDPROC(ptregs_clone)
11711
11712 + ALIGN;
11713 +ENTRY(kernel_execve)
11714 + CFI_STARTPROC
11715 + pushl_cfi %ebp
11716 + sub $PT_OLDSS+4,%esp
11717 + pushl_cfi %edi
11718 + pushl_cfi %ecx
11719 + pushl_cfi %eax
11720 + lea 3*4(%esp),%edi
11721 + mov $PT_OLDSS/4+1,%ecx
11722 + xorl %eax,%eax
11723 + rep stosl
11724 + popl_cfi %eax
11725 + popl_cfi %ecx
11726 + popl_cfi %edi
11727 + movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
11728 + pushl_cfi %esp
11729 + call sys_execve
11730 + add $4,%esp
11731 + CFI_ADJUST_CFA_OFFSET -4
11732 + GET_THREAD_INFO(%ebp)
11733 + test %eax,%eax
11734 + jz syscall_exit
11735 + add $PT_OLDSS+4,%esp
11736 + CFI_ADJUST_CFA_OFFSET -PT_OLDSS-4
11737 + popl_cfi %ebp
11738 + ret
11739 + CFI_ENDPROC
11740 +ENDPROC(kernel_execve)
11741 +
11742 .macro FIXUP_ESPFIX_STACK
11743 /*
11744 * Switch back for ESPFIX stack to the normal zerobased stack
11745 @@ -761,8 +988,15 @@ ENDPROC(ptregs_clone)
11746 * normal stack and adjusts ESP with the matching offset.
11747 */
11748 /* fixup the stack */
11749 - mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
11750 - mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
11751 +#ifdef CONFIG_SMP
11752 + movl PER_CPU_VAR(cpu_number), %ebx
11753 + shll $PAGE_SHIFT_asm, %ebx
11754 + addl $cpu_gdt_table, %ebx
11755 +#else
11756 + movl $cpu_gdt_table, %ebx
11757 +#endif
11758 + mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
11759 + mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
11760 shl $16, %eax
11761 addl %esp, %eax /* the adjusted stack pointer */
11762 pushl_cfi $__KERNEL_DS
11763 @@ -1213,7 +1447,6 @@ return_to_handler:
11764 jmp *%ecx
11765 #endif
11766
11767 -.section .rodata,"a"
11768 #include "syscall_table_32.S"
11769
11770 syscall_table_size=(.-sys_call_table)
11771 @@ -1259,9 +1492,12 @@ error_code:
11772 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
11773 REG_TO_PTGS %ecx
11774 SET_KERNEL_GS %ecx
11775 - movl $(__USER_DS), %ecx
11776 + movl $(__KERNEL_DS), %ecx
11777 movl %ecx, %ds
11778 movl %ecx, %es
11779 +
11780 + pax_enter_kernel
11781 +
11782 TRACE_IRQS_OFF
11783 movl %esp,%eax # pt_regs pointer
11784 call *%edi
11785 @@ -1346,6 +1582,9 @@ nmi_stack_correct:
11786 xorl %edx,%edx # zero error code
11787 movl %esp,%eax # pt_regs pointer
11788 call do_nmi
11789 +
11790 + pax_exit_kernel
11791 +
11792 jmp restore_all_notrace
11793 CFI_ENDPROC
11794
11795 @@ -1382,6 +1621,9 @@ nmi_espfix_stack:
11796 FIXUP_ESPFIX_STACK # %eax == %esp
11797 xorl %edx,%edx # zero error code
11798 call do_nmi
11799 +
11800 + pax_exit_kernel
11801 +
11802 RESTORE_REGS
11803 lss 12+4(%esp), %esp # back to espfix stack
11804 CFI_ADJUST_CFA_OFFSET -24
11805 diff -urNp linux-2.6.39.4/arch/x86/kernel/entry_64.S linux-2.6.39.4/arch/x86/kernel/entry_64.S
11806 --- linux-2.6.39.4/arch/x86/kernel/entry_64.S 2011-05-19 00:06:34.000000000 -0400
11807 +++ linux-2.6.39.4/arch/x86/kernel/entry_64.S 2011-08-05 19:44:33.000000000 -0400
11808 @@ -53,6 +53,7 @@
11809 #include <asm/paravirt.h>
11810 #include <asm/ftrace.h>
11811 #include <asm/percpu.h>
11812 +#include <asm/pgtable.h>
11813
11814 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11815 #include <linux/elf-em.h>
11816 @@ -176,6 +177,259 @@ ENTRY(native_usergs_sysret64)
11817 ENDPROC(native_usergs_sysret64)
11818 #endif /* CONFIG_PARAVIRT */
11819
11820 + .macro ljmpq sel, off
11821 +#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
11822 + .byte 0x48; ljmp *1234f(%rip)
11823 + .pushsection .rodata
11824 + .align 16
11825 + 1234: .quad \off; .word \sel
11826 + .popsection
11827 +#else
11828 + pushq $\sel
11829 + pushq $\off
11830 + lretq
11831 +#endif
11832 + .endm
11833 +
11834 + .macro pax_enter_kernel
11835 +#ifdef CONFIG_PAX_KERNEXEC
11836 + call pax_enter_kernel
11837 +#endif
11838 + .endm
11839 +
11840 + .macro pax_exit_kernel
11841 +#ifdef CONFIG_PAX_KERNEXEC
11842 + call pax_exit_kernel
11843 +#endif
11844 + .endm
11845 +
11846 +#ifdef CONFIG_PAX_KERNEXEC
11847 +ENTRY(pax_enter_kernel)
11848 + pushq %rdi
11849 +
11850 +#ifdef CONFIG_PARAVIRT
11851 + PV_SAVE_REGS(CLBR_RDI)
11852 +#endif
11853 +
11854 + GET_CR0_INTO_RDI
11855 + bts $16,%rdi
11856 + jnc 1f
11857 + mov %cs,%edi
11858 + cmp $__KERNEL_CS,%edi
11859 + jz 3f
11860 + ljmpq __KERNEL_CS,3f
11861 +1: ljmpq __KERNEXEC_KERNEL_CS,2f
11862 +2: SET_RDI_INTO_CR0
11863 +3:
11864 +
11865 +#ifdef CONFIG_PARAVIRT
11866 + PV_RESTORE_REGS(CLBR_RDI)
11867 +#endif
11868 +
11869 + popq %rdi
11870 + retq
11871 +ENDPROC(pax_enter_kernel)
11872 +
11873 +ENTRY(pax_exit_kernel)
11874 + pushq %rdi
11875 +
11876 +#ifdef CONFIG_PARAVIRT
11877 + PV_SAVE_REGS(CLBR_RDI)
11878 +#endif
11879 +
11880 + mov %cs,%rdi
11881 + cmp $__KERNEXEC_KERNEL_CS,%edi
11882 + jnz 2f
11883 + GET_CR0_INTO_RDI
11884 + btr $16,%rdi
11885 + ljmpq __KERNEL_CS,1f
11886 +1: SET_RDI_INTO_CR0
11887 +2:
11888 +
11889 +#ifdef CONFIG_PARAVIRT
11890 + PV_RESTORE_REGS(CLBR_RDI);
11891 +#endif
11892 +
11893 + popq %rdi
11894 + retq
11895 +ENDPROC(pax_exit_kernel)
11896 +#endif
11897 +
11898 + .macro pax_enter_kernel_user
11899 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11900 + call pax_enter_kernel_user
11901 +#endif
11902 + .endm
11903 +
11904 + .macro pax_exit_kernel_user
11905 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11906 + call pax_exit_kernel_user
11907 +#endif
11908 +#ifdef CONFIG_PAX_RANDKSTACK
11909 + push %rax
11910 + call pax_randomize_kstack
11911 + pop %rax
11912 +#endif
11913 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11914 + call pax_erase_kstack
11915 +#endif
11916 + .endm
11917 +
11918 +#ifdef CONFIG_PAX_MEMORY_UDEREF
11919 +ENTRY(pax_enter_kernel_user)
11920 + pushq %rdi
11921 + pushq %rbx
11922 +
11923 +#ifdef CONFIG_PARAVIRT
11924 + PV_SAVE_REGS(CLBR_RDI)
11925 +#endif
11926 +
11927 + GET_CR3_INTO_RDI
11928 + mov %rdi,%rbx
11929 + add $__START_KERNEL_map,%rbx
11930 + sub phys_base(%rip),%rbx
11931 +
11932 +#ifdef CONFIG_PARAVIRT
11933 + pushq %rdi
11934 + cmpl $0, pv_info+PARAVIRT_enabled
11935 + jz 1f
11936 + i = 0
11937 + .rept USER_PGD_PTRS
11938 + mov i*8(%rbx),%rsi
11939 + mov $0,%sil
11940 + lea i*8(%rbx),%rdi
11941 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
11942 + i = i + 1
11943 + .endr
11944 + jmp 2f
11945 +1:
11946 +#endif
11947 +
11948 + i = 0
11949 + .rept USER_PGD_PTRS
11950 + movb $0,i*8(%rbx)
11951 + i = i + 1
11952 + .endr
11953 +
11954 +#ifdef CONFIG_PARAVIRT
11955 +2: popq %rdi
11956 +#endif
11957 + SET_RDI_INTO_CR3
11958 +
11959 +#ifdef CONFIG_PAX_KERNEXEC
11960 + GET_CR0_INTO_RDI
11961 + bts $16,%rdi
11962 + SET_RDI_INTO_CR0
11963 +#endif
11964 +
11965 +#ifdef CONFIG_PARAVIRT
11966 + PV_RESTORE_REGS(CLBR_RDI)
11967 +#endif
11968 +
11969 + popq %rbx
11970 + popq %rdi
11971 + retq
11972 +ENDPROC(pax_enter_kernel_user)
11973 +
11974 +ENTRY(pax_exit_kernel_user)
11975 + push %rdi
11976 +
11977 +#ifdef CONFIG_PARAVIRT
11978 + pushq %rbx
11979 + PV_SAVE_REGS(CLBR_RDI)
11980 +#endif
11981 +
11982 +#ifdef CONFIG_PAX_KERNEXEC
11983 + GET_CR0_INTO_RDI
11984 + btr $16,%rdi
11985 + SET_RDI_INTO_CR0
11986 +#endif
11987 +
11988 + GET_CR3_INTO_RDI
11989 + add $__START_KERNEL_map,%rdi
11990 + sub phys_base(%rip),%rdi
11991 +
11992 +#ifdef CONFIG_PARAVIRT
11993 + cmpl $0, pv_info+PARAVIRT_enabled
11994 + jz 1f
11995 + mov %rdi,%rbx
11996 + i = 0
11997 + .rept USER_PGD_PTRS
11998 + mov i*8(%rbx),%rsi
11999 + mov $0x67,%sil
12000 + lea i*8(%rbx),%rdi
12001 + call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
12002 + i = i + 1
12003 + .endr
12004 + jmp 2f
12005 +1:
12006 +#endif
12007 +
12008 + i = 0
12009 + .rept USER_PGD_PTRS
12010 + movb $0x67,i*8(%rdi)
12011 + i = i + 1
12012 + .endr
12013 +
12014 +#ifdef CONFIG_PARAVIRT
12015 +2: PV_RESTORE_REGS(CLBR_RDI)
12016 + popq %rbx
12017 +#endif
12018 +
12019 + popq %rdi
12020 + retq
12021 +ENDPROC(pax_exit_kernel_user)
12022 +#endif
12023 +
12024 + .macro pax_erase_kstack
12025 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12026 + call pax_erase_kstack
12027 +#endif
12028 + .endm
12029 +
12030 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12031 +/*
12032 + * r10: thread_info
12033 + * rcx, rdx: can be clobbered
12034 + */
12035 +ENTRY(pax_erase_kstack)
12036 + pushq %rdi
12037 + pushq %rax
12038 +
12039 + GET_THREAD_INFO(%r10)
12040 + mov TI_lowest_stack(%r10), %rdi
12041 + mov $-0xBEEF, %rax
12042 + std
12043 +
12044 +1: mov %edi, %ecx
12045 + and $THREAD_SIZE_asm - 1, %ecx
12046 + shr $3, %ecx
12047 + repne scasq
12048 + jecxz 2f
12049 +
12050 + cmp $2*8, %ecx
12051 + jc 2f
12052 +
12053 + mov $2*8, %ecx
12054 + repe scasq
12055 + jecxz 2f
12056 + jne 1b
12057 +
12058 +2: cld
12059 + mov %esp, %ecx
12060 + sub %edi, %ecx
12061 + shr $3, %ecx
12062 + rep stosq
12063 +
12064 + mov TI_task_thread_sp0(%r10), %rdi
12065 + sub $256, %rdi
12066 + mov %rdi, TI_lowest_stack(%r10)
12067 +
12068 + popq %rax
12069 + popq %rdi
12070 + ret
12071 +ENDPROC(pax_erase_kstack)
12072 +#endif
12073
12074 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
12075 #ifdef CONFIG_TRACE_IRQFLAGS
12076 @@ -318,7 +572,7 @@ ENTRY(save_args)
12077 leaq -RBP+8(%rsp),%rdi /* arg1 for handler */
12078 movq_cfi rbp, 8 /* push %rbp */
12079 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
12080 - testl $3, CS(%rdi)
12081 + testb $3, CS(%rdi)
12082 je 1f
12083 SWAPGS
12084 /*
12085 @@ -409,7 +663,7 @@ ENTRY(ret_from_fork)
12086
12087 RESTORE_REST
12088
12089 - testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12090 + testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
12091 je int_ret_from_sys_call
12092
12093 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
12094 @@ -455,7 +709,7 @@ END(ret_from_fork)
12095 ENTRY(system_call)
12096 CFI_STARTPROC simple
12097 CFI_SIGNAL_FRAME
12098 - CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
12099 + CFI_DEF_CFA rsp,0
12100 CFI_REGISTER rip,rcx
12101 /*CFI_REGISTER rflags,r11*/
12102 SWAPGS_UNSAFE_STACK
12103 @@ -468,12 +722,13 @@ ENTRY(system_call_after_swapgs)
12104
12105 movq %rsp,PER_CPU_VAR(old_rsp)
12106 movq PER_CPU_VAR(kernel_stack),%rsp
12107 + pax_enter_kernel_user
12108 /*
12109 * No need to follow this irqs off/on section - it's straight
12110 * and short:
12111 */
12112 ENABLE_INTERRUPTS(CLBR_NONE)
12113 - SAVE_ARGS 8,1
12114 + SAVE_ARGS 8*6,1
12115 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
12116 movq %rcx,RIP-ARGOFFSET(%rsp)
12117 CFI_REL_OFFSET rip,RIP-ARGOFFSET
12118 @@ -502,6 +757,7 @@ sysret_check:
12119 andl %edi,%edx
12120 jnz sysret_careful
12121 CFI_REMEMBER_STATE
12122 + pax_exit_kernel_user
12123 /*
12124 * sysretq will re-enable interrupts:
12125 */
12126 @@ -560,6 +816,9 @@ auditsys:
12127 movq %rax,%rsi /* 2nd arg: syscall number */
12128 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
12129 call audit_syscall_entry
12130 +
12131 + pax_erase_kstack
12132 +
12133 LOAD_ARGS 0 /* reload call-clobbered registers */
12134 jmp system_call_fastpath
12135
12136 @@ -590,6 +849,9 @@ tracesys:
12137 FIXUP_TOP_OF_STACK %rdi
12138 movq %rsp,%rdi
12139 call syscall_trace_enter
12140 +
12141 + pax_erase_kstack
12142 +
12143 /*
12144 * Reload arg registers from stack in case ptrace changed them.
12145 * We don't reload %rax because syscall_trace_enter() returned
12146 @@ -611,7 +873,7 @@ tracesys:
12147 GLOBAL(int_ret_from_sys_call)
12148 DISABLE_INTERRUPTS(CLBR_NONE)
12149 TRACE_IRQS_OFF
12150 - testl $3,CS-ARGOFFSET(%rsp)
12151 + testb $3,CS-ARGOFFSET(%rsp)
12152 je retint_restore_args
12153 movl $_TIF_ALLWORK_MASK,%edi
12154 /* edi: mask to check */
12155 @@ -793,6 +1055,16 @@ END(interrupt)
12156 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
12157 call save_args
12158 PARTIAL_FRAME 0
12159 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12160 + testb $3, CS(%rdi)
12161 + jnz 1f
12162 + pax_enter_kernel
12163 + jmp 2f
12164 +1: pax_enter_kernel_user
12165 +2:
12166 +#else
12167 + pax_enter_kernel
12168 +#endif
12169 call \func
12170 .endm
12171
12172 @@ -825,7 +1097,7 @@ ret_from_intr:
12173 CFI_ADJUST_CFA_OFFSET -8
12174 exit_intr:
12175 GET_THREAD_INFO(%rcx)
12176 - testl $3,CS-ARGOFFSET(%rsp)
12177 + testb $3,CS-ARGOFFSET(%rsp)
12178 je retint_kernel
12179
12180 /* Interrupt came from user space */
12181 @@ -847,12 +1119,14 @@ retint_swapgs: /* return to user-space
12182 * The iretq could re-enable interrupts:
12183 */
12184 DISABLE_INTERRUPTS(CLBR_ANY)
12185 + pax_exit_kernel_user
12186 TRACE_IRQS_IRETQ
12187 SWAPGS
12188 jmp restore_args
12189
12190 retint_restore_args: /* return to kernel space */
12191 DISABLE_INTERRUPTS(CLBR_ANY)
12192 + pax_exit_kernel
12193 /*
12194 * The iretq could re-enable interrupts:
12195 */
12196 @@ -1027,6 +1301,16 @@ ENTRY(\sym)
12197 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12198 call error_entry
12199 DEFAULT_FRAME 0
12200 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12201 + testb $3, CS(%rsp)
12202 + jnz 1f
12203 + pax_enter_kernel
12204 + jmp 2f
12205 +1: pax_enter_kernel_user
12206 +2:
12207 +#else
12208 + pax_enter_kernel
12209 +#endif
12210 movq %rsp,%rdi /* pt_regs pointer */
12211 xorl %esi,%esi /* no error code */
12212 call \do_sym
12213 @@ -1044,6 +1328,16 @@ ENTRY(\sym)
12214 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12215 call save_paranoid
12216 TRACE_IRQS_OFF
12217 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12218 + testb $3, CS(%rsp)
12219 + jnz 1f
12220 + pax_enter_kernel
12221 + jmp 2f
12222 +1: pax_enter_kernel_user
12223 +2:
12224 +#else
12225 + pax_enter_kernel
12226 +#endif
12227 movq %rsp,%rdi /* pt_regs pointer */
12228 xorl %esi,%esi /* no error code */
12229 call \do_sym
12230 @@ -1052,7 +1346,7 @@ ENTRY(\sym)
12231 END(\sym)
12232 .endm
12233
12234 -#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
12235 +#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
12236 .macro paranoidzeroentry_ist sym do_sym ist
12237 ENTRY(\sym)
12238 INTR_FRAME
12239 @@ -1062,8 +1356,24 @@ ENTRY(\sym)
12240 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12241 call save_paranoid
12242 TRACE_IRQS_OFF
12243 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12244 + testb $3, CS(%rsp)
12245 + jnz 1f
12246 + pax_enter_kernel
12247 + jmp 2f
12248 +1: pax_enter_kernel_user
12249 +2:
12250 +#else
12251 + pax_enter_kernel
12252 +#endif
12253 movq %rsp,%rdi /* pt_regs pointer */
12254 xorl %esi,%esi /* no error code */
12255 +#ifdef CONFIG_SMP
12256 + imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
12257 + lea init_tss(%r12), %r12
12258 +#else
12259 + lea init_tss(%rip), %r12
12260 +#endif
12261 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12262 call \do_sym
12263 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
12264 @@ -1080,6 +1390,16 @@ ENTRY(\sym)
12265 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12266 call error_entry
12267 DEFAULT_FRAME 0
12268 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12269 + testb $3, CS(%rsp)
12270 + jnz 1f
12271 + pax_enter_kernel
12272 + jmp 2f
12273 +1: pax_enter_kernel_user
12274 +2:
12275 +#else
12276 + pax_enter_kernel
12277 +#endif
12278 movq %rsp,%rdi /* pt_regs pointer */
12279 movq ORIG_RAX(%rsp),%rsi /* get error code */
12280 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12281 @@ -1099,6 +1419,16 @@ ENTRY(\sym)
12282 call save_paranoid
12283 DEFAULT_FRAME 0
12284 TRACE_IRQS_OFF
12285 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12286 + testb $3, CS(%rsp)
12287 + jnz 1f
12288 + pax_enter_kernel
12289 + jmp 2f
12290 +1: pax_enter_kernel_user
12291 +2:
12292 +#else
12293 + pax_enter_kernel
12294 +#endif
12295 movq %rsp,%rdi /* pt_regs pointer */
12296 movq ORIG_RAX(%rsp),%rsi /* get error code */
12297 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
12298 @@ -1361,14 +1691,27 @@ ENTRY(paranoid_exit)
12299 TRACE_IRQS_OFF
12300 testl %ebx,%ebx /* swapgs needed? */
12301 jnz paranoid_restore
12302 - testl $3,CS(%rsp)
12303 + testb $3,CS(%rsp)
12304 jnz paranoid_userspace
12305 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12306 + pax_exit_kernel
12307 + TRACE_IRQS_IRETQ 0
12308 + SWAPGS_UNSAFE_STACK
12309 + RESTORE_ALL 8
12310 + jmp irq_return
12311 +#endif
12312 paranoid_swapgs:
12313 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12314 + pax_exit_kernel_user
12315 +#else
12316 + pax_exit_kernel
12317 +#endif
12318 TRACE_IRQS_IRETQ 0
12319 SWAPGS_UNSAFE_STACK
12320 RESTORE_ALL 8
12321 jmp irq_return
12322 paranoid_restore:
12323 + pax_exit_kernel
12324 TRACE_IRQS_IRETQ 0
12325 RESTORE_ALL 8
12326 jmp irq_return
12327 @@ -1426,7 +1769,7 @@ ENTRY(error_entry)
12328 movq_cfi r14, R14+8
12329 movq_cfi r15, R15+8
12330 xorl %ebx,%ebx
12331 - testl $3,CS+8(%rsp)
12332 + testb $3,CS+8(%rsp)
12333 je error_kernelspace
12334 error_swapgs:
12335 SWAPGS
12336 @@ -1490,6 +1833,16 @@ ENTRY(nmi)
12337 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
12338 call save_paranoid
12339 DEFAULT_FRAME 0
12340 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12341 + testb $3, CS(%rsp)
12342 + jnz 1f
12343 + pax_enter_kernel
12344 + jmp 2f
12345 +1: pax_enter_kernel_user
12346 +2:
12347 +#else
12348 + pax_enter_kernel
12349 +#endif
12350 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
12351 movq %rsp,%rdi
12352 movq $-1,%rsi
12353 @@ -1500,11 +1853,25 @@ ENTRY(nmi)
12354 DISABLE_INTERRUPTS(CLBR_NONE)
12355 testl %ebx,%ebx /* swapgs needed? */
12356 jnz nmi_restore
12357 - testl $3,CS(%rsp)
12358 + testb $3,CS(%rsp)
12359 jnz nmi_userspace
12360 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12361 + pax_exit_kernel
12362 + SWAPGS_UNSAFE_STACK
12363 + RESTORE_ALL 8
12364 + jmp irq_return
12365 +#endif
12366 nmi_swapgs:
12367 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12368 + pax_exit_kernel_user
12369 +#else
12370 + pax_exit_kernel
12371 +#endif
12372 SWAPGS_UNSAFE_STACK
12373 + RESTORE_ALL 8
12374 + jmp irq_return
12375 nmi_restore:
12376 + pax_exit_kernel
12377 RESTORE_ALL 8
12378 jmp irq_return
12379 nmi_userspace:
12380 diff -urNp linux-2.6.39.4/arch/x86/kernel/ftrace.c linux-2.6.39.4/arch/x86/kernel/ftrace.c
12381 --- linux-2.6.39.4/arch/x86/kernel/ftrace.c 2011-05-19 00:06:34.000000000 -0400
12382 +++ linux-2.6.39.4/arch/x86/kernel/ftrace.c 2011-08-05 19:44:33.000000000 -0400
12383 @@ -126,7 +126,7 @@ static void *mod_code_ip; /* holds the
12384 static void *mod_code_newcode; /* holds the text to write to the IP */
12385
12386 static unsigned nmi_wait_count;
12387 -static atomic_t nmi_update_count = ATOMIC_INIT(0);
12388 +static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
12389
12390 int ftrace_arch_read_dyn_info(char *buf, int size)
12391 {
12392 @@ -134,7 +134,7 @@ int ftrace_arch_read_dyn_info(char *buf,
12393
12394 r = snprintf(buf, size, "%u %u",
12395 nmi_wait_count,
12396 - atomic_read(&nmi_update_count));
12397 + atomic_read_unchecked(&nmi_update_count));
12398 return r;
12399 }
12400
12401 @@ -177,8 +177,10 @@ void ftrace_nmi_enter(void)
12402
12403 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
12404 smp_rmb();
12405 + pax_open_kernel();
12406 ftrace_mod_code();
12407 - atomic_inc(&nmi_update_count);
12408 + pax_close_kernel();
12409 + atomic_inc_unchecked(&nmi_update_count);
12410 }
12411 /* Must have previous changes seen before executions */
12412 smp_mb();
12413 @@ -271,6 +273,8 @@ ftrace_modify_code(unsigned long ip, uns
12414 {
12415 unsigned char replaced[MCOUNT_INSN_SIZE];
12416
12417 + ip = ktla_ktva(ip);
12418 +
12419 /*
12420 * Note: Due to modules and __init, code can
12421 * disappear and change, we need to protect against faulting
12422 @@ -327,7 +331,7 @@ int ftrace_update_ftrace_func(ftrace_fun
12423 unsigned char old[MCOUNT_INSN_SIZE], *new;
12424 int ret;
12425
12426 - memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
12427 + memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
12428 new = ftrace_call_replace(ip, (unsigned long)func);
12429 ret = ftrace_modify_code(ip, old, new);
12430
12431 @@ -353,6 +357,8 @@ static int ftrace_mod_jmp(unsigned long
12432 {
12433 unsigned char code[MCOUNT_INSN_SIZE];
12434
12435 + ip = ktla_ktva(ip);
12436 +
12437 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
12438 return -EFAULT;
12439
12440 diff -urNp linux-2.6.39.4/arch/x86/kernel/head32.c linux-2.6.39.4/arch/x86/kernel/head32.c
12441 --- linux-2.6.39.4/arch/x86/kernel/head32.c 2011-05-19 00:06:34.000000000 -0400
12442 +++ linux-2.6.39.4/arch/x86/kernel/head32.c 2011-08-05 19:44:33.000000000 -0400
12443 @@ -19,6 +19,7 @@
12444 #include <asm/io_apic.h>
12445 #include <asm/bios_ebda.h>
12446 #include <asm/tlbflush.h>
12447 +#include <asm/boot.h>
12448
12449 static void __init i386_default_early_setup(void)
12450 {
12451 @@ -34,7 +35,7 @@ void __init i386_start_kernel(void)
12452 {
12453 memblock_init();
12454
12455 - memblock_x86_reserve_range(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12456 + memblock_x86_reserve_range(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
12457
12458 #ifdef CONFIG_BLK_DEV_INITRD
12459 /* Reserve INITRD */
12460 diff -urNp linux-2.6.39.4/arch/x86/kernel/head_32.S linux-2.6.39.4/arch/x86/kernel/head_32.S
12461 --- linux-2.6.39.4/arch/x86/kernel/head_32.S 2011-05-19 00:06:34.000000000 -0400
12462 +++ linux-2.6.39.4/arch/x86/kernel/head_32.S 2011-08-05 19:44:33.000000000 -0400
12463 @@ -25,6 +25,12 @@
12464 /* Physical address */
12465 #define pa(X) ((X) - __PAGE_OFFSET)
12466
12467 +#ifdef CONFIG_PAX_KERNEXEC
12468 +#define ta(X) (X)
12469 +#else
12470 +#define ta(X) ((X) - __PAGE_OFFSET)
12471 +#endif
12472 +
12473 /*
12474 * References to members of the new_cpu_data structure.
12475 */
12476 @@ -54,11 +60,7 @@
12477 * and small than max_low_pfn, otherwise will waste some page table entries
12478 */
12479
12480 -#if PTRS_PER_PMD > 1
12481 -#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
12482 -#else
12483 -#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
12484 -#endif
12485 +#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
12486
12487 /* Number of possible pages in the lowmem region */
12488 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
12489 @@ -77,6 +79,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
12490 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12491
12492 /*
12493 + * Real beginning of normal "text" segment
12494 + */
12495 +ENTRY(stext)
12496 +ENTRY(_stext)
12497 +
12498 +/*
12499 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
12500 * %esi points to the real-mode code as a 32-bit pointer.
12501 * CS and DS must be 4 GB flat segments, but we don't depend on
12502 @@ -84,6 +92,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
12503 * can.
12504 */
12505 __HEAD
12506 +
12507 +#ifdef CONFIG_PAX_KERNEXEC
12508 + jmp startup_32
12509 +/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
12510 +.fill PAGE_SIZE-5,1,0xcc
12511 +#endif
12512 +
12513 ENTRY(startup_32)
12514 movl pa(stack_start),%ecx
12515
12516 @@ -105,6 +120,57 @@ ENTRY(startup_32)
12517 2:
12518 leal -__PAGE_OFFSET(%ecx),%esp
12519
12520 +#ifdef CONFIG_SMP
12521 + movl $pa(cpu_gdt_table),%edi
12522 + movl $__per_cpu_load,%eax
12523 + movw %ax,__KERNEL_PERCPU + 2(%edi)
12524 + rorl $16,%eax
12525 + movb %al,__KERNEL_PERCPU + 4(%edi)
12526 + movb %ah,__KERNEL_PERCPU + 7(%edi)
12527 + movl $__per_cpu_end - 1,%eax
12528 + subl $__per_cpu_start,%eax
12529 + movw %ax,__KERNEL_PERCPU + 0(%edi)
12530 +#endif
12531 +
12532 +#ifdef CONFIG_PAX_MEMORY_UDEREF
12533 + movl $NR_CPUS,%ecx
12534 + movl $pa(cpu_gdt_table),%edi
12535 +1:
12536 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
12537 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
12538 + movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
12539 + addl $PAGE_SIZE_asm,%edi
12540 + loop 1b
12541 +#endif
12542 +
12543 +#ifdef CONFIG_PAX_KERNEXEC
12544 + movl $pa(boot_gdt),%edi
12545 + movl $__LOAD_PHYSICAL_ADDR,%eax
12546 + movw %ax,__BOOT_CS + 2(%edi)
12547 + rorl $16,%eax
12548 + movb %al,__BOOT_CS + 4(%edi)
12549 + movb %ah,__BOOT_CS + 7(%edi)
12550 + rorl $16,%eax
12551 +
12552 + ljmp $(__BOOT_CS),$1f
12553 +1:
12554 +
12555 + movl $NR_CPUS,%ecx
12556 + movl $pa(cpu_gdt_table),%edi
12557 + addl $__PAGE_OFFSET,%eax
12558 +1:
12559 + movw %ax,__KERNEL_CS + 2(%edi)
12560 + movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
12561 + rorl $16,%eax
12562 + movb %al,__KERNEL_CS + 4(%edi)
12563 + movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
12564 + movb %ah,__KERNEL_CS + 7(%edi)
12565 + movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
12566 + rorl $16,%eax
12567 + addl $PAGE_SIZE_asm,%edi
12568 + loop 1b
12569 +#endif
12570 +
12571 /*
12572 * Clear BSS first so that there are no surprises...
12573 */
12574 @@ -195,8 +261,11 @@ ENTRY(startup_32)
12575 movl %eax, pa(max_pfn_mapped)
12576
12577 /* Do early initialization of the fixmap area */
12578 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12579 - movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
12580 +#ifdef CONFIG_COMPAT_VDSO
12581 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
12582 +#else
12583 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
12584 +#endif
12585 #else /* Not PAE */
12586
12587 page_pde_offset = (__PAGE_OFFSET >> 20);
12588 @@ -226,8 +295,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12589 movl %eax, pa(max_pfn_mapped)
12590
12591 /* Do early initialization of the fixmap area */
12592 - movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
12593 - movl %eax,pa(initial_page_table+0xffc)
12594 +#ifdef CONFIG_COMPAT_VDSO
12595 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
12596 +#else
12597 + movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
12598 +#endif
12599 #endif
12600
12601 #ifdef CONFIG_PARAVIRT
12602 @@ -241,9 +313,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
12603 cmpl $num_subarch_entries, %eax
12604 jae bad_subarch
12605
12606 - movl pa(subarch_entries)(,%eax,4), %eax
12607 - subl $__PAGE_OFFSET, %eax
12608 - jmp *%eax
12609 + jmp *pa(subarch_entries)(,%eax,4)
12610
12611 bad_subarch:
12612 WEAK(lguest_entry)
12613 @@ -255,10 +325,10 @@ WEAK(xen_entry)
12614 __INITDATA
12615
12616 subarch_entries:
12617 - .long default_entry /* normal x86/PC */
12618 - .long lguest_entry /* lguest hypervisor */
12619 - .long xen_entry /* Xen hypervisor */
12620 - .long default_entry /* Moorestown MID */
12621 + .long ta(default_entry) /* normal x86/PC */
12622 + .long ta(lguest_entry) /* lguest hypervisor */
12623 + .long ta(xen_entry) /* Xen hypervisor */
12624 + .long ta(default_entry) /* Moorestown MID */
12625 num_subarch_entries = (. - subarch_entries) / 4
12626 .previous
12627 #else
12628 @@ -312,6 +382,7 @@ default_entry:
12629 orl %edx,%eax
12630 movl %eax,%cr4
12631
12632 +#ifdef CONFIG_X86_PAE
12633 testb $X86_CR4_PAE, %al # check if PAE is enabled
12634 jz 6f
12635
12636 @@ -340,6 +411,9 @@ default_entry:
12637 /* Make changes effective */
12638 wrmsr
12639
12640 + btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
12641 +#endif
12642 +
12643 6:
12644
12645 /*
12646 @@ -443,7 +517,7 @@ is386: movl $2,%ecx # set MP
12647 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
12648 movl %eax,%ss # after changing gdt.
12649
12650 - movl $(__USER_DS),%eax # DS/ES contains default USER segment
12651 +# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
12652 movl %eax,%ds
12653 movl %eax,%es
12654
12655 @@ -457,15 +531,22 @@ is386: movl $2,%ecx # set MP
12656 */
12657 cmpb $0,ready
12658 jne 1f
12659 - movl $gdt_page,%eax
12660 + movl $cpu_gdt_table,%eax
12661 movl $stack_canary,%ecx
12662 +#ifdef CONFIG_SMP
12663 + addl $__per_cpu_load,%ecx
12664 +#endif
12665 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
12666 shrl $16, %ecx
12667 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
12668 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
12669 1:
12670 -#endif
12671 movl $(__KERNEL_STACK_CANARY),%eax
12672 +#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12673 + movl $(__USER_DS),%eax
12674 +#else
12675 + xorl %eax,%eax
12676 +#endif
12677 movl %eax,%gs
12678
12679 xorl %eax,%eax # Clear LDT
12680 @@ -558,22 +639,22 @@ early_page_fault:
12681 jmp early_fault
12682
12683 early_fault:
12684 - cld
12685 #ifdef CONFIG_PRINTK
12686 + cmpl $1,%ss:early_recursion_flag
12687 + je hlt_loop
12688 + incl %ss:early_recursion_flag
12689 + cld
12690 pusha
12691 movl $(__KERNEL_DS),%eax
12692 movl %eax,%ds
12693 movl %eax,%es
12694 - cmpl $2,early_recursion_flag
12695 - je hlt_loop
12696 - incl early_recursion_flag
12697 movl %cr2,%eax
12698 pushl %eax
12699 pushl %edx /* trapno */
12700 pushl $fault_msg
12701 call printk
12702 +; call dump_stack
12703 #endif
12704 - call dump_stack
12705 hlt_loop:
12706 hlt
12707 jmp hlt_loop
12708 @@ -581,8 +662,11 @@ hlt_loop:
12709 /* This is the default interrupt "handler" :-) */
12710 ALIGN
12711 ignore_int:
12712 - cld
12713 #ifdef CONFIG_PRINTK
12714 + cmpl $2,%ss:early_recursion_flag
12715 + je hlt_loop
12716 + incl %ss:early_recursion_flag
12717 + cld
12718 pushl %eax
12719 pushl %ecx
12720 pushl %edx
12721 @@ -591,9 +675,6 @@ ignore_int:
12722 movl $(__KERNEL_DS),%eax
12723 movl %eax,%ds
12724 movl %eax,%es
12725 - cmpl $2,early_recursion_flag
12726 - je hlt_loop
12727 - incl early_recursion_flag
12728 pushl 16(%esp)
12729 pushl 24(%esp)
12730 pushl 32(%esp)
12731 @@ -622,29 +703,43 @@ ENTRY(initial_code)
12732 /*
12733 * BSS section
12734 */
12735 -__PAGE_ALIGNED_BSS
12736 - .align PAGE_SIZE
12737 #ifdef CONFIG_X86_PAE
12738 +.section .initial_pg_pmd,"a",@progbits
12739 initial_pg_pmd:
12740 .fill 1024*KPMDS,4,0
12741 #else
12742 +.section .initial_page_table,"a",@progbits
12743 ENTRY(initial_page_table)
12744 .fill 1024,4,0
12745 #endif
12746 +.section .initial_pg_fixmap,"a",@progbits
12747 initial_pg_fixmap:
12748 .fill 1024,4,0
12749 +.section .empty_zero_page,"a",@progbits
12750 ENTRY(empty_zero_page)
12751 .fill 4096,1,0
12752 +.section .swapper_pg_dir,"a",@progbits
12753 ENTRY(swapper_pg_dir)
12754 +#ifdef CONFIG_X86_PAE
12755 + .fill 4,8,0
12756 +#else
12757 .fill 1024,4,0
12758 +#endif
12759 +
12760 +/*
12761 + * The IDT has to be page-aligned to simplify the Pentium
12762 + * F0 0F bug workaround.. We have a special link segment
12763 + * for this.
12764 + */
12765 +.section .idt,"a",@progbits
12766 +ENTRY(idt_table)
12767 + .fill 256,8,0
12768
12769 /*
12770 * This starts the data section.
12771 */
12772 #ifdef CONFIG_X86_PAE
12773 -__PAGE_ALIGNED_DATA
12774 - /* Page-aligned for the benefit of paravirt? */
12775 - .align PAGE_SIZE
12776 +.section .initial_page_table,"a",@progbits
12777 ENTRY(initial_page_table)
12778 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
12779 # if KPMDS == 3
12780 @@ -663,18 +758,27 @@ ENTRY(initial_page_table)
12781 # error "Kernel PMDs should be 1, 2 or 3"
12782 # endif
12783 .align PAGE_SIZE /* needs to be page-sized too */
12784 +
12785 +#ifdef CONFIG_PAX_PER_CPU_PGD
12786 +ENTRY(cpu_pgd)
12787 + .rept NR_CPUS
12788 + .fill 4,8,0
12789 + .endr
12790 +#endif
12791 +
12792 #endif
12793
12794 .data
12795 .balign 4
12796 ENTRY(stack_start)
12797 - .long init_thread_union+THREAD_SIZE
12798 + .long init_thread_union+THREAD_SIZE-8
12799 +
12800 +ready: .byte 0
12801
12802 +.section .rodata,"a",@progbits
12803 early_recursion_flag:
12804 .long 0
12805
12806 -ready: .byte 0
12807 -
12808 int_msg:
12809 .asciz "Unknown interrupt or fault at: %p %p %p\n"
12810
12811 @@ -707,7 +811,7 @@ fault_msg:
12812 .word 0 # 32 bit align gdt_desc.address
12813 boot_gdt_descr:
12814 .word __BOOT_DS+7
12815 - .long boot_gdt - __PAGE_OFFSET
12816 + .long pa(boot_gdt)
12817
12818 .word 0 # 32-bit align idt_desc.address
12819 idt_descr:
12820 @@ -718,7 +822,7 @@ idt_descr:
12821 .word 0 # 32 bit align gdt_desc.address
12822 ENTRY(early_gdt_descr)
12823 .word GDT_ENTRIES*8-1
12824 - .long gdt_page /* Overwritten for secondary CPUs */
12825 + .long cpu_gdt_table /* Overwritten for secondary CPUs */
12826
12827 /*
12828 * The boot_gdt must mirror the equivalent in setup.S and is
12829 @@ -727,5 +831,65 @@ ENTRY(early_gdt_descr)
12830 .align L1_CACHE_BYTES
12831 ENTRY(boot_gdt)
12832 .fill GDT_ENTRY_BOOT_CS,8,0
12833 - .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
12834 - .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
12835 + .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
12836 + .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
12837 +
12838 + .align PAGE_SIZE_asm
12839 +ENTRY(cpu_gdt_table)
12840 + .rept NR_CPUS
12841 + .quad 0x0000000000000000 /* NULL descriptor */
12842 + .quad 0x0000000000000000 /* 0x0b reserved */
12843 + .quad 0x0000000000000000 /* 0x13 reserved */
12844 + .quad 0x0000000000000000 /* 0x1b reserved */
12845 +
12846 +#ifdef CONFIG_PAX_KERNEXEC
12847 + .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
12848 +#else
12849 + .quad 0x0000000000000000 /* 0x20 unused */
12850 +#endif
12851 +
12852 + .quad 0x0000000000000000 /* 0x28 unused */
12853 + .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
12854 + .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
12855 + .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
12856 + .quad 0x0000000000000000 /* 0x4b reserved */
12857 + .quad 0x0000000000000000 /* 0x53 reserved */
12858 + .quad 0x0000000000000000 /* 0x5b reserved */
12859 +
12860 + .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
12861 + .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
12862 + .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
12863 + .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
12864 +
12865 + .quad 0x0000000000000000 /* 0x80 TSS descriptor */
12866 + .quad 0x0000000000000000 /* 0x88 LDT descriptor */
12867 +
12868 + /*
12869 + * Segments used for calling PnP BIOS have byte granularity.
12870 + * The code segments and data segments have fixed 64k limits,
12871 + * the transfer segment sizes are set at run time.
12872 + */
12873 + .quad 0x00409b000000ffff /* 0x90 32-bit code */
12874 + .quad 0x00009b000000ffff /* 0x98 16-bit code */
12875 + .quad 0x000093000000ffff /* 0xa0 16-bit data */
12876 + .quad 0x0000930000000000 /* 0xa8 16-bit data */
12877 + .quad 0x0000930000000000 /* 0xb0 16-bit data */
12878 +
12879 + /*
12880 + * The APM segments have byte granularity and their bases
12881 + * are set at run time. All have 64k limits.
12882 + */
12883 + .quad 0x00409b000000ffff /* 0xb8 APM CS code */
12884 + .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
12885 + .quad 0x004093000000ffff /* 0xc8 APM DS data */
12886 +
12887 + .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
12888 + .quad 0x0040930000000000 /* 0xd8 - PERCPU */
12889 + .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
12890 + .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
12891 + .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
12892 + .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
12893 +
12894 + /* Be sure this is zeroed to avoid false validations in Xen */
12895 + .fill PAGE_SIZE_asm - GDT_SIZE,1,0
12896 + .endr
12897 diff -urNp linux-2.6.39.4/arch/x86/kernel/head_64.S linux-2.6.39.4/arch/x86/kernel/head_64.S
12898 --- linux-2.6.39.4/arch/x86/kernel/head_64.S 2011-05-19 00:06:34.000000000 -0400
12899 +++ linux-2.6.39.4/arch/x86/kernel/head_64.S 2011-08-05 19:44:33.000000000 -0400
12900 @@ -19,6 +19,7 @@
12901 #include <asm/cache.h>
12902 #include <asm/processor-flags.h>
12903 #include <asm/percpu.h>
12904 +#include <asm/cpufeature.h>
12905
12906 #ifdef CONFIG_PARAVIRT
12907 #include <asm/asm-offsets.h>
12908 @@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
12909 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
12910 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
12911 L3_START_KERNEL = pud_index(__START_KERNEL_map)
12912 +L4_VMALLOC_START = pgd_index(VMALLOC_START)
12913 +L3_VMALLOC_START = pud_index(VMALLOC_START)
12914 +L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
12915 +L3_VMEMMAP_START = pud_index(VMEMMAP_START)
12916
12917 .text
12918 __HEAD
12919 @@ -85,35 +90,22 @@ startup_64:
12920 */
12921 addq %rbp, init_level4_pgt + 0(%rip)
12922 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
12923 + addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
12924 + addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
12925 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
12926
12927 addq %rbp, level3_ident_pgt + 0(%rip)
12928 +#ifndef CONFIG_XEN
12929 + addq %rbp, level3_ident_pgt + 8(%rip)
12930 +#endif
12931
12932 - addq %rbp, level3_kernel_pgt + (510*8)(%rip)
12933 - addq %rbp, level3_kernel_pgt + (511*8)(%rip)
12934 + addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
12935
12936 - addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12937 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
12938 + addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
12939
12940 - /* Add an Identity mapping if I am above 1G */
12941 - leaq _text(%rip), %rdi
12942 - andq $PMD_PAGE_MASK, %rdi
12943 -
12944 - movq %rdi, %rax
12945 - shrq $PUD_SHIFT, %rax
12946 - andq $(PTRS_PER_PUD - 1), %rax
12947 - jz ident_complete
12948 -
12949 - leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
12950 - leaq level3_ident_pgt(%rip), %rbx
12951 - movq %rdx, 0(%rbx, %rax, 8)
12952 -
12953 - movq %rdi, %rax
12954 - shrq $PMD_SHIFT, %rax
12955 - andq $(PTRS_PER_PMD - 1), %rax
12956 - leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
12957 - leaq level2_spare_pgt(%rip), %rbx
12958 - movq %rdx, 0(%rbx, %rax, 8)
12959 -ident_complete:
12960 + addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
12961 + addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
12962
12963 /*
12964 * Fixup the kernel text+data virtual addresses. Note that
12965 @@ -160,8 +152,8 @@ ENTRY(secondary_startup_64)
12966 * after the boot processor executes this code.
12967 */
12968
12969 - /* Enable PAE mode and PGE */
12970 - movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
12971 + /* Enable PAE mode and PSE/PGE */
12972 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
12973 movq %rax, %cr4
12974
12975 /* Setup early boot stage 4 level pagetables. */
12976 @@ -183,9 +175,14 @@ ENTRY(secondary_startup_64)
12977 movl $MSR_EFER, %ecx
12978 rdmsr
12979 btsl $_EFER_SCE, %eax /* Enable System Call */
12980 - btl $20,%edi /* No Execute supported? */
12981 + btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
12982 jnc 1f
12983 btsl $_EFER_NX, %eax
12984 + leaq init_level4_pgt(%rip), %rdi
12985 + btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
12986 + btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
12987 + btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
12988 + btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
12989 1: wrmsr /* Make changes effective */
12990
12991 /* Setup cr0 */
12992 @@ -269,7 +266,7 @@ ENTRY(secondary_startup_64)
12993 bad_address:
12994 jmp bad_address
12995
12996 - .section ".init.text","ax"
12997 + __INIT
12998 #ifdef CONFIG_EARLY_PRINTK
12999 .globl early_idt_handlers
13000 early_idt_handlers:
13001 @@ -314,18 +311,23 @@ ENTRY(early_idt_handler)
13002 #endif /* EARLY_PRINTK */
13003 1: hlt
13004 jmp 1b
13005 + .previous
13006
13007 #ifdef CONFIG_EARLY_PRINTK
13008 + __INITDATA
13009 early_recursion_flag:
13010 .long 0
13011 + .previous
13012
13013 + .section .rodata,"a",@progbits
13014 early_idt_msg:
13015 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
13016 early_idt_ripmsg:
13017 .asciz "RIP %s\n"
13018 -#endif /* CONFIG_EARLY_PRINTK */
13019 .previous
13020 +#endif /* CONFIG_EARLY_PRINTK */
13021
13022 + .section .rodata,"a",@progbits
13023 #define NEXT_PAGE(name) \
13024 .balign PAGE_SIZE; \
13025 ENTRY(name)
13026 @@ -338,7 +340,6 @@ ENTRY(name)
13027 i = i + 1 ; \
13028 .endr
13029
13030 - .data
13031 /*
13032 * This default setting generates an ident mapping at address 0x100000
13033 * and a mapping for the kernel that precisely maps virtual address
13034 @@ -349,13 +350,36 @@ NEXT_PAGE(init_level4_pgt)
13035 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13036 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
13037 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13038 + .org init_level4_pgt + L4_VMALLOC_START*8, 0
13039 + .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
13040 + .org init_level4_pgt + L4_VMEMMAP_START*8, 0
13041 + .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13042 .org init_level4_pgt + L4_START_KERNEL*8, 0
13043 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
13044 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
13045
13046 +#ifdef CONFIG_PAX_PER_CPU_PGD
13047 +NEXT_PAGE(cpu_pgd)
13048 + .rept NR_CPUS
13049 + .fill 512,8,0
13050 + .endr
13051 +#endif
13052 +
13053 NEXT_PAGE(level3_ident_pgt)
13054 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
13055 +#ifdef CONFIG_XEN
13056 .fill 511,8,0
13057 +#else
13058 + .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
13059 + .fill 510,8,0
13060 +#endif
13061 +
13062 +NEXT_PAGE(level3_vmalloc_pgt)
13063 + .fill 512,8,0
13064 +
13065 +NEXT_PAGE(level3_vmemmap_pgt)
13066 + .fill L3_VMEMMAP_START,8,0
13067 + .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
13068
13069 NEXT_PAGE(level3_kernel_pgt)
13070 .fill L3_START_KERNEL,8,0
13071 @@ -363,20 +387,23 @@ NEXT_PAGE(level3_kernel_pgt)
13072 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
13073 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13074
13075 +NEXT_PAGE(level2_vmemmap_pgt)
13076 + .fill 512,8,0
13077 +
13078 NEXT_PAGE(level2_fixmap_pgt)
13079 - .fill 506,8,0
13080 - .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
13081 - /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
13082 - .fill 5,8,0
13083 + .fill 507,8,0
13084 + .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
13085 + /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
13086 + .fill 4,8,0
13087
13088 -NEXT_PAGE(level1_fixmap_pgt)
13089 +NEXT_PAGE(level1_vsyscall_pgt)
13090 .fill 512,8,0
13091
13092 -NEXT_PAGE(level2_ident_pgt)
13093 - /* Since I easily can, map the first 1G.
13094 + /* Since I easily can, map the first 2G.
13095 * Don't set NX because code runs from these pages.
13096 */
13097 - PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
13098 +NEXT_PAGE(level2_ident_pgt)
13099 + PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
13100
13101 NEXT_PAGE(level2_kernel_pgt)
13102 /*
13103 @@ -389,33 +416,55 @@ NEXT_PAGE(level2_kernel_pgt)
13104 * If you want to increase this then increase MODULES_VADDR
13105 * too.)
13106 */
13107 - PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
13108 - KERNEL_IMAGE_SIZE/PMD_SIZE)
13109 -
13110 -NEXT_PAGE(level2_spare_pgt)
13111 - .fill 512, 8, 0
13112 + PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
13113
13114 #undef PMDS
13115 #undef NEXT_PAGE
13116
13117 - .data
13118 + .align PAGE_SIZE
13119 +ENTRY(cpu_gdt_table)
13120 + .rept NR_CPUS
13121 + .quad 0x0000000000000000 /* NULL descriptor */
13122 + .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
13123 + .quad 0x00af9b000000ffff /* __KERNEL_CS */
13124 + .quad 0x00cf93000000ffff /* __KERNEL_DS */
13125 + .quad 0x00cffb000000ffff /* __USER32_CS */
13126 + .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
13127 + .quad 0x00affb000000ffff /* __USER_CS */
13128 +
13129 +#ifdef CONFIG_PAX_KERNEXEC
13130 + .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
13131 +#else
13132 + .quad 0x0 /* unused */
13133 +#endif
13134 +
13135 + .quad 0,0 /* TSS */
13136 + .quad 0,0 /* LDT */
13137 + .quad 0,0,0 /* three TLS descriptors */
13138 + .quad 0x0000f40000000000 /* node/CPU stored in limit */
13139 + /* asm/segment.h:GDT_ENTRIES must match this */
13140 +
13141 + /* zero the remaining page */
13142 + .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
13143 + .endr
13144 +
13145 .align 16
13146 .globl early_gdt_descr
13147 early_gdt_descr:
13148 .word GDT_ENTRIES*8-1
13149 early_gdt_descr_base:
13150 - .quad INIT_PER_CPU_VAR(gdt_page)
13151 + .quad cpu_gdt_table
13152
13153 ENTRY(phys_base)
13154 /* This must match the first entry in level2_kernel_pgt */
13155 .quad 0x0000000000000000
13156
13157 #include "../../x86/xen/xen-head.S"
13158 -
13159 - .section .bss, "aw", @nobits
13160 +
13161 + .section .rodata,"a",@progbits
13162 .align L1_CACHE_BYTES
13163 ENTRY(idt_table)
13164 - .skip IDT_ENTRIES * 16
13165 + .fill 512,8,0
13166
13167 __PAGE_ALIGNED_BSS
13168 .align PAGE_SIZE
13169 diff -urNp linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c
13170 --- linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c 2011-05-19 00:06:34.000000000 -0400
13171 +++ linux-2.6.39.4/arch/x86/kernel/i386_ksyms_32.c 2011-08-05 19:44:33.000000000 -0400
13172 @@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
13173 EXPORT_SYMBOL(cmpxchg8b_emu);
13174 #endif
13175
13176 +EXPORT_SYMBOL_GPL(cpu_gdt_table);
13177 +
13178 /* Networking helper routines. */
13179 EXPORT_SYMBOL(csum_partial_copy_generic);
13180 +EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
13181 +EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
13182
13183 EXPORT_SYMBOL(__get_user_1);
13184 EXPORT_SYMBOL(__get_user_2);
13185 @@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
13186
13187 EXPORT_SYMBOL(csum_partial);
13188 EXPORT_SYMBOL(empty_zero_page);
13189 +
13190 +#ifdef CONFIG_PAX_KERNEXEC
13191 +EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
13192 +#endif
13193 diff -urNp linux-2.6.39.4/arch/x86/kernel/i8259.c linux-2.6.39.4/arch/x86/kernel/i8259.c
13194 --- linux-2.6.39.4/arch/x86/kernel/i8259.c 2011-05-19 00:06:34.000000000 -0400
13195 +++ linux-2.6.39.4/arch/x86/kernel/i8259.c 2011-08-05 19:44:33.000000000 -0400
13196 @@ -210,7 +210,7 @@ spurious_8259A_irq:
13197 "spurious 8259A interrupt: IRQ%d.\n", irq);
13198 spurious_irq_mask |= irqmask;
13199 }
13200 - atomic_inc(&irq_err_count);
13201 + atomic_inc_unchecked(&irq_err_count);
13202 /*
13203 * Theoretically we do not have to handle this IRQ,
13204 * but in Linux this does not cause problems and is
13205 diff -urNp linux-2.6.39.4/arch/x86/kernel/init_task.c linux-2.6.39.4/arch/x86/kernel/init_task.c
13206 --- linux-2.6.39.4/arch/x86/kernel/init_task.c 2011-05-19 00:06:34.000000000 -0400
13207 +++ linux-2.6.39.4/arch/x86/kernel/init_task.c 2011-08-05 19:44:33.000000000 -0400
13208 @@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
13209 * way process stacks are handled. This is done by having a special
13210 * "init_task" linker map entry..
13211 */
13212 -union thread_union init_thread_union __init_task_data =
13213 - { INIT_THREAD_INFO(init_task) };
13214 +union thread_union init_thread_union __init_task_data;
13215
13216 /*
13217 * Initial task structure.
13218 @@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
13219 * section. Since TSS's are completely CPU-local, we want them
13220 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
13221 */
13222 -DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
13223 -
13224 +struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
13225 +EXPORT_SYMBOL(init_tss);
13226 diff -urNp linux-2.6.39.4/arch/x86/kernel/ioport.c linux-2.6.39.4/arch/x86/kernel/ioport.c
13227 --- linux-2.6.39.4/arch/x86/kernel/ioport.c 2011-05-19 00:06:34.000000000 -0400
13228 +++ linux-2.6.39.4/arch/x86/kernel/ioport.c 2011-08-05 19:44:33.000000000 -0400
13229 @@ -6,6 +6,7 @@
13230 #include <linux/sched.h>
13231 #include <linux/kernel.h>
13232 #include <linux/capability.h>
13233 +#include <linux/security.h>
13234 #include <linux/errno.h>
13235 #include <linux/types.h>
13236 #include <linux/ioport.h>
13237 @@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long
13238
13239 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
13240 return -EINVAL;
13241 +#ifdef CONFIG_GRKERNSEC_IO
13242 + if (turn_on && grsec_disable_privio) {
13243 + gr_handle_ioperm();
13244 + return -EPERM;
13245 + }
13246 +#endif
13247 if (turn_on && !capable(CAP_SYS_RAWIO))
13248 return -EPERM;
13249
13250 @@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long
13251 * because the ->io_bitmap_max value must match the bitmap
13252 * contents:
13253 */
13254 - tss = &per_cpu(init_tss, get_cpu());
13255 + tss = init_tss + get_cpu();
13256
13257 if (turn_on)
13258 bitmap_clear(t->io_bitmap_ptr, from, num);
13259 @@ -102,6 +109,12 @@ long sys_iopl(unsigned int level, struct
13260 return -EINVAL;
13261 /* Trying to gain more privileges? */
13262 if (level > old) {
13263 +#ifdef CONFIG_GRKERNSEC_IO
13264 + if (grsec_disable_privio) {
13265 + gr_handle_iopl();
13266 + return -EPERM;
13267 + }
13268 +#endif
13269 if (!capable(CAP_SYS_RAWIO))
13270 return -EPERM;
13271 }
13272 diff -urNp linux-2.6.39.4/arch/x86/kernel/irq_32.c linux-2.6.39.4/arch/x86/kernel/irq_32.c
13273 --- linux-2.6.39.4/arch/x86/kernel/irq_32.c 2011-05-19 00:06:34.000000000 -0400
13274 +++ linux-2.6.39.4/arch/x86/kernel/irq_32.c 2011-08-05 19:44:33.000000000 -0400
13275 @@ -36,7 +36,7 @@ static int check_stack_overflow(void)
13276 __asm__ __volatile__("andl %%esp,%0" :
13277 "=r" (sp) : "0" (THREAD_SIZE - 1));
13278
13279 - return sp < (sizeof(struct thread_info) + STACK_WARN);
13280 + return sp < STACK_WARN;
13281 }
13282
13283 static void print_stack_overflow(void)
13284 @@ -54,8 +54,8 @@ static inline void print_stack_overflow(
13285 * per-CPU IRQ handling contexts (thread information and stack)
13286 */
13287 union irq_ctx {
13288 - struct thread_info tinfo;
13289 - u32 stack[THREAD_SIZE/sizeof(u32)];
13290 + unsigned long previous_esp;
13291 + u32 stack[THREAD_SIZE/sizeof(u32)];
13292 } __attribute__((aligned(THREAD_SIZE)));
13293
13294 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
13295 @@ -75,10 +75,9 @@ static void call_on_stack(void *func, vo
13296 static inline int
13297 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
13298 {
13299 - union irq_ctx *curctx, *irqctx;
13300 + union irq_ctx *irqctx;
13301 u32 *isp, arg1, arg2;
13302
13303 - curctx = (union irq_ctx *) current_thread_info();
13304 irqctx = __this_cpu_read(hardirq_ctx);
13305
13306 /*
13307 @@ -87,21 +86,16 @@ execute_on_irq_stack(int overflow, struc
13308 * handler) we can't do that and just have to keep using the
13309 * current stack (which is the irq stack already after all)
13310 */
13311 - if (unlikely(curctx == irqctx))
13312 + if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
13313 return 0;
13314
13315 /* build the stack frame on the IRQ stack */
13316 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13317 - irqctx->tinfo.task = curctx->tinfo.task;
13318 - irqctx->tinfo.previous_esp = current_stack_pointer;
13319 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13320 + irqctx->previous_esp = current_stack_pointer;
13321
13322 - /*
13323 - * Copy the softirq bits in preempt_count so that the
13324 - * softirq checks work in the hardirq context.
13325 - */
13326 - irqctx->tinfo.preempt_count =
13327 - (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
13328 - (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
13329 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13330 + __set_fs(MAKE_MM_SEG(0));
13331 +#endif
13332
13333 if (unlikely(overflow))
13334 call_on_stack(print_stack_overflow, isp);
13335 @@ -113,6 +107,11 @@ execute_on_irq_stack(int overflow, struc
13336 : "0" (irq), "1" (desc), "2" (isp),
13337 "D" (desc->handle_irq)
13338 : "memory", "cc", "ecx");
13339 +
13340 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13341 + __set_fs(current_thread_info()->addr_limit);
13342 +#endif
13343 +
13344 return 1;
13345 }
13346
13347 @@ -121,29 +120,11 @@ execute_on_irq_stack(int overflow, struc
13348 */
13349 void __cpuinit irq_ctx_init(int cpu)
13350 {
13351 - union irq_ctx *irqctx;
13352 -
13353 if (per_cpu(hardirq_ctx, cpu))
13354 return;
13355
13356 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13357 - THREAD_FLAGS,
13358 - THREAD_ORDER));
13359 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13360 - irqctx->tinfo.cpu = cpu;
13361 - irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
13362 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13363 -
13364 - per_cpu(hardirq_ctx, cpu) = irqctx;
13365 -
13366 - irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
13367 - THREAD_FLAGS,
13368 - THREAD_ORDER));
13369 - memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
13370 - irqctx->tinfo.cpu = cpu;
13371 - irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
13372 -
13373 - per_cpu(softirq_ctx, cpu) = irqctx;
13374 + per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13375 + per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREAD_FLAGS, THREAD_ORDER));
13376
13377 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
13378 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
13379 @@ -152,7 +133,6 @@ void __cpuinit irq_ctx_init(int cpu)
13380 asmlinkage void do_softirq(void)
13381 {
13382 unsigned long flags;
13383 - struct thread_info *curctx;
13384 union irq_ctx *irqctx;
13385 u32 *isp;
13386
13387 @@ -162,15 +142,22 @@ asmlinkage void do_softirq(void)
13388 local_irq_save(flags);
13389
13390 if (local_softirq_pending()) {
13391 - curctx = current_thread_info();
13392 irqctx = __this_cpu_read(softirq_ctx);
13393 - irqctx->tinfo.task = curctx->task;
13394 - irqctx->tinfo.previous_esp = current_stack_pointer;
13395 + irqctx->previous_esp = current_stack_pointer;
13396
13397 /* build the stack frame on the softirq stack */
13398 - isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
13399 + isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
13400 +
13401 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13402 + __set_fs(MAKE_MM_SEG(0));
13403 +#endif
13404
13405 call_on_stack(__do_softirq, isp);
13406 +
13407 +#ifdef CONFIG_PAX_MEMORY_UDEREF
13408 + __set_fs(current_thread_info()->addr_limit);
13409 +#endif
13410 +
13411 /*
13412 * Shouldn't happen, we returned above if in_interrupt():
13413 */
13414 diff -urNp linux-2.6.39.4/arch/x86/kernel/irq.c linux-2.6.39.4/arch/x86/kernel/irq.c
13415 --- linux-2.6.39.4/arch/x86/kernel/irq.c 2011-05-19 00:06:34.000000000 -0400
13416 +++ linux-2.6.39.4/arch/x86/kernel/irq.c 2011-08-05 19:44:33.000000000 -0400
13417 @@ -17,7 +17,7 @@
13418 #include <asm/mce.h>
13419 #include <asm/hw_irq.h>
13420
13421 -atomic_t irq_err_count;
13422 +atomic_unchecked_t irq_err_count;
13423
13424 /* Function pointer for generic interrupt vector handling */
13425 void (*x86_platform_ipi_callback)(void) = NULL;
13426 @@ -116,9 +116,9 @@ int arch_show_interrupts(struct seq_file
13427 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
13428 seq_printf(p, " Machine check polls\n");
13429 #endif
13430 - seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
13431 + seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
13432 #if defined(CONFIG_X86_IO_APIC)
13433 - seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
13434 + seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
13435 #endif
13436 return 0;
13437 }
13438 @@ -158,10 +158,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
13439
13440 u64 arch_irq_stat(void)
13441 {
13442 - u64 sum = atomic_read(&irq_err_count);
13443 + u64 sum = atomic_read_unchecked(&irq_err_count);
13444
13445 #ifdef CONFIG_X86_IO_APIC
13446 - sum += atomic_read(&irq_mis_count);
13447 + sum += atomic_read_unchecked(&irq_mis_count);
13448 #endif
13449 return sum;
13450 }
13451 diff -urNp linux-2.6.39.4/arch/x86/kernel/kgdb.c linux-2.6.39.4/arch/x86/kernel/kgdb.c
13452 --- linux-2.6.39.4/arch/x86/kernel/kgdb.c 2011-05-19 00:06:34.000000000 -0400
13453 +++ linux-2.6.39.4/arch/x86/kernel/kgdb.c 2011-08-05 20:34:06.000000000 -0400
13454 @@ -124,11 +124,11 @@ char *dbg_get_reg(int regno, void *mem,
13455 #ifdef CONFIG_X86_32
13456 switch (regno) {
13457 case GDB_SS:
13458 - if (!user_mode_vm(regs))
13459 + if (!user_mode(regs))
13460 *(unsigned long *)mem = __KERNEL_DS;
13461 break;
13462 case GDB_SP:
13463 - if (!user_mode_vm(regs))
13464 + if (!user_mode(regs))
13465 *(unsigned long *)mem = kernel_stack_pointer(regs);
13466 break;
13467 case GDB_GS:
13468 @@ -473,12 +473,12 @@ int kgdb_arch_handle_exception(int e_vec
13469 case 'k':
13470 /* clear the trace bit */
13471 linux_regs->flags &= ~X86_EFLAGS_TF;
13472 - atomic_set(&kgdb_cpu_doing_single_step, -1);
13473 + atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
13474
13475 /* set the trace bit if we're stepping */
13476 if (remcomInBuffer[0] == 's') {
13477 linux_regs->flags |= X86_EFLAGS_TF;
13478 - atomic_set(&kgdb_cpu_doing_single_step,
13479 + atomic_set_unchecked(&kgdb_cpu_doing_single_step,
13480 raw_smp_processor_id());
13481 }
13482
13483 @@ -534,7 +534,7 @@ static int __kgdb_notify(struct die_args
13484 return NOTIFY_DONE;
13485
13486 case DIE_DEBUG:
13487 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
13488 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
13489 if (user_mode(regs))
13490 return single_step_cont(regs, args);
13491 break;
13492 diff -urNp linux-2.6.39.4/arch/x86/kernel/kprobes.c linux-2.6.39.4/arch/x86/kernel/kprobes.c
13493 --- linux-2.6.39.4/arch/x86/kernel/kprobes.c 2011-05-19 00:06:34.000000000 -0400
13494 +++ linux-2.6.39.4/arch/x86/kernel/kprobes.c 2011-08-05 19:44:33.000000000 -0400
13495 @@ -115,8 +115,11 @@ static void __kprobes __synthesize_relat
13496 } __attribute__((packed)) *insn;
13497
13498 insn = (struct __arch_relative_insn *)from;
13499 +
13500 + pax_open_kernel();
13501 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
13502 insn->op = op;
13503 + pax_close_kernel();
13504 }
13505
13506 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
13507 @@ -153,7 +156,7 @@ static int __kprobes can_boost(kprobe_op
13508 kprobe_opcode_t opcode;
13509 kprobe_opcode_t *orig_opcodes = opcodes;
13510
13511 - if (search_exception_tables((unsigned long)opcodes))
13512 + if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
13513 return 0; /* Page fault may occur on this address. */
13514
13515 retry:
13516 @@ -314,7 +317,9 @@ static int __kprobes __copy_instruction(
13517 }
13518 }
13519 insn_get_length(&insn);
13520 + pax_open_kernel();
13521 memcpy(dest, insn.kaddr, insn.length);
13522 + pax_close_kernel();
13523
13524 #ifdef CONFIG_X86_64
13525 if (insn_rip_relative(&insn)) {
13526 @@ -338,7 +343,9 @@ static int __kprobes __copy_instruction(
13527 (u8 *) dest;
13528 BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */
13529 disp = (u8 *) dest + insn_offset_displacement(&insn);
13530 + pax_open_kernel();
13531 *(s32 *) disp = (s32) newdisp;
13532 + pax_close_kernel();
13533 }
13534 #endif
13535 return insn.length;
13536 @@ -352,12 +359,12 @@ static void __kprobes arch_copy_kprobe(s
13537 */
13538 __copy_instruction(p->ainsn.insn, p->addr, 0);
13539
13540 - if (can_boost(p->addr))
13541 + if (can_boost(ktla_ktva(p->addr)))
13542 p->ainsn.boostable = 0;
13543 else
13544 p->ainsn.boostable = -1;
13545
13546 - p->opcode = *p->addr;
13547 + p->opcode = *(ktla_ktva(p->addr));
13548 }
13549
13550 int __kprobes arch_prepare_kprobe(struct kprobe *p)
13551 @@ -474,7 +481,7 @@ static void __kprobes setup_singlestep(s
13552 * nor set current_kprobe, because it doesn't use single
13553 * stepping.
13554 */
13555 - regs->ip = (unsigned long)p->ainsn.insn;
13556 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13557 preempt_enable_no_resched();
13558 return;
13559 }
13560 @@ -493,7 +500,7 @@ static void __kprobes setup_singlestep(s
13561 if (p->opcode == BREAKPOINT_INSTRUCTION)
13562 regs->ip = (unsigned long)p->addr;
13563 else
13564 - regs->ip = (unsigned long)p->ainsn.insn;
13565 + regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
13566 }
13567
13568 /*
13569 @@ -572,7 +579,7 @@ static int __kprobes kprobe_handler(stru
13570 setup_singlestep(p, regs, kcb, 0);
13571 return 1;
13572 }
13573 - } else if (*addr != BREAKPOINT_INSTRUCTION) {
13574 + } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
13575 /*
13576 * The breakpoint instruction was removed right
13577 * after we hit it. Another cpu has removed
13578 @@ -817,7 +824,7 @@ static void __kprobes resume_execution(s
13579 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
13580 {
13581 unsigned long *tos = stack_addr(regs);
13582 - unsigned long copy_ip = (unsigned long)p->ainsn.insn;
13583 + unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
13584 unsigned long orig_ip = (unsigned long)p->addr;
13585 kprobe_opcode_t *insn = p->ainsn.insn;
13586
13587 @@ -999,7 +1006,7 @@ int __kprobes kprobe_exceptions_notify(s
13588 struct die_args *args = data;
13589 int ret = NOTIFY_DONE;
13590
13591 - if (args->regs && user_mode_vm(args->regs))
13592 + if (args->regs && user_mode(args->regs))
13593 return ret;
13594
13595 switch (val) {
13596 @@ -1381,7 +1388,7 @@ int __kprobes arch_prepare_optimized_kpr
13597 * Verify if the address gap is in 2GB range, because this uses
13598 * a relative jump.
13599 */
13600 - rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
13601 + rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
13602 if (abs(rel) > 0x7fffffff)
13603 return -ERANGE;
13604
13605 @@ -1402,11 +1409,11 @@ int __kprobes arch_prepare_optimized_kpr
13606 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
13607
13608 /* Set probe function call */
13609 - synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
13610 + synthesize_relcall(buf + TMPL_CALL_IDX, ktla_ktva(optimized_callback));
13611
13612 /* Set returning jmp instruction at the tail of out-of-line buffer */
13613 synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
13614 - (u8 *)op->kp.addr + op->optinsn.size);
13615 + (u8 *)ktla_ktva(op->kp.addr) + op->optinsn.size);
13616
13617 flush_icache_range((unsigned long) buf,
13618 (unsigned long) buf + TMPL_END_IDX +
13619 @@ -1428,7 +1435,7 @@ static void __kprobes setup_optimize_kpr
13620 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
13621
13622 /* Backup instructions which will be replaced by jump address */
13623 - memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
13624 + memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
13625 RELATIVE_ADDR_SIZE);
13626
13627 insn_buf[0] = RELATIVEJUMP_OPCODE;
13628 diff -urNp linux-2.6.39.4/arch/x86/kernel/ldt.c linux-2.6.39.4/arch/x86/kernel/ldt.c
13629 --- linux-2.6.39.4/arch/x86/kernel/ldt.c 2011-05-19 00:06:34.000000000 -0400
13630 +++ linux-2.6.39.4/arch/x86/kernel/ldt.c 2011-08-05 19:44:33.000000000 -0400
13631 @@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i
13632 if (reload) {
13633 #ifdef CONFIG_SMP
13634 preempt_disable();
13635 - load_LDT(pc);
13636 + load_LDT_nolock(pc);
13637 if (!cpumask_equal(mm_cpumask(current->mm),
13638 cpumask_of(smp_processor_id())))
13639 smp_call_function(flush_ldt, current->mm, 1);
13640 preempt_enable();
13641 #else
13642 - load_LDT(pc);
13643 + load_LDT_nolock(pc);
13644 #endif
13645 }
13646 if (oldsize) {
13647 @@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t
13648 return err;
13649
13650 for (i = 0; i < old->size; i++)
13651 - write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
13652 + write_ldt_entry(new->ldt, i, old->ldt + i);
13653 return 0;
13654 }
13655
13656 @@ -116,6 +116,24 @@ int init_new_context(struct task_struct
13657 retval = copy_ldt(&mm->context, &old_mm->context);
13658 mutex_unlock(&old_mm->context.lock);
13659 }
13660 +
13661 + if (tsk == current) {
13662 + mm->context.vdso = 0;
13663 +
13664 +#ifdef CONFIG_X86_32
13665 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13666 + mm->context.user_cs_base = 0UL;
13667 + mm->context.user_cs_limit = ~0UL;
13668 +
13669 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13670 + cpus_clear(mm->context.cpu_user_cs_mask);
13671 +#endif
13672 +
13673 +#endif
13674 +#endif
13675 +
13676 + }
13677 +
13678 return retval;
13679 }
13680
13681 @@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u
13682 }
13683 }
13684
13685 +#ifdef CONFIG_PAX_SEGMEXEC
13686 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
13687 + error = -EINVAL;
13688 + goto out_unlock;
13689 + }
13690 +#endif
13691 +
13692 fill_ldt(&ldt, &ldt_info);
13693 if (oldmode)
13694 ldt.avl = 0;
13695 diff -urNp linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c
13696 --- linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c 2011-05-19 00:06:34.000000000 -0400
13697 +++ linux-2.6.39.4/arch/x86/kernel/machine_kexec_32.c 2011-08-05 19:44:33.000000000 -0400
13698 @@ -27,7 +27,7 @@
13699 #include <asm/cacheflush.h>
13700 #include <asm/debugreg.h>
13701
13702 -static void set_idt(void *newidt, __u16 limit)
13703 +static void set_idt(struct desc_struct *newidt, __u16 limit)
13704 {
13705 struct desc_ptr curidt;
13706
13707 @@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16
13708 }
13709
13710
13711 -static void set_gdt(void *newgdt, __u16 limit)
13712 +static void set_gdt(struct desc_struct *newgdt, __u16 limit)
13713 {
13714 struct desc_ptr curgdt;
13715
13716 @@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
13717 }
13718
13719 control_page = page_address(image->control_code_page);
13720 - memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
13721 + memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
13722
13723 relocate_kernel_ptr = control_page;
13724 page_list[PA_CONTROL_PAGE] = __pa(control_page);
13725 diff -urNp linux-2.6.39.4/arch/x86/kernel/microcode_intel.c linux-2.6.39.4/arch/x86/kernel/microcode_intel.c
13726 --- linux-2.6.39.4/arch/x86/kernel/microcode_intel.c 2011-05-19 00:06:34.000000000 -0400
13727 +++ linux-2.6.39.4/arch/x86/kernel/microcode_intel.c 2011-08-05 20:34:06.000000000 -0400
13728 @@ -440,13 +440,13 @@ static enum ucode_state request_microcod
13729
13730 static int get_ucode_user(void *to, const void *from, size_t n)
13731 {
13732 - return copy_from_user(to, from, n);
13733 + return copy_from_user(to, (__force const void __user *)from, n);
13734 }
13735
13736 static enum ucode_state
13737 request_microcode_user(int cpu, const void __user *buf, size_t size)
13738 {
13739 - return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
13740 + return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
13741 }
13742
13743 static void microcode_fini_cpu(int cpu)
13744 diff -urNp linux-2.6.39.4/arch/x86/kernel/module.c linux-2.6.39.4/arch/x86/kernel/module.c
13745 --- linux-2.6.39.4/arch/x86/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
13746 +++ linux-2.6.39.4/arch/x86/kernel/module.c 2011-08-05 19:44:33.000000000 -0400
13747 @@ -35,21 +35,66 @@
13748 #define DEBUGP(fmt...)
13749 #endif
13750
13751 -void *module_alloc(unsigned long size)
13752 +static inline void *__module_alloc(unsigned long size, pgprot_t prot)
13753 {
13754 if (PAGE_ALIGN(size) > MODULES_LEN)
13755 return NULL;
13756 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
13757 - GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
13758 + GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
13759 -1, __builtin_return_address(0));
13760 }
13761
13762 +void *module_alloc(unsigned long size)
13763 +{
13764 +
13765 +#ifdef CONFIG_PAX_KERNEXEC
13766 + return __module_alloc(size, PAGE_KERNEL);
13767 +#else
13768 + return __module_alloc(size, PAGE_KERNEL_EXEC);
13769 +#endif
13770 +
13771 +}
13772 +
13773 /* Free memory returned from module_alloc */
13774 void module_free(struct module *mod, void *module_region)
13775 {
13776 vfree(module_region);
13777 }
13778
13779 +#ifdef CONFIG_PAX_KERNEXEC
13780 +#ifdef CONFIG_X86_32
13781 +void *module_alloc_exec(unsigned long size)
13782 +{
13783 + struct vm_struct *area;
13784 +
13785 + if (size == 0)
13786 + return NULL;
13787 +
13788 + area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
13789 + return area ? area->addr : NULL;
13790 +}
13791 +EXPORT_SYMBOL(module_alloc_exec);
13792 +
13793 +void module_free_exec(struct module *mod, void *module_region)
13794 +{
13795 + vunmap(module_region);
13796 +}
13797 +EXPORT_SYMBOL(module_free_exec);
13798 +#else
13799 +void module_free_exec(struct module *mod, void *module_region)
13800 +{
13801 + module_free(mod, module_region);
13802 +}
13803 +EXPORT_SYMBOL(module_free_exec);
13804 +
13805 +void *module_alloc_exec(unsigned long size)
13806 +{
13807 + return __module_alloc(size, PAGE_KERNEL_RX);
13808 +}
13809 +EXPORT_SYMBOL(module_alloc_exec);
13810 +#endif
13811 +#endif
13812 +
13813 /* We don't need anything special. */
13814 int module_frob_arch_sections(Elf_Ehdr *hdr,
13815 Elf_Shdr *sechdrs,
13816 @@ -69,14 +114,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13817 unsigned int i;
13818 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
13819 Elf32_Sym *sym;
13820 - uint32_t *location;
13821 + uint32_t *plocation, location;
13822
13823 DEBUGP("Applying relocate section %u to %u\n", relsec,
13824 sechdrs[relsec].sh_info);
13825 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
13826 /* This is where to make the change */
13827 - location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
13828 - + rel[i].r_offset;
13829 + plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
13830 + location = (uint32_t)plocation;
13831 + if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
13832 + plocation = ktla_ktva((void *)plocation);
13833 /* This is the symbol it is referring to. Note that all
13834 undefined symbols have been resolved. */
13835 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
13836 @@ -85,11 +132,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
13837 switch (ELF32_R_TYPE(rel[i].r_info)) {
13838 case R_386_32:
13839 /* We add the value into the location given */
13840 - *location += sym->st_value;
13841 + pax_open_kernel();
13842 + *plocation += sym->st_value;
13843 + pax_close_kernel();
13844 break;
13845 case R_386_PC32:
13846 /* Add the value, subtract its postition */
13847 - *location += sym->st_value - (uint32_t)location;
13848 + pax_open_kernel();
13849 + *plocation += sym->st_value - location;
13850 + pax_close_kernel();
13851 break;
13852 default:
13853 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
13854 @@ -145,21 +196,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
13855 case R_X86_64_NONE:
13856 break;
13857 case R_X86_64_64:
13858 + pax_open_kernel();
13859 *(u64 *)loc = val;
13860 + pax_close_kernel();
13861 break;
13862 case R_X86_64_32:
13863 + pax_open_kernel();
13864 *(u32 *)loc = val;
13865 + pax_close_kernel();
13866 if (val != *(u32 *)loc)
13867 goto overflow;
13868 break;
13869 case R_X86_64_32S:
13870 + pax_open_kernel();
13871 *(s32 *)loc = val;
13872 + pax_close_kernel();
13873 if ((s64)val != *(s32 *)loc)
13874 goto overflow;
13875 break;
13876 case R_X86_64_PC32:
13877 val -= (u64)loc;
13878 + pax_open_kernel();
13879 *(u32 *)loc = val;
13880 + pax_close_kernel();
13881 +
13882 #if 0
13883 if ((s64)val != *(s32 *)loc)
13884 goto overflow;
13885 diff -urNp linux-2.6.39.4/arch/x86/kernel/paravirt.c linux-2.6.39.4/arch/x86/kernel/paravirt.c
13886 --- linux-2.6.39.4/arch/x86/kernel/paravirt.c 2011-05-19 00:06:34.000000000 -0400
13887 +++ linux-2.6.39.4/arch/x86/kernel/paravirt.c 2011-08-05 19:44:33.000000000 -0400
13888 @@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
13889 {
13890 return x;
13891 }
13892 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13893 +PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
13894 +#endif
13895
13896 void __init default_banner(void)
13897 {
13898 @@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
13899 * corresponding structure. */
13900 static void *get_call_destination(u8 type)
13901 {
13902 - struct paravirt_patch_template tmpl = {
13903 + const struct paravirt_patch_template tmpl = {
13904 .pv_init_ops = pv_init_ops,
13905 .pv_time_ops = pv_time_ops,
13906 .pv_cpu_ops = pv_cpu_ops,
13907 @@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
13908 .pv_lock_ops = pv_lock_ops,
13909 #endif
13910 };
13911 +
13912 + pax_track_stack();
13913 +
13914 return *((void **)&tmpl + type);
13915 }
13916
13917 @@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
13918 if (opfunc == NULL)
13919 /* If there's no function, patch it with a ud2a (BUG) */
13920 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
13921 - else if (opfunc == _paravirt_nop)
13922 + else if (opfunc == (void *)_paravirt_nop)
13923 /* If the operation is a nop, then nop the callsite */
13924 ret = paravirt_patch_nop();
13925
13926 /* identity functions just return their single argument */
13927 - else if (opfunc == _paravirt_ident_32)
13928 + else if (opfunc == (void *)_paravirt_ident_32)
13929 ret = paravirt_patch_ident_32(insnbuf, len);
13930 - else if (opfunc == _paravirt_ident_64)
13931 + else if (opfunc == (void *)_paravirt_ident_64)
13932 ret = paravirt_patch_ident_64(insnbuf, len);
13933 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
13934 + else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
13935 + ret = paravirt_patch_ident_64(insnbuf, len);
13936 +#endif
13937
13938 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
13939 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
13940 @@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
13941 if (insn_len > len || start == NULL)
13942 insn_len = len;
13943 else
13944 - memcpy(insnbuf, start, insn_len);
13945 + memcpy(insnbuf, ktla_ktva(start), insn_len);
13946
13947 return insn_len;
13948 }
13949 @@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
13950 preempt_enable();
13951 }
13952
13953 -struct pv_info pv_info = {
13954 +struct pv_info pv_info __read_only = {
13955 .name = "bare hardware",
13956 .paravirt_enabled = 0,
13957 .kernel_rpl = 0,
13958 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
13959 };
13960
13961 -struct pv_init_ops pv_init_ops = {
13962 +struct pv_init_ops pv_init_ops __read_only = {
13963 .patch = native_patch,
13964 };
13965
13966 -struct pv_time_ops pv_time_ops = {
13967 +struct pv_time_ops pv_time_ops __read_only = {
13968 .sched_clock = native_sched_clock,
13969 };
13970
13971 -struct pv_irq_ops pv_irq_ops = {
13972 +struct pv_irq_ops pv_irq_ops __read_only = {
13973 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
13974 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
13975 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
13976 @@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
13977 #endif
13978 };
13979
13980 -struct pv_cpu_ops pv_cpu_ops = {
13981 +struct pv_cpu_ops pv_cpu_ops __read_only = {
13982 .cpuid = native_cpuid,
13983 .get_debugreg = native_get_debugreg,
13984 .set_debugreg = native_set_debugreg,
13985 @@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
13986 .end_context_switch = paravirt_nop,
13987 };
13988
13989 -struct pv_apic_ops pv_apic_ops = {
13990 +struct pv_apic_ops pv_apic_ops __read_only = {
13991 #ifdef CONFIG_X86_LOCAL_APIC
13992 .startup_ipi_hook = paravirt_nop,
13993 #endif
13994 };
13995
13996 -#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
13997 +#ifdef CONFIG_X86_32
13998 +#ifdef CONFIG_X86_PAE
13999 +/* 64-bit pagetable entries */
14000 +#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
14001 +#else
14002 /* 32-bit pagetable entries */
14003 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
14004 +#endif
14005 #else
14006 /* 64-bit pagetable entries */
14007 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
14008 #endif
14009
14010 -struct pv_mmu_ops pv_mmu_ops = {
14011 +struct pv_mmu_ops pv_mmu_ops __read_only = {
14012
14013 .read_cr2 = native_read_cr2,
14014 .write_cr2 = native_write_cr2,
14015 @@ -465,6 +480,12 @@ struct pv_mmu_ops pv_mmu_ops = {
14016 },
14017
14018 .set_fixmap = native_set_fixmap,
14019 +
14020 +#ifdef CONFIG_PAX_KERNEXEC
14021 + .pax_open_kernel = native_pax_open_kernel,
14022 + .pax_close_kernel = native_pax_close_kernel,
14023 +#endif
14024 +
14025 };
14026
14027 EXPORT_SYMBOL_GPL(pv_time_ops);
14028 diff -urNp linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c
14029 --- linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c 2011-05-19 00:06:34.000000000 -0400
14030 +++ linux-2.6.39.4/arch/x86/kernel/paravirt-spinlocks.c 2011-08-05 19:44:33.000000000 -0400
14031 @@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t
14032 arch_spin_lock(lock);
14033 }
14034
14035 -struct pv_lock_ops pv_lock_ops = {
14036 +struct pv_lock_ops pv_lock_ops __read_only = {
14037 #ifdef CONFIG_SMP
14038 .spin_is_locked = __ticket_spin_is_locked,
14039 .spin_is_contended = __ticket_spin_is_contended,
14040 diff -urNp linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c
14041 --- linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c 2011-05-19 00:06:34.000000000 -0400
14042 +++ linux-2.6.39.4/arch/x86/kernel/pci-iommu_table.c 2011-08-05 19:44:35.000000000 -0400
14043 @@ -2,7 +2,7 @@
14044 #include <asm/iommu_table.h>
14045 #include <linux/string.h>
14046 #include <linux/kallsyms.h>
14047 -
14048 +#include <linux/sched.h>
14049
14050 #define DEBUG 1
14051
14052 @@ -53,6 +53,8 @@ void __init check_iommu_entries(struct i
14053 char sym_p[KSYM_SYMBOL_LEN];
14054 char sym_q[KSYM_SYMBOL_LEN];
14055
14056 + pax_track_stack();
14057 +
14058 /* Simple cyclic dependency checker. */
14059 for (p = start; p < finish; p++) {
14060 q = find_dependents_of(start, finish, p);
14061 diff -urNp linux-2.6.39.4/arch/x86/kernel/process_32.c linux-2.6.39.4/arch/x86/kernel/process_32.c
14062 --- linux-2.6.39.4/arch/x86/kernel/process_32.c 2011-06-25 12:55:22.000000000 -0400
14063 +++ linux-2.6.39.4/arch/x86/kernel/process_32.c 2011-08-05 19:44:35.000000000 -0400
14064 @@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as
14065 unsigned long thread_saved_pc(struct task_struct *tsk)
14066 {
14067 return ((unsigned long *)tsk->thread.sp)[3];
14068 +//XXX return tsk->thread.eip;
14069 }
14070
14071 #ifndef CONFIG_SMP
14072 @@ -126,15 +127,14 @@ void __show_regs(struct pt_regs *regs, i
14073 unsigned long sp;
14074 unsigned short ss, gs;
14075
14076 - if (user_mode_vm(regs)) {
14077 + if (user_mode(regs)) {
14078 sp = regs->sp;
14079 ss = regs->ss & 0xffff;
14080 - gs = get_user_gs(regs);
14081 } else {
14082 sp = kernel_stack_pointer(regs);
14083 savesegment(ss, ss);
14084 - savesegment(gs, gs);
14085 }
14086 + gs = get_user_gs(regs);
14087
14088 show_regs_common();
14089
14090 @@ -196,13 +196,14 @@ int copy_thread(unsigned long clone_flag
14091 struct task_struct *tsk;
14092 int err;
14093
14094 - childregs = task_pt_regs(p);
14095 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
14096 *childregs = *regs;
14097 childregs->ax = 0;
14098 childregs->sp = sp;
14099
14100 p->thread.sp = (unsigned long) childregs;
14101 p->thread.sp0 = (unsigned long) (childregs+1);
14102 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14103
14104 p->thread.ip = (unsigned long) ret_from_fork;
14105
14106 @@ -292,7 +293,7 @@ __switch_to(struct task_struct *prev_p,
14107 struct thread_struct *prev = &prev_p->thread,
14108 *next = &next_p->thread;
14109 int cpu = smp_processor_id();
14110 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14111 + struct tss_struct *tss = init_tss + cpu;
14112 bool preload_fpu;
14113
14114 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
14115 @@ -327,6 +328,10 @@ __switch_to(struct task_struct *prev_p,
14116 */
14117 lazy_save_gs(prev->gs);
14118
14119 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14120 + __set_fs(task_thread_info(next_p)->addr_limit);
14121 +#endif
14122 +
14123 /*
14124 * Load the per-thread Thread-Local Storage descriptor.
14125 */
14126 @@ -362,6 +367,9 @@ __switch_to(struct task_struct *prev_p,
14127 */
14128 arch_end_context_switch(next_p);
14129
14130 + percpu_write(current_task, next_p);
14131 + percpu_write(current_tinfo, &next_p->tinfo);
14132 +
14133 if (preload_fpu)
14134 __math_state_restore();
14135
14136 @@ -371,8 +379,6 @@ __switch_to(struct task_struct *prev_p,
14137 if (prev->gs | next->gs)
14138 lazy_load_gs(next->gs);
14139
14140 - percpu_write(current_task, next_p);
14141 -
14142 return prev_p;
14143 }
14144
14145 @@ -402,4 +408,3 @@ unsigned long get_wchan(struct task_stru
14146 } while (count++ < 16);
14147 return 0;
14148 }
14149 -
14150 diff -urNp linux-2.6.39.4/arch/x86/kernel/process_64.c linux-2.6.39.4/arch/x86/kernel/process_64.c
14151 --- linux-2.6.39.4/arch/x86/kernel/process_64.c 2011-06-25 12:55:22.000000000 -0400
14152 +++ linux-2.6.39.4/arch/x86/kernel/process_64.c 2011-08-05 19:44:35.000000000 -0400
14153 @@ -87,7 +87,7 @@ static void __exit_idle(void)
14154 void exit_idle(void)
14155 {
14156 /* idle loop has pid 0 */
14157 - if (current->pid)
14158 + if (task_pid_nr(current))
14159 return;
14160 __exit_idle();
14161 }
14162 @@ -260,8 +260,7 @@ int copy_thread(unsigned long clone_flag
14163 struct pt_regs *childregs;
14164 struct task_struct *me = current;
14165
14166 - childregs = ((struct pt_regs *)
14167 - (THREAD_SIZE + task_stack_page(p))) - 1;
14168 + childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
14169 *childregs = *regs;
14170
14171 childregs->ax = 0;
14172 @@ -273,6 +272,7 @@ int copy_thread(unsigned long clone_flag
14173 p->thread.sp = (unsigned long) childregs;
14174 p->thread.sp0 = (unsigned long) (childregs+1);
14175 p->thread.usersp = me->thread.usersp;
14176 + p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
14177
14178 set_tsk_thread_flag(p, TIF_FORK);
14179
14180 @@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p,
14181 struct thread_struct *prev = &prev_p->thread;
14182 struct thread_struct *next = &next_p->thread;
14183 int cpu = smp_processor_id();
14184 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
14185 + struct tss_struct *tss = init_tss + cpu;
14186 unsigned fsindex, gsindex;
14187 bool preload_fpu;
14188
14189 @@ -471,10 +471,9 @@ __switch_to(struct task_struct *prev_p,
14190 prev->usersp = percpu_read(old_rsp);
14191 percpu_write(old_rsp, next->usersp);
14192 percpu_write(current_task, next_p);
14193 + percpu_write(current_tinfo, &next_p->tinfo);
14194
14195 - percpu_write(kernel_stack,
14196 - (unsigned long)task_stack_page(next_p) +
14197 - THREAD_SIZE - KERNEL_STACK_OFFSET);
14198 + percpu_write(kernel_stack, next->sp0);
14199
14200 /*
14201 * Now maybe reload the debug registers and handle I/O bitmaps
14202 @@ -536,12 +535,11 @@ unsigned long get_wchan(struct task_stru
14203 if (!p || p == current || p->state == TASK_RUNNING)
14204 return 0;
14205 stack = (unsigned long)task_stack_page(p);
14206 - if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
14207 + if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
14208 return 0;
14209 fp = *(u64 *)(p->thread.sp);
14210 do {
14211 - if (fp < (unsigned long)stack ||
14212 - fp >= (unsigned long)stack+THREAD_SIZE)
14213 + if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
14214 return 0;
14215 ip = *(u64 *)(fp+8);
14216 if (!in_sched_functions(ip))
14217 diff -urNp linux-2.6.39.4/arch/x86/kernel/process.c linux-2.6.39.4/arch/x86/kernel/process.c
14218 --- linux-2.6.39.4/arch/x86/kernel/process.c 2011-05-19 00:06:34.000000000 -0400
14219 +++ linux-2.6.39.4/arch/x86/kernel/process.c 2011-08-05 19:44:35.000000000 -0400
14220 @@ -48,16 +48,33 @@ void free_thread_xstate(struct task_stru
14221
14222 void free_thread_info(struct thread_info *ti)
14223 {
14224 - free_thread_xstate(ti->task);
14225 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
14226 }
14227
14228 +static struct kmem_cache *task_struct_cachep;
14229 +
14230 void arch_task_cache_init(void)
14231 {
14232 - task_xstate_cachep =
14233 - kmem_cache_create("task_xstate", xstate_size,
14234 + /* create a slab on which task_structs can be allocated */
14235 + task_struct_cachep =
14236 + kmem_cache_create("task_struct", sizeof(struct task_struct),
14237 + ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
14238 +
14239 + task_xstate_cachep =
14240 + kmem_cache_create("task_xstate", xstate_size,
14241 __alignof__(union thread_xstate),
14242 - SLAB_PANIC | SLAB_NOTRACK, NULL);
14243 + SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
14244 +}
14245 +
14246 +struct task_struct *alloc_task_struct_node(int node)
14247 +{
14248 + return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
14249 +}
14250 +
14251 +void free_task_struct(struct task_struct *task)
14252 +{
14253 + free_thread_xstate(task);
14254 + kmem_cache_free(task_struct_cachep, task);
14255 }
14256
14257 /*
14258 @@ -70,7 +87,7 @@ void exit_thread(void)
14259 unsigned long *bp = t->io_bitmap_ptr;
14260
14261 if (bp) {
14262 - struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
14263 + struct tss_struct *tss = init_tss + get_cpu();
14264
14265 t->io_bitmap_ptr = NULL;
14266 clear_thread_flag(TIF_IO_BITMAP);
14267 @@ -106,7 +123,7 @@ void show_regs_common(void)
14268
14269 printk(KERN_CONT "\n");
14270 printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s",
14271 - current->pid, current->comm, print_tainted(),
14272 + task_pid_nr(current), current->comm, print_tainted(),
14273 init_utsname()->release,
14274 (int)strcspn(init_utsname()->version, " "),
14275 init_utsname()->version);
14276 @@ -120,6 +137,9 @@ void flush_thread(void)
14277 {
14278 struct task_struct *tsk = current;
14279
14280 +#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
14281 + loadsegment(gs, 0);
14282 +#endif
14283 flush_ptrace_hw_breakpoint(tsk);
14284 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
14285 /*
14286 @@ -282,10 +302,10 @@ int kernel_thread(int (*fn)(void *), voi
14287 regs.di = (unsigned long) arg;
14288
14289 #ifdef CONFIG_X86_32
14290 - regs.ds = __USER_DS;
14291 - regs.es = __USER_DS;
14292 + regs.ds = __KERNEL_DS;
14293 + regs.es = __KERNEL_DS;
14294 regs.fs = __KERNEL_PERCPU;
14295 - regs.gs = __KERNEL_STACK_CANARY;
14296 + savesegment(gs, regs.gs);
14297 #else
14298 regs.ss = __KERNEL_DS;
14299 #endif
14300 @@ -401,7 +421,7 @@ void default_idle(void)
14301 EXPORT_SYMBOL(default_idle);
14302 #endif
14303
14304 -void stop_this_cpu(void *dummy)
14305 +__noreturn void stop_this_cpu(void *dummy)
14306 {
14307 local_irq_disable();
14308 /*
14309 @@ -665,16 +685,34 @@ static int __init idle_setup(char *str)
14310 }
14311 early_param("idle", idle_setup);
14312
14313 -unsigned long arch_align_stack(unsigned long sp)
14314 +#ifdef CONFIG_PAX_RANDKSTACK
14315 +asmlinkage void pax_randomize_kstack(void)
14316 {
14317 - if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
14318 - sp -= get_random_int() % 8192;
14319 - return sp & ~0xf;
14320 -}
14321 + struct thread_struct *thread = &current->thread;
14322 + unsigned long time;
14323
14324 -unsigned long arch_randomize_brk(struct mm_struct *mm)
14325 -{
14326 - unsigned long range_end = mm->brk + 0x02000000;
14327 - return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
14328 -}
14329 + if (!randomize_va_space)
14330 + return;
14331 +
14332 + rdtscl(time);
14333 +
14334 + /* P4 seems to return a 0 LSB, ignore it */
14335 +#ifdef CONFIG_MPENTIUM4
14336 + time &= 0x3EUL;
14337 + time <<= 2;
14338 +#elif defined(CONFIG_X86_64)
14339 + time &= 0xFUL;
14340 + time <<= 4;
14341 +#else
14342 + time &= 0x1FUL;
14343 + time <<= 3;
14344 +#endif
14345 +
14346 + thread->sp0 ^= time;
14347 + load_sp0(init_tss + smp_processor_id(), thread);
14348
14349 +#ifdef CONFIG_X86_64
14350 + percpu_write(kernel_stack, thread->sp0);
14351 +#endif
14352 +}
14353 +#endif
14354 diff -urNp linux-2.6.39.4/arch/x86/kernel/ptrace.c linux-2.6.39.4/arch/x86/kernel/ptrace.c
14355 --- linux-2.6.39.4/arch/x86/kernel/ptrace.c 2011-05-19 00:06:34.000000000 -0400
14356 +++ linux-2.6.39.4/arch/x86/kernel/ptrace.c 2011-08-05 19:44:35.000000000 -0400
14357 @@ -821,7 +821,7 @@ long arch_ptrace(struct task_struct *chi
14358 unsigned long addr, unsigned long data)
14359 {
14360 int ret;
14361 - unsigned long __user *datap = (unsigned long __user *)data;
14362 + unsigned long __user *datap = (__force unsigned long __user *)data;
14363
14364 switch (request) {
14365 /* read the word at location addr in the USER area. */
14366 @@ -906,14 +906,14 @@ long arch_ptrace(struct task_struct *chi
14367 if ((int) addr < 0)
14368 return -EIO;
14369 ret = do_get_thread_area(child, addr,
14370 - (struct user_desc __user *)data);
14371 + (__force struct user_desc __user *) data);
14372 break;
14373
14374 case PTRACE_SET_THREAD_AREA:
14375 if ((int) addr < 0)
14376 return -EIO;
14377 ret = do_set_thread_area(child, addr,
14378 - (struct user_desc __user *)data, 0);
14379 + (__force struct user_desc __user *) data, 0);
14380 break;
14381 #endif
14382
14383 @@ -1330,7 +1330,7 @@ static void fill_sigtrap_info(struct tas
14384 memset(info, 0, sizeof(*info));
14385 info->si_signo = SIGTRAP;
14386 info->si_code = si_code;
14387 - info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
14388 + info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
14389 }
14390
14391 void user_single_step_siginfo(struct task_struct *tsk,
14392 @@ -1363,7 +1363,7 @@ void send_sigtrap(struct task_struct *ts
14393 * We must return the syscall number to actually look up in the table.
14394 * This can be -1L to skip running any syscall at all.
14395 */
14396 -asmregparm long syscall_trace_enter(struct pt_regs *regs)
14397 +long syscall_trace_enter(struct pt_regs *regs)
14398 {
14399 long ret = 0;
14400
14401 @@ -1408,7 +1408,7 @@ asmregparm long syscall_trace_enter(stru
14402 return ret ?: regs->orig_ax;
14403 }
14404
14405 -asmregparm void syscall_trace_leave(struct pt_regs *regs)
14406 +void syscall_trace_leave(struct pt_regs *regs)
14407 {
14408 bool step;
14409
14410 diff -urNp linux-2.6.39.4/arch/x86/kernel/pvclock.c linux-2.6.39.4/arch/x86/kernel/pvclock.c
14411 --- linux-2.6.39.4/arch/x86/kernel/pvclock.c 2011-05-19 00:06:34.000000000 -0400
14412 +++ linux-2.6.39.4/arch/x86/kernel/pvclock.c 2011-08-05 19:44:35.000000000 -0400
14413 @@ -81,11 +81,11 @@ unsigned long pvclock_tsc_khz(struct pvc
14414 return pv_tsc_khz;
14415 }
14416
14417 -static atomic64_t last_value = ATOMIC64_INIT(0);
14418 +static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
14419
14420 void pvclock_resume(void)
14421 {
14422 - atomic64_set(&last_value, 0);
14423 + atomic64_set_unchecked(&last_value, 0);
14424 }
14425
14426 cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
14427 @@ -121,11 +121,11 @@ cycle_t pvclock_clocksource_read(struct
14428 * updating at the same time, and one of them could be slightly behind,
14429 * making the assumption that last_value always go forward fail to hold.
14430 */
14431 - last = atomic64_read(&last_value);
14432 + last = atomic64_read_unchecked(&last_value);
14433 do {
14434 if (ret < last)
14435 return last;
14436 - last = atomic64_cmpxchg(&last_value, last, ret);
14437 + last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
14438 } while (unlikely(last != ret));
14439
14440 return ret;
14441 diff -urNp linux-2.6.39.4/arch/x86/kernel/reboot.c linux-2.6.39.4/arch/x86/kernel/reboot.c
14442 --- linux-2.6.39.4/arch/x86/kernel/reboot.c 2011-08-05 21:11:51.000000000 -0400
14443 +++ linux-2.6.39.4/arch/x86/kernel/reboot.c 2011-08-05 21:12:20.000000000 -0400
14444 @@ -35,7 +35,7 @@ void (*pm_power_off)(void);
14445 EXPORT_SYMBOL(pm_power_off);
14446
14447 static const struct desc_ptr no_idt = {};
14448 -static int reboot_mode;
14449 +static unsigned short reboot_mode;
14450 enum reboot_type reboot_type = BOOT_KBD;
14451 int reboot_force;
14452
14453 @@ -307,13 +307,17 @@ core_initcall(reboot_init);
14454 extern const unsigned char machine_real_restart_asm[];
14455 extern const u64 machine_real_restart_gdt[3];
14456
14457 -void machine_real_restart(unsigned int type)
14458 +__noreturn void machine_real_restart(unsigned int type)
14459 {
14460 void *restart_va;
14461 unsigned long restart_pa;
14462 - void (*restart_lowmem)(unsigned int);
14463 + void (* __noreturn restart_lowmem)(unsigned int);
14464 u64 *lowmem_gdt;
14465
14466 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14467 + struct desc_struct *gdt;
14468 +#endif
14469 +
14470 local_irq_disable();
14471
14472 /* Write zero to CMOS register number 0x0f, which the BIOS POST
14473 @@ -339,14 +343,14 @@ void machine_real_restart(unsigned int t
14474 boot)". This seems like a fairly standard thing that gets set by
14475 REBOOT.COM programs, and the previous reset routine did this
14476 too. */
14477 - *((unsigned short *)0x472) = reboot_mode;
14478 + *(unsigned short *)(__va(0x472)) = reboot_mode;
14479
14480 /* Patch the GDT in the low memory trampoline */
14481 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
14482
14483 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
14484 restart_pa = virt_to_phys(restart_va);
14485 - restart_lowmem = (void (*)(unsigned int))restart_pa;
14486 + restart_lowmem = (void *)restart_pa;
14487
14488 /* GDT[0]: GDT self-pointer */
14489 lowmem_gdt[0] =
14490 @@ -357,7 +361,33 @@ void machine_real_restart(unsigned int t
14491 GDT_ENTRY(0x009b, restart_pa, 0xffff);
14492
14493 /* Jump to the identity-mapped low memory code */
14494 +
14495 +#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
14496 + gdt = get_cpu_gdt_table(smp_processor_id());
14497 + pax_open_kernel();
14498 +#ifdef CONFIG_PAX_MEMORY_UDEREF
14499 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
14500 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
14501 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
14502 +#endif
14503 +#ifdef CONFIG_PAX_KERNEXEC
14504 + gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
14505 + gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
14506 + gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
14507 + gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
14508 + gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
14509 + gdt[GDT_ENTRY_KERNEL_CS].g = 1;
14510 +#endif
14511 + pax_close_kernel();
14512 +#endif
14513 +
14514 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
14515 + asm volatile("push %0; push %1; lret\n" : : "i" (__KERNEL_CS), "rm" (restart_lowmem), "a" (type));
14516 + unreachable();
14517 +#else
14518 restart_lowmem(type);
14519 +#endif
14520 +
14521 }
14522 #ifdef CONFIG_APM_MODULE
14523 EXPORT_SYMBOL(machine_real_restart);
14524 @@ -486,7 +516,7 @@ void __attribute__((weak)) mach_reboot_f
14525 {
14526 }
14527
14528 -static void native_machine_emergency_restart(void)
14529 +__noreturn static void native_machine_emergency_restart(void)
14530 {
14531 int i;
14532
14533 @@ -601,13 +631,13 @@ void native_machine_shutdown(void)
14534 #endif
14535 }
14536
14537 -static void __machine_emergency_restart(int emergency)
14538 +static __noreturn void __machine_emergency_restart(int emergency)
14539 {
14540 reboot_emergency = emergency;
14541 machine_ops.emergency_restart();
14542 }
14543
14544 -static void native_machine_restart(char *__unused)
14545 +static __noreturn void native_machine_restart(char *__unused)
14546 {
14547 printk("machine restart\n");
14548
14549 @@ -616,7 +646,7 @@ static void native_machine_restart(char
14550 __machine_emergency_restart(0);
14551 }
14552
14553 -static void native_machine_halt(void)
14554 +static __noreturn void native_machine_halt(void)
14555 {
14556 /* stop other cpus and apics */
14557 machine_shutdown();
14558 @@ -627,7 +657,7 @@ static void native_machine_halt(void)
14559 stop_this_cpu(NULL);
14560 }
14561
14562 -static void native_machine_power_off(void)
14563 +__noreturn static void native_machine_power_off(void)
14564 {
14565 if (pm_power_off) {
14566 if (!reboot_force)
14567 @@ -636,6 +666,7 @@ static void native_machine_power_off(voi
14568 }
14569 /* a fallback in case there is no PM info available */
14570 tboot_shutdown(TB_SHUTDOWN_HALT);
14571 + unreachable();
14572 }
14573
14574 struct machine_ops machine_ops = {
14575 diff -urNp linux-2.6.39.4/arch/x86/kernel/setup.c linux-2.6.39.4/arch/x86/kernel/setup.c
14576 --- linux-2.6.39.4/arch/x86/kernel/setup.c 2011-06-25 12:55:22.000000000 -0400
14577 +++ linux-2.6.39.4/arch/x86/kernel/setup.c 2011-08-05 19:44:35.000000000 -0400
14578 @@ -650,7 +650,7 @@ static void __init trim_bios_range(void)
14579 * area (640->1Mb) as ram even though it is not.
14580 * take them out.
14581 */
14582 - e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
14583 + e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
14584 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
14585 }
14586
14587 @@ -775,14 +775,14 @@ void __init setup_arch(char **cmdline_p)
14588
14589 if (!boot_params.hdr.root_flags)
14590 root_mountflags &= ~MS_RDONLY;
14591 - init_mm.start_code = (unsigned long) _text;
14592 - init_mm.end_code = (unsigned long) _etext;
14593 + init_mm.start_code = ktla_ktva((unsigned long) _text);
14594 + init_mm.end_code = ktla_ktva((unsigned long) _etext);
14595 init_mm.end_data = (unsigned long) _edata;
14596 init_mm.brk = _brk_end;
14597
14598 - code_resource.start = virt_to_phys(_text);
14599 - code_resource.end = virt_to_phys(_etext)-1;
14600 - data_resource.start = virt_to_phys(_etext);
14601 + code_resource.start = virt_to_phys(ktla_ktva(_text));
14602 + code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
14603 + data_resource.start = virt_to_phys(_sdata);
14604 data_resource.end = virt_to_phys(_edata)-1;
14605 bss_resource.start = virt_to_phys(&__bss_start);
14606 bss_resource.end = virt_to_phys(&__bss_stop)-1;
14607 diff -urNp linux-2.6.39.4/arch/x86/kernel/setup_percpu.c linux-2.6.39.4/arch/x86/kernel/setup_percpu.c
14608 --- linux-2.6.39.4/arch/x86/kernel/setup_percpu.c 2011-05-19 00:06:34.000000000 -0400
14609 +++ linux-2.6.39.4/arch/x86/kernel/setup_percpu.c 2011-08-05 19:44:35.000000000 -0400
14610 @@ -21,19 +21,17 @@
14611 #include <asm/cpu.h>
14612 #include <asm/stackprotector.h>
14613
14614 -DEFINE_PER_CPU(int, cpu_number);
14615 +#ifdef CONFIG_SMP
14616 +DEFINE_PER_CPU(unsigned int, cpu_number);
14617 EXPORT_PER_CPU_SYMBOL(cpu_number);
14618 +#endif
14619
14620 -#ifdef CONFIG_X86_64
14621 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
14622 -#else
14623 -#define BOOT_PERCPU_OFFSET 0
14624 -#endif
14625
14626 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
14627 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
14628
14629 -unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
14630 +unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
14631 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
14632 };
14633 EXPORT_SYMBOL(__per_cpu_offset);
14634 @@ -155,10 +153,10 @@ static inline void setup_percpu_segment(
14635 {
14636 #ifdef CONFIG_X86_32
14637 struct desc_struct gdt;
14638 + unsigned long base = per_cpu_offset(cpu);
14639
14640 - pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
14641 - 0x2 | DESCTYPE_S, 0x8);
14642 - gdt.s = 1;
14643 + pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
14644 + 0x83 | DESCTYPE_S, 0xC);
14645 write_gdt_entry(get_cpu_gdt_table(cpu),
14646 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
14647 #endif
14648 @@ -207,6 +205,11 @@ void __init setup_per_cpu_areas(void)
14649 /* alrighty, percpu areas up and running */
14650 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
14651 for_each_possible_cpu(cpu) {
14652 +#ifdef CONFIG_CC_STACKPROTECTOR
14653 +#ifdef CONFIG_X86_32
14654 + unsigned long canary = per_cpu(stack_canary.canary, cpu);
14655 +#endif
14656 +#endif
14657 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
14658 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
14659 per_cpu(cpu_number, cpu) = cpu;
14660 @@ -247,6 +250,12 @@ void __init setup_per_cpu_areas(void)
14661 */
14662 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
14663 #endif
14664 +#ifdef CONFIG_CC_STACKPROTECTOR
14665 +#ifdef CONFIG_X86_32
14666 + if (!cpu)
14667 + per_cpu(stack_canary.canary, cpu) = canary;
14668 +#endif
14669 +#endif
14670 /*
14671 * Up to this point, the boot CPU has been using .init.data
14672 * area. Reload any changed state for the boot CPU.
14673 diff -urNp linux-2.6.39.4/arch/x86/kernel/signal.c linux-2.6.39.4/arch/x86/kernel/signal.c
14674 --- linux-2.6.39.4/arch/x86/kernel/signal.c 2011-05-19 00:06:34.000000000 -0400
14675 +++ linux-2.6.39.4/arch/x86/kernel/signal.c 2011-08-05 19:44:35.000000000 -0400
14676 @@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi
14677 * Align the stack pointer according to the i386 ABI,
14678 * i.e. so that on function entry ((sp + 4) & 15) == 0.
14679 */
14680 - sp = ((sp + 4) & -16ul) - 4;
14681 + sp = ((sp - 12) & -16ul) - 4;
14682 #else /* !CONFIG_X86_32 */
14683 sp = round_down(sp, 16) - 8;
14684 #endif
14685 @@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str
14686 * Return an always-bogus address instead so we will die with SIGSEGV.
14687 */
14688 if (onsigstack && !likely(on_sig_stack(sp)))
14689 - return (void __user *)-1L;
14690 + return (__force void __user *)-1L;
14691
14692 /* save i387 state */
14693 if (used_math() && save_i387_xstate(*fpstate) < 0)
14694 - return (void __user *)-1L;
14695 + return (__force void __user *)-1L;
14696
14697 return (void __user *)sp;
14698 }
14699 @@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio
14700 }
14701
14702 if (current->mm->context.vdso)
14703 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14704 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
14705 else
14706 - restorer = &frame->retcode;
14707 + restorer = (void __user *)&frame->retcode;
14708 if (ka->sa.sa_flags & SA_RESTORER)
14709 restorer = ka->sa.sa_restorer;
14710
14711 @@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio
14712 * reasons and because gdb uses it as a signature to notice
14713 * signal handler stack frames.
14714 */
14715 - err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
14716 + err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
14717
14718 if (err)
14719 return -EFAULT;
14720 @@ -378,7 +378,10 @@ static int __setup_rt_frame(int sig, str
14721 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
14722
14723 /* Set up to return from userspace. */
14724 - restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14725 + if (current->mm->context.vdso)
14726 + restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
14727 + else
14728 + restorer = (void __user *)&frame->retcode;
14729 if (ka->sa.sa_flags & SA_RESTORER)
14730 restorer = ka->sa.sa_restorer;
14731 put_user_ex(restorer, &frame->pretcode);
14732 @@ -390,7 +393,7 @@ static int __setup_rt_frame(int sig, str
14733 * reasons and because gdb uses it as a signature to notice
14734 * signal handler stack frames.
14735 */
14736 - put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
14737 + put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
14738 } put_user_catch(err);
14739
14740 if (err)
14741 @@ -773,6 +776,8 @@ static void do_signal(struct pt_regs *re
14742 int signr;
14743 sigset_t *oldset;
14744
14745 + pax_track_stack();
14746 +
14747 /*
14748 * We want the common case to go fast, which is why we may in certain
14749 * cases get here from kernel mode. Just return without doing anything
14750 @@ -780,7 +785,7 @@ static void do_signal(struct pt_regs *re
14751 * X86_32: vm86 regs switched out by assembly code before reaching
14752 * here, so testing against kernel CS suffices.
14753 */
14754 - if (!user_mode(regs))
14755 + if (!user_mode_novm(regs))
14756 return;
14757
14758 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
14759 diff -urNp linux-2.6.39.4/arch/x86/kernel/smpboot.c linux-2.6.39.4/arch/x86/kernel/smpboot.c
14760 --- linux-2.6.39.4/arch/x86/kernel/smpboot.c 2011-06-25 12:55:22.000000000 -0400
14761 +++ linux-2.6.39.4/arch/x86/kernel/smpboot.c 2011-08-05 19:44:35.000000000 -0400
14762 @@ -709,17 +709,20 @@ static int __cpuinit do_boot_cpu(int api
14763 set_idle_for_cpu(cpu, c_idle.idle);
14764 do_rest:
14765 per_cpu(current_task, cpu) = c_idle.idle;
14766 + per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
14767 #ifdef CONFIG_X86_32
14768 /* Stack for startup_32 can be just as for start_secondary onwards */
14769 irq_ctx_init(cpu);
14770 #else
14771 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
14772 initial_gs = per_cpu_offset(cpu);
14773 - per_cpu(kernel_stack, cpu) =
14774 - (unsigned long)task_stack_page(c_idle.idle) -
14775 - KERNEL_STACK_OFFSET + THREAD_SIZE;
14776 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
14777 #endif
14778 +
14779 + pax_open_kernel();
14780 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
14781 + pax_close_kernel();
14782 +
14783 initial_code = (unsigned long)start_secondary;
14784 stack_start = c_idle.idle->thread.sp;
14785
14786 @@ -861,6 +864,12 @@ int __cpuinit native_cpu_up(unsigned int
14787
14788 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
14789
14790 +#ifdef CONFIG_PAX_PER_CPU_PGD
14791 + clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
14792 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14793 + KERNEL_PGD_PTRS);
14794 +#endif
14795 +
14796 err = do_boot_cpu(apicid, cpu);
14797 if (err) {
14798 pr_debug("do_boot_cpu failed %d\n", err);
14799 diff -urNp linux-2.6.39.4/arch/x86/kernel/step.c linux-2.6.39.4/arch/x86/kernel/step.c
14800 --- linux-2.6.39.4/arch/x86/kernel/step.c 2011-05-19 00:06:34.000000000 -0400
14801 +++ linux-2.6.39.4/arch/x86/kernel/step.c 2011-08-05 19:44:35.000000000 -0400
14802 @@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
14803 struct desc_struct *desc;
14804 unsigned long base;
14805
14806 - seg &= ~7UL;
14807 + seg >>= 3;
14808
14809 mutex_lock(&child->mm->context.lock);
14810 - if (unlikely((seg >> 3) >= child->mm->context.size))
14811 + if (unlikely(seg >= child->mm->context.size))
14812 addr = -1L; /* bogus selector, access would fault */
14813 else {
14814 desc = child->mm->context.ldt + seg;
14815 @@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
14816 addr += base;
14817 }
14818 mutex_unlock(&child->mm->context.lock);
14819 - }
14820 + } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
14821 + addr = ktla_ktva(addr);
14822
14823 return addr;
14824 }
14825 @@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
14826 unsigned char opcode[15];
14827 unsigned long addr = convert_ip_to_linear(child, regs);
14828
14829 + if (addr == -EINVAL)
14830 + return 0;
14831 +
14832 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
14833 for (i = 0; i < copied; i++) {
14834 switch (opcode[i]) {
14835 @@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
14836
14837 #ifdef CONFIG_X86_64
14838 case 0x40 ... 0x4f:
14839 - if (regs->cs != __USER_CS)
14840 + if ((regs->cs & 0xffff) != __USER_CS)
14841 /* 32-bit mode: register increment */
14842 return 0;
14843 /* 64-bit mode: REX prefix */
14844 diff -urNp linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S
14845 --- linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S 2011-05-19 00:06:34.000000000 -0400
14846 +++ linux-2.6.39.4/arch/x86/kernel/syscall_table_32.S 2011-08-05 19:44:35.000000000 -0400
14847 @@ -1,3 +1,4 @@
14848 +.section .rodata,"a",@progbits
14849 ENTRY(sys_call_table)
14850 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
14851 .long sys_exit
14852 diff -urNp linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c
14853 --- linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c 2011-05-19 00:06:34.000000000 -0400
14854 +++ linux-2.6.39.4/arch/x86/kernel/sys_i386_32.c 2011-08-05 19:44:35.000000000 -0400
14855 @@ -24,17 +24,224 @@
14856
14857 #include <asm/syscalls.h>
14858
14859 -/*
14860 - * Do a system call from kernel instead of calling sys_execve so we
14861 - * end up with proper pt_regs.
14862 - */
14863 -int kernel_execve(const char *filename,
14864 - const char *const argv[],
14865 - const char *const envp[])
14866 +int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
14867 {
14868 - long __res;
14869 - asm volatile ("int $0x80"
14870 - : "=a" (__res)
14871 - : "0" (__NR_execve), "b" (filename), "c" (argv), "d" (envp) : "memory");
14872 - return __res;
14873 + unsigned long pax_task_size = TASK_SIZE;
14874 +
14875 +#ifdef CONFIG_PAX_SEGMEXEC
14876 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
14877 + pax_task_size = SEGMEXEC_TASK_SIZE;
14878 +#endif
14879 +
14880 + if (len > pax_task_size || addr > pax_task_size - len)
14881 + return -EINVAL;
14882 +
14883 + return 0;
14884 +}
14885 +
14886 +unsigned long
14887 +arch_get_unmapped_area(struct file *filp, unsigned long addr,
14888 + unsigned long len, unsigned long pgoff, unsigned long flags)
14889 +{
14890 + struct mm_struct *mm = current->mm;
14891 + struct vm_area_struct *vma;
14892 + unsigned long start_addr, pax_task_size = TASK_SIZE;
14893 +
14894 +#ifdef CONFIG_PAX_SEGMEXEC
14895 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
14896 + pax_task_size = SEGMEXEC_TASK_SIZE;
14897 +#endif
14898 +
14899 + pax_task_size -= PAGE_SIZE;
14900 +
14901 + if (len > pax_task_size)
14902 + return -ENOMEM;
14903 +
14904 + if (flags & MAP_FIXED)
14905 + return addr;
14906 +
14907 +#ifdef CONFIG_PAX_RANDMMAP
14908 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
14909 +#endif
14910 +
14911 + if (addr) {
14912 + addr = PAGE_ALIGN(addr);
14913 + if (pax_task_size - len >= addr) {
14914 + vma = find_vma(mm, addr);
14915 + if (check_heap_stack_gap(vma, addr, len))
14916 + return addr;
14917 + }
14918 + }
14919 + if (len > mm->cached_hole_size) {
14920 + start_addr = addr = mm->free_area_cache;
14921 + } else {
14922 + start_addr = addr = mm->mmap_base;
14923 + mm->cached_hole_size = 0;
14924 + }
14925 +
14926 +#ifdef CONFIG_PAX_PAGEEXEC
14927 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
14928 + start_addr = 0x00110000UL;
14929 +
14930 +#ifdef CONFIG_PAX_RANDMMAP
14931 + if (mm->pax_flags & MF_PAX_RANDMMAP)
14932 + start_addr += mm->delta_mmap & 0x03FFF000UL;
14933 +#endif
14934 +
14935 + if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
14936 + start_addr = addr = mm->mmap_base;
14937 + else
14938 + addr = start_addr;
14939 + }
14940 +#endif
14941 +
14942 +full_search:
14943 + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
14944 + /* At this point: (!vma || addr < vma->vm_end). */
14945 + if (pax_task_size - len < addr) {
14946 + /*
14947 + * Start a new search - just in case we missed
14948 + * some holes.
14949 + */
14950 + if (start_addr != mm->mmap_base) {
14951 + start_addr = addr = mm->mmap_base;
14952 + mm->cached_hole_size = 0;
14953 + goto full_search;
14954 + }
14955 + return -ENOMEM;
14956 + }
14957 + if (check_heap_stack_gap(vma, addr, len))
14958 + break;
14959 + if (addr + mm->cached_hole_size < vma->vm_start)
14960 + mm->cached_hole_size = vma->vm_start - addr;
14961 + addr = vma->vm_end;
14962 + if (mm->start_brk <= addr && addr < mm->mmap_base) {
14963 + start_addr = addr = mm->mmap_base;
14964 + mm->cached_hole_size = 0;
14965 + goto full_search;
14966 + }
14967 + }
14968 +
14969 + /*
14970 + * Remember the place where we stopped the search:
14971 + */
14972 + mm->free_area_cache = addr + len;
14973 + return addr;
14974 +}
14975 +
14976 +unsigned long
14977 +arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
14978 + const unsigned long len, const unsigned long pgoff,
14979 + const unsigned long flags)
14980 +{
14981 + struct vm_area_struct *vma;
14982 + struct mm_struct *mm = current->mm;
14983 + unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
14984 +
14985 +#ifdef CONFIG_PAX_SEGMEXEC
14986 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
14987 + pax_task_size = SEGMEXEC_TASK_SIZE;
14988 +#endif
14989 +
14990 + pax_task_size -= PAGE_SIZE;
14991 +
14992 + /* requested length too big for entire address space */
14993 + if (len > pax_task_size)
14994 + return -ENOMEM;
14995 +
14996 + if (flags & MAP_FIXED)
14997 + return addr;
14998 +
14999 +#ifdef CONFIG_PAX_PAGEEXEC
15000 + if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
15001 + goto bottomup;
15002 +#endif
15003 +
15004 +#ifdef CONFIG_PAX_RANDMMAP
15005 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15006 +#endif
15007 +
15008 + /* requesting a specific address */
15009 + if (addr) {
15010 + addr = PAGE_ALIGN(addr);
15011 + if (pax_task_size - len >= addr) {
15012 + vma = find_vma(mm, addr);
15013 + if (check_heap_stack_gap(vma, addr, len))
15014 + return addr;
15015 + }
15016 + }
15017 +
15018 + /* check if free_area_cache is useful for us */
15019 + if (len <= mm->cached_hole_size) {
15020 + mm->cached_hole_size = 0;
15021 + mm->free_area_cache = mm->mmap_base;
15022 + }
15023 +
15024 + /* either no address requested or can't fit in requested address hole */
15025 + addr = mm->free_area_cache;
15026 +
15027 + /* make sure it can fit in the remaining address space */
15028 + if (addr > len) {
15029 + vma = find_vma(mm, addr-len);
15030 + if (check_heap_stack_gap(vma, addr - len, len))
15031 + /* remember the address as a hint for next time */
15032 + return (mm->free_area_cache = addr-len);
15033 + }
15034 +
15035 + if (mm->mmap_base < len)
15036 + goto bottomup;
15037 +
15038 + addr = mm->mmap_base-len;
15039 +
15040 + do {
15041 + /*
15042 + * Lookup failure means no vma is above this address,
15043 + * else if new region fits below vma->vm_start,
15044 + * return with success:
15045 + */
15046 + vma = find_vma(mm, addr);
15047 + if (check_heap_stack_gap(vma, addr, len))
15048 + /* remember the address as a hint for next time */
15049 + return (mm->free_area_cache = addr);
15050 +
15051 + /* remember the largest hole we saw so far */
15052 + if (addr + mm->cached_hole_size < vma->vm_start)
15053 + mm->cached_hole_size = vma->vm_start - addr;
15054 +
15055 + /* try just below the current vma->vm_start */
15056 + addr = skip_heap_stack_gap(vma, len);
15057 + } while (!IS_ERR_VALUE(addr));
15058 +
15059 +bottomup:
15060 + /*
15061 + * A failed mmap() very likely causes application failure,
15062 + * so fall back to the bottom-up function here. This scenario
15063 + * can happen with large stack limits and large mmap()
15064 + * allocations.
15065 + */
15066 +
15067 +#ifdef CONFIG_PAX_SEGMEXEC
15068 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
15069 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
15070 + else
15071 +#endif
15072 +
15073 + mm->mmap_base = TASK_UNMAPPED_BASE;
15074 +
15075 +#ifdef CONFIG_PAX_RANDMMAP
15076 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15077 + mm->mmap_base += mm->delta_mmap;
15078 +#endif
15079 +
15080 + mm->free_area_cache = mm->mmap_base;
15081 + mm->cached_hole_size = ~0UL;
15082 + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15083 + /*
15084 + * Restore the topdown base:
15085 + */
15086 + mm->mmap_base = base;
15087 + mm->free_area_cache = base;
15088 + mm->cached_hole_size = ~0UL;
15089 +
15090 + return addr;
15091 }
15092 diff -urNp linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c
15093 --- linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c 2011-05-19 00:06:34.000000000 -0400
15094 +++ linux-2.6.39.4/arch/x86/kernel/sys_x86_64.c 2011-08-05 19:44:35.000000000 -0400
15095 @@ -32,8 +32,8 @@ out:
15096 return error;
15097 }
15098
15099 -static void find_start_end(unsigned long flags, unsigned long *begin,
15100 - unsigned long *end)
15101 +static void find_start_end(struct mm_struct *mm, unsigned long flags,
15102 + unsigned long *begin, unsigned long *end)
15103 {
15104 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
15105 unsigned long new_begin;
15106 @@ -52,7 +52,7 @@ static void find_start_end(unsigned long
15107 *begin = new_begin;
15108 }
15109 } else {
15110 - *begin = TASK_UNMAPPED_BASE;
15111 + *begin = mm->mmap_base;
15112 *end = TASK_SIZE;
15113 }
15114 }
15115 @@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
15116 if (flags & MAP_FIXED)
15117 return addr;
15118
15119 - find_start_end(flags, &begin, &end);
15120 + find_start_end(mm, flags, &begin, &end);
15121
15122 if (len > end)
15123 return -ENOMEM;
15124
15125 +#ifdef CONFIG_PAX_RANDMMAP
15126 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15127 +#endif
15128 +
15129 if (addr) {
15130 addr = PAGE_ALIGN(addr);
15131 vma = find_vma(mm, addr);
15132 - if (end - len >= addr &&
15133 - (!vma || addr + len <= vma->vm_start))
15134 + if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
15135 return addr;
15136 }
15137 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
15138 @@ -106,7 +109,7 @@ full_search:
15139 }
15140 return -ENOMEM;
15141 }
15142 - if (!vma || addr + len <= vma->vm_start) {
15143 + if (check_heap_stack_gap(vma, addr, len)) {
15144 /*
15145 * Remember the place where we stopped the search:
15146 */
15147 @@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
15148 {
15149 struct vm_area_struct *vma;
15150 struct mm_struct *mm = current->mm;
15151 - unsigned long addr = addr0;
15152 + unsigned long base = mm->mmap_base, addr = addr0;
15153
15154 /* requested length too big for entire address space */
15155 if (len > TASK_SIZE)
15156 @@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
15157 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
15158 goto bottomup;
15159
15160 +#ifdef CONFIG_PAX_RANDMMAP
15161 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
15162 +#endif
15163 +
15164 /* requesting a specific address */
15165 if (addr) {
15166 addr = PAGE_ALIGN(addr);
15167 - vma = find_vma(mm, addr);
15168 - if (TASK_SIZE - len >= addr &&
15169 - (!vma || addr + len <= vma->vm_start))
15170 - return addr;
15171 + if (TASK_SIZE - len >= addr) {
15172 + vma = find_vma(mm, addr);
15173 + if (check_heap_stack_gap(vma, addr, len))
15174 + return addr;
15175 + }
15176 }
15177
15178 /* check if free_area_cache is useful for us */
15179 @@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
15180 /* make sure it can fit in the remaining address space */
15181 if (addr > len) {
15182 vma = find_vma(mm, addr-len);
15183 - if (!vma || addr <= vma->vm_start)
15184 + if (check_heap_stack_gap(vma, addr - len, len))
15185 /* remember the address as a hint for next time */
15186 return mm->free_area_cache = addr-len;
15187 }
15188 @@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
15189 * return with success:
15190 */
15191 vma = find_vma(mm, addr);
15192 - if (!vma || addr+len <= vma->vm_start)
15193 + if (check_heap_stack_gap(vma, addr, len))
15194 /* remember the address as a hint for next time */
15195 return mm->free_area_cache = addr;
15196
15197 @@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
15198 mm->cached_hole_size = vma->vm_start - addr;
15199
15200 /* try just below the current vma->vm_start */
15201 - addr = vma->vm_start-len;
15202 - } while (len < vma->vm_start);
15203 + addr = skip_heap_stack_gap(vma, len);
15204 + } while (!IS_ERR_VALUE(addr));
15205
15206 bottomup:
15207 /*
15208 @@ -198,13 +206,21 @@ bottomup:
15209 * can happen with large stack limits and large mmap()
15210 * allocations.
15211 */
15212 + mm->mmap_base = TASK_UNMAPPED_BASE;
15213 +
15214 +#ifdef CONFIG_PAX_RANDMMAP
15215 + if (mm->pax_flags & MF_PAX_RANDMMAP)
15216 + mm->mmap_base += mm->delta_mmap;
15217 +#endif
15218 +
15219 + mm->free_area_cache = mm->mmap_base;
15220 mm->cached_hole_size = ~0UL;
15221 - mm->free_area_cache = TASK_UNMAPPED_BASE;
15222 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
15223 /*
15224 * Restore the topdown base:
15225 */
15226 - mm->free_area_cache = mm->mmap_base;
15227 + mm->mmap_base = base;
15228 + mm->free_area_cache = base;
15229 mm->cached_hole_size = ~0UL;
15230
15231 return addr;
15232 diff -urNp linux-2.6.39.4/arch/x86/kernel/tboot.c linux-2.6.39.4/arch/x86/kernel/tboot.c
15233 --- linux-2.6.39.4/arch/x86/kernel/tboot.c 2011-05-19 00:06:34.000000000 -0400
15234 +++ linux-2.6.39.4/arch/x86/kernel/tboot.c 2011-08-05 19:44:35.000000000 -0400
15235 @@ -218,7 +218,7 @@ static int tboot_setup_sleep(void)
15236
15237 void tboot_shutdown(u32 shutdown_type)
15238 {
15239 - void (*shutdown)(void);
15240 + void (* __noreturn shutdown)(void);
15241
15242 if (!tboot_enabled())
15243 return;
15244 @@ -240,7 +240,7 @@ void tboot_shutdown(u32 shutdown_type)
15245
15246 switch_to_tboot_pt();
15247
15248 - shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
15249 + shutdown = (void *)tboot->shutdown_entry;
15250 shutdown();
15251
15252 /* should not reach here */
15253 @@ -297,7 +297,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
15254 tboot_shutdown(acpi_shutdown_map[sleep_state]);
15255 }
15256
15257 -static atomic_t ap_wfs_count;
15258 +static atomic_unchecked_t ap_wfs_count;
15259
15260 static int tboot_wait_for_aps(int num_aps)
15261 {
15262 @@ -321,9 +321,9 @@ static int __cpuinit tboot_cpu_callback(
15263 {
15264 switch (action) {
15265 case CPU_DYING:
15266 - atomic_inc(&ap_wfs_count);
15267 + atomic_inc_unchecked(&ap_wfs_count);
15268 if (num_online_cpus() == 1)
15269 - if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
15270 + if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
15271 return NOTIFY_BAD;
15272 break;
15273 }
15274 @@ -342,7 +342,7 @@ static __init int tboot_late_init(void)
15275
15276 tboot_create_trampoline();
15277
15278 - atomic_set(&ap_wfs_count, 0);
15279 + atomic_set_unchecked(&ap_wfs_count, 0);
15280 register_hotcpu_notifier(&tboot_cpu_notifier);
15281 return 0;
15282 }
15283 diff -urNp linux-2.6.39.4/arch/x86/kernel/time.c linux-2.6.39.4/arch/x86/kernel/time.c
15284 --- linux-2.6.39.4/arch/x86/kernel/time.c 2011-05-19 00:06:34.000000000 -0400
15285 +++ linux-2.6.39.4/arch/x86/kernel/time.c 2011-08-05 19:44:35.000000000 -0400
15286 @@ -22,17 +22,13 @@
15287 #include <asm/hpet.h>
15288 #include <asm/time.h>
15289
15290 -#ifdef CONFIG_X86_64
15291 -volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
15292 -#endif
15293 -
15294 unsigned long profile_pc(struct pt_regs *regs)
15295 {
15296 unsigned long pc = instruction_pointer(regs);
15297
15298 - if (!user_mode_vm(regs) && in_lock_functions(pc)) {
15299 + if (!user_mode(regs) && in_lock_functions(pc)) {
15300 #ifdef CONFIG_FRAME_POINTER
15301 - return *(unsigned long *)(regs->bp + sizeof(long));
15302 + return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
15303 #else
15304 unsigned long *sp =
15305 (unsigned long *)kernel_stack_pointer(regs);
15306 @@ -41,11 +37,17 @@ unsigned long profile_pc(struct pt_regs
15307 * or above a saved flags. Eflags has bits 22-31 zero,
15308 * kernel addresses don't.
15309 */
15310 +
15311 +#ifdef CONFIG_PAX_KERNEXEC
15312 + return ktla_ktva(sp[0]);
15313 +#else
15314 if (sp[0] >> 22)
15315 return sp[0];
15316 if (sp[1] >> 22)
15317 return sp[1];
15318 #endif
15319 +
15320 +#endif
15321 }
15322 return pc;
15323 }
15324 diff -urNp linux-2.6.39.4/arch/x86/kernel/tls.c linux-2.6.39.4/arch/x86/kernel/tls.c
15325 --- linux-2.6.39.4/arch/x86/kernel/tls.c 2011-05-19 00:06:34.000000000 -0400
15326 +++ linux-2.6.39.4/arch/x86/kernel/tls.c 2011-08-05 19:44:35.000000000 -0400
15327 @@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
15328 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
15329 return -EINVAL;
15330
15331 +#ifdef CONFIG_PAX_SEGMEXEC
15332 + if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
15333 + return -EINVAL;
15334 +#endif
15335 +
15336 set_tls_desc(p, idx, &info, 1);
15337
15338 return 0;
15339 diff -urNp linux-2.6.39.4/arch/x86/kernel/trampoline_32.S linux-2.6.39.4/arch/x86/kernel/trampoline_32.S
15340 --- linux-2.6.39.4/arch/x86/kernel/trampoline_32.S 2011-05-19 00:06:34.000000000 -0400
15341 +++ linux-2.6.39.4/arch/x86/kernel/trampoline_32.S 2011-08-05 19:44:35.000000000 -0400
15342 @@ -32,6 +32,12 @@
15343 #include <asm/segment.h>
15344 #include <asm/page_types.h>
15345
15346 +#ifdef CONFIG_PAX_KERNEXEC
15347 +#define ta(X) (X)
15348 +#else
15349 +#define ta(X) ((X) - __PAGE_OFFSET)
15350 +#endif
15351 +
15352 #ifdef CONFIG_SMP
15353
15354 .section ".x86_trampoline","a"
15355 @@ -62,7 +68,7 @@ r_base = .
15356 inc %ax # protected mode (PE) bit
15357 lmsw %ax # into protected mode
15358 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
15359 - ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
15360 + ljmpl $__BOOT_CS, $ta(startup_32_smp)
15361
15362 # These need to be in the same 64K segment as the above;
15363 # hence we don't use the boot_gdt_descr defined in head.S
15364 diff -urNp linux-2.6.39.4/arch/x86/kernel/trampoline_64.S linux-2.6.39.4/arch/x86/kernel/trampoline_64.S
15365 --- linux-2.6.39.4/arch/x86/kernel/trampoline_64.S 2011-05-19 00:06:34.000000000 -0400
15366 +++ linux-2.6.39.4/arch/x86/kernel/trampoline_64.S 2011-08-05 19:44:35.000000000 -0400
15367 @@ -90,7 +90,7 @@ startup_32:
15368 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
15369 movl %eax, %ds
15370
15371 - movl $X86_CR4_PAE, %eax
15372 + movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
15373 movl %eax, %cr4 # Enable PAE mode
15374
15375 # Setup trampoline 4 level pagetables
15376 @@ -138,7 +138,7 @@ tidt:
15377 # so the kernel can live anywhere
15378 .balign 4
15379 tgdt:
15380 - .short tgdt_end - tgdt # gdt limit
15381 + .short tgdt_end - tgdt - 1 # gdt limit
15382 .long tgdt - r_base
15383 .short 0
15384 .quad 0x00cf9b000000ffff # __KERNEL32_CS
15385 diff -urNp linux-2.6.39.4/arch/x86/kernel/traps.c linux-2.6.39.4/arch/x86/kernel/traps.c
15386 --- linux-2.6.39.4/arch/x86/kernel/traps.c 2011-05-19 00:06:34.000000000 -0400
15387 +++ linux-2.6.39.4/arch/x86/kernel/traps.c 2011-08-05 19:44:35.000000000 -0400
15388 @@ -70,12 +70,6 @@ asmlinkage int system_call(void);
15389
15390 /* Do we ignore FPU interrupts ? */
15391 char ignore_fpu_irq;
15392 -
15393 -/*
15394 - * The IDT has to be page-aligned to simplify the Pentium
15395 - * F0 0F bug workaround.
15396 - */
15397 -gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
15398 #endif
15399
15400 DECLARE_BITMAP(used_vectors, NR_VECTORS);
15401 @@ -117,13 +111,13 @@ static inline void preempt_conditional_c
15402 }
15403
15404 static void __kprobes
15405 -do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
15406 +do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
15407 long error_code, siginfo_t *info)
15408 {
15409 struct task_struct *tsk = current;
15410
15411 #ifdef CONFIG_X86_32
15412 - if (regs->flags & X86_VM_MASK) {
15413 + if (v8086_mode(regs)) {
15414 /*
15415 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
15416 * On nmi (interrupt 2), do_trap should not be called.
15417 @@ -134,7 +128,7 @@ do_trap(int trapnr, int signr, char *str
15418 }
15419 #endif
15420
15421 - if (!user_mode(regs))
15422 + if (!user_mode_novm(regs))
15423 goto kernel_trap;
15424
15425 #ifdef CONFIG_X86_32
15426 @@ -157,7 +151,7 @@ trap_signal:
15427 printk_ratelimit()) {
15428 printk(KERN_INFO
15429 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
15430 - tsk->comm, tsk->pid, str,
15431 + tsk->comm, task_pid_nr(tsk), str,
15432 regs->ip, regs->sp, error_code);
15433 print_vma_addr(" in ", regs->ip);
15434 printk("\n");
15435 @@ -174,8 +168,20 @@ kernel_trap:
15436 if (!fixup_exception(regs)) {
15437 tsk->thread.error_code = error_code;
15438 tsk->thread.trap_no = trapnr;
15439 +
15440 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15441 + if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
15442 + str = "PAX: suspicious stack segment fault";
15443 +#endif
15444 +
15445 die(str, regs, error_code);
15446 }
15447 +
15448 +#ifdef CONFIG_PAX_REFCOUNT
15449 + if (trapnr == 4)
15450 + pax_report_refcount_overflow(regs);
15451 +#endif
15452 +
15453 return;
15454
15455 #ifdef CONFIG_X86_32
15456 @@ -264,14 +270,30 @@ do_general_protection(struct pt_regs *re
15457 conditional_sti(regs);
15458
15459 #ifdef CONFIG_X86_32
15460 - if (regs->flags & X86_VM_MASK)
15461 + if (v8086_mode(regs))
15462 goto gp_in_vm86;
15463 #endif
15464
15465 tsk = current;
15466 - if (!user_mode(regs))
15467 + if (!user_mode_novm(regs))
15468 goto gp_in_kernel;
15469
15470 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
15471 + if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
15472 + struct mm_struct *mm = tsk->mm;
15473 + unsigned long limit;
15474 +
15475 + down_write(&mm->mmap_sem);
15476 + limit = mm->context.user_cs_limit;
15477 + if (limit < TASK_SIZE) {
15478 + track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
15479 + up_write(&mm->mmap_sem);
15480 + return;
15481 + }
15482 + up_write(&mm->mmap_sem);
15483 + }
15484 +#endif
15485 +
15486 tsk->thread.error_code = error_code;
15487 tsk->thread.trap_no = 13;
15488
15489 @@ -304,6 +326,13 @@ gp_in_kernel:
15490 if (notify_die(DIE_GPF, "general protection fault", regs,
15491 error_code, 13, SIGSEGV) == NOTIFY_STOP)
15492 return;
15493 +
15494 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15495 + if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
15496 + die("PAX: suspicious general protection fault", regs, error_code);
15497 + else
15498 +#endif
15499 +
15500 die("general protection fault", regs, error_code);
15501 }
15502
15503 @@ -433,6 +462,17 @@ static notrace __kprobes void default_do
15504 dotraplinkage notrace __kprobes void
15505 do_nmi(struct pt_regs *regs, long error_code)
15506 {
15507 +
15508 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15509 + if (!user_mode(regs)) {
15510 + unsigned long cs = regs->cs & 0xFFFF;
15511 + unsigned long ip = ktva_ktla(regs->ip);
15512 +
15513 + if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
15514 + regs->ip = ip;
15515 + }
15516 +#endif
15517 +
15518 nmi_enter();
15519
15520 inc_irq_stat(__nmi_count);
15521 @@ -569,7 +609,7 @@ dotraplinkage void __kprobes do_debug(st
15522 /* It's safe to allow irq's after DR6 has been saved */
15523 preempt_conditional_sti(regs);
15524
15525 - if (regs->flags & X86_VM_MASK) {
15526 + if (v8086_mode(regs)) {
15527 handle_vm86_trap((struct kernel_vm86_regs *) regs,
15528 error_code, 1);
15529 preempt_conditional_cli(regs);
15530 @@ -583,7 +623,7 @@ dotraplinkage void __kprobes do_debug(st
15531 * We already checked v86 mode above, so we can check for kernel mode
15532 * by just checking the CPL of CS.
15533 */
15534 - if ((dr6 & DR_STEP) && !user_mode(regs)) {
15535 + if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
15536 tsk->thread.debugreg6 &= ~DR_STEP;
15537 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
15538 regs->flags &= ~X86_EFLAGS_TF;
15539 @@ -612,7 +652,7 @@ void math_error(struct pt_regs *regs, in
15540 return;
15541 conditional_sti(regs);
15542
15543 - if (!user_mode_vm(regs))
15544 + if (!user_mode(regs))
15545 {
15546 if (!fixup_exception(regs)) {
15547 task->thread.error_code = error_code;
15548 @@ -723,7 +763,7 @@ asmlinkage void __attribute__((weak)) sm
15549 void __math_state_restore(void)
15550 {
15551 struct thread_info *thread = current_thread_info();
15552 - struct task_struct *tsk = thread->task;
15553 + struct task_struct *tsk = current;
15554
15555 /*
15556 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
15557 @@ -750,8 +790,7 @@ void __math_state_restore(void)
15558 */
15559 asmlinkage void math_state_restore(void)
15560 {
15561 - struct thread_info *thread = current_thread_info();
15562 - struct task_struct *tsk = thread->task;
15563 + struct task_struct *tsk = current;
15564
15565 if (!tsk_used_math(tsk)) {
15566 local_irq_enable();
15567 diff -urNp linux-2.6.39.4/arch/x86/kernel/verify_cpu.S linux-2.6.39.4/arch/x86/kernel/verify_cpu.S
15568 --- linux-2.6.39.4/arch/x86/kernel/verify_cpu.S 2011-05-19 00:06:34.000000000 -0400
15569 +++ linux-2.6.39.4/arch/x86/kernel/verify_cpu.S 2011-08-05 19:44:35.000000000 -0400
15570 @@ -20,6 +20,7 @@
15571 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
15572 * arch/x86/kernel/trampoline_64.S: secondary processor verification
15573 * arch/x86/kernel/head_32.S: processor startup
15574 + * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
15575 *
15576 * verify_cpu, returns the status of longmode and SSE in register %eax.
15577 * 0: Success 1: Failure
15578 diff -urNp linux-2.6.39.4/arch/x86/kernel/vm86_32.c linux-2.6.39.4/arch/x86/kernel/vm86_32.c
15579 --- linux-2.6.39.4/arch/x86/kernel/vm86_32.c 2011-05-19 00:06:34.000000000 -0400
15580 +++ linux-2.6.39.4/arch/x86/kernel/vm86_32.c 2011-08-05 19:44:35.000000000 -0400
15581 @@ -41,6 +41,7 @@
15582 #include <linux/ptrace.h>
15583 #include <linux/audit.h>
15584 #include <linux/stddef.h>
15585 +#include <linux/grsecurity.h>
15586
15587 #include <asm/uaccess.h>
15588 #include <asm/io.h>
15589 @@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
15590 do_exit(SIGSEGV);
15591 }
15592
15593 - tss = &per_cpu(init_tss, get_cpu());
15594 + tss = init_tss + get_cpu();
15595 current->thread.sp0 = current->thread.saved_sp0;
15596 current->thread.sysenter_cs = __KERNEL_CS;
15597 load_sp0(tss, &current->thread);
15598 @@ -208,6 +209,13 @@ int sys_vm86old(struct vm86_struct __use
15599 struct task_struct *tsk;
15600 int tmp, ret = -EPERM;
15601
15602 +#ifdef CONFIG_GRKERNSEC_VM86
15603 + if (!capable(CAP_SYS_RAWIO)) {
15604 + gr_handle_vm86();
15605 + goto out;
15606 + }
15607 +#endif
15608 +
15609 tsk = current;
15610 if (tsk->thread.saved_sp0)
15611 goto out;
15612 @@ -238,6 +246,14 @@ int sys_vm86(unsigned long cmd, unsigned
15613 int tmp, ret;
15614 struct vm86plus_struct __user *v86;
15615
15616 +#ifdef CONFIG_GRKERNSEC_VM86
15617 + if (!capable(CAP_SYS_RAWIO)) {
15618 + gr_handle_vm86();
15619 + ret = -EPERM;
15620 + goto out;
15621 + }
15622 +#endif
15623 +
15624 tsk = current;
15625 switch (cmd) {
15626 case VM86_REQUEST_IRQ:
15627 @@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
15628 tsk->thread.saved_fs = info->regs32->fs;
15629 tsk->thread.saved_gs = get_user_gs(info->regs32);
15630
15631 - tss = &per_cpu(init_tss, get_cpu());
15632 + tss = init_tss + get_cpu();
15633 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
15634 if (cpu_has_sep)
15635 tsk->thread.sysenter_cs = 0;
15636 @@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
15637 goto cannot_handle;
15638 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
15639 goto cannot_handle;
15640 - intr_ptr = (unsigned long __user *) (i << 2);
15641 + intr_ptr = (__force unsigned long __user *) (i << 2);
15642 if (get_user(segoffs, intr_ptr))
15643 goto cannot_handle;
15644 if ((segoffs >> 16) == BIOSSEG)
15645 diff -urNp linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S
15646 --- linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S 2011-05-19 00:06:34.000000000 -0400
15647 +++ linux-2.6.39.4/arch/x86/kernel/vmlinux.lds.S 2011-08-05 19:44:35.000000000 -0400
15648 @@ -26,6 +26,13 @@
15649 #include <asm/page_types.h>
15650 #include <asm/cache.h>
15651 #include <asm/boot.h>
15652 +#include <asm/segment.h>
15653 +
15654 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
15655 +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
15656 +#else
15657 +#define __KERNEL_TEXT_OFFSET 0
15658 +#endif
15659
15660 #undef i386 /* in case the preprocessor is a 32bit one */
15661
15662 @@ -34,11 +41,9 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
15663 #ifdef CONFIG_X86_32
15664 OUTPUT_ARCH(i386)
15665 ENTRY(phys_startup_32)
15666 -jiffies = jiffies_64;
15667 #else
15668 OUTPUT_ARCH(i386:x86-64)
15669 ENTRY(phys_startup_64)
15670 -jiffies_64 = jiffies;
15671 #endif
15672
15673 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
15674 @@ -69,31 +74,46 @@ jiffies_64 = jiffies;
15675
15676 PHDRS {
15677 text PT_LOAD FLAGS(5); /* R_E */
15678 +#ifdef CONFIG_X86_32
15679 + module PT_LOAD FLAGS(5); /* R_E */
15680 +#endif
15681 +#ifdef CONFIG_XEN
15682 + rodata PT_LOAD FLAGS(5); /* R_E */
15683 +#else
15684 + rodata PT_LOAD FLAGS(4); /* R__ */
15685 +#endif
15686 data PT_LOAD FLAGS(6); /* RW_ */
15687 #ifdef CONFIG_X86_64
15688 user PT_LOAD FLAGS(5); /* R_E */
15689 +#endif
15690 + init.begin PT_LOAD FLAGS(6); /* RW_ */
15691 #ifdef CONFIG_SMP
15692 percpu PT_LOAD FLAGS(6); /* RW_ */
15693 #endif
15694 + text.init PT_LOAD FLAGS(5); /* R_E */
15695 + text.exit PT_LOAD FLAGS(5); /* R_E */
15696 init PT_LOAD FLAGS(7); /* RWE */
15697 -#endif
15698 note PT_NOTE FLAGS(0); /* ___ */
15699 }
15700
15701 SECTIONS
15702 {
15703 #ifdef CONFIG_X86_32
15704 - . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
15705 - phys_startup_32 = startup_32 - LOAD_OFFSET;
15706 + . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
15707 #else
15708 - . = __START_KERNEL;
15709 - phys_startup_64 = startup_64 - LOAD_OFFSET;
15710 + . = __START_KERNEL;
15711 #endif
15712
15713 /* Text and read-only data */
15714 - .text : AT(ADDR(.text) - LOAD_OFFSET) {
15715 - _text = .;
15716 + .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15717 /* bootstrapping code */
15718 +#ifdef CONFIG_X86_32
15719 + phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15720 +#else
15721 + phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15722 +#endif
15723 + __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
15724 + _text = .;
15725 HEAD_TEXT
15726 #ifdef CONFIG_X86_32
15727 . = ALIGN(PAGE_SIZE);
15728 @@ -109,13 +129,47 @@ SECTIONS
15729 IRQENTRY_TEXT
15730 *(.fixup)
15731 *(.gnu.warning)
15732 - /* End of text section */
15733 - _etext = .;
15734 } :text = 0x9090
15735
15736 - NOTES :text :note
15737 + . += __KERNEL_TEXT_OFFSET;
15738 +
15739 +#ifdef CONFIG_X86_32
15740 + . = ALIGN(PAGE_SIZE);
15741 + .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
15742 +
15743 +#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
15744 + MODULES_EXEC_VADDR = .;
15745 + BYTE(0)
15746 + . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
15747 + . = ALIGN(HPAGE_SIZE);
15748 + MODULES_EXEC_END = . - 1;
15749 +#endif
15750 +
15751 + } :module
15752 +#endif
15753 +
15754 + .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
15755 + /* End of text section */
15756 + _etext = . - __KERNEL_TEXT_OFFSET;
15757 + }
15758
15759 - EXCEPTION_TABLE(16) :text = 0x9090
15760 +#ifdef CONFIG_X86_32
15761 + . = ALIGN(PAGE_SIZE);
15762 + .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
15763 + *(.idt)
15764 + . = ALIGN(PAGE_SIZE);
15765 + *(.empty_zero_page)
15766 + *(.initial_pg_fixmap)
15767 + *(.initial_pg_pmd)
15768 + *(.initial_page_table)
15769 + *(.swapper_pg_dir)
15770 + } :rodata
15771 +#endif
15772 +
15773 + . = ALIGN(PAGE_SIZE);
15774 + NOTES :rodata :note
15775 +
15776 + EXCEPTION_TABLE(16) :rodata
15777
15778 #if defined(CONFIG_DEBUG_RODATA)
15779 /* .text should occupy whole number of pages */
15780 @@ -127,16 +181,20 @@ SECTIONS
15781
15782 /* Data */
15783 .data : AT(ADDR(.data) - LOAD_OFFSET) {
15784 +
15785 +#ifdef CONFIG_PAX_KERNEXEC
15786 + . = ALIGN(HPAGE_SIZE);
15787 +#else
15788 + . = ALIGN(PAGE_SIZE);
15789 +#endif
15790 +
15791 /* Start of data section */
15792 _sdata = .;
15793
15794 /* init_task */
15795 INIT_TASK_DATA(THREAD_SIZE)
15796
15797 -#ifdef CONFIG_X86_32
15798 - /* 32 bit has nosave before _edata */
15799 NOSAVE_DATA
15800 -#endif
15801
15802 PAGE_ALIGNED_DATA(PAGE_SIZE)
15803
15804 @@ -145,6 +203,8 @@ SECTIONS
15805 DATA_DATA
15806 CONSTRUCTORS
15807
15808 + jiffies = jiffies_64;
15809 +
15810 /* rarely changed data like cpu maps */
15811 READ_MOSTLY_DATA(INTERNODE_CACHE_BYTES)
15812
15813 @@ -199,12 +259,6 @@ SECTIONS
15814 }
15815 vgetcpu_mode = VVIRT(.vgetcpu_mode);
15816
15817 - . = ALIGN(L1_CACHE_BYTES);
15818 - .jiffies : AT(VLOAD(.jiffies)) {
15819 - *(.jiffies)
15820 - }
15821 - jiffies = VVIRT(.jiffies);
15822 -
15823 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
15824 *(.vsyscall_3)
15825 }
15826 @@ -220,12 +274,19 @@ SECTIONS
15827 #endif /* CONFIG_X86_64 */
15828
15829 /* Init code and data - will be freed after init */
15830 - . = ALIGN(PAGE_SIZE);
15831 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
15832 + BYTE(0)
15833 +
15834 +#ifdef CONFIG_PAX_KERNEXEC
15835 + . = ALIGN(HPAGE_SIZE);
15836 +#else
15837 + . = ALIGN(PAGE_SIZE);
15838 +#endif
15839 +
15840 __init_begin = .; /* paired with __init_end */
15841 - }
15842 + } :init.begin
15843
15844 -#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
15845 +#ifdef CONFIG_SMP
15846 /*
15847 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
15848 * output PHDR, so the next output section - .init.text - should
15849 @@ -234,12 +295,27 @@ SECTIONS
15850 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
15851 #endif
15852
15853 - INIT_TEXT_SECTION(PAGE_SIZE)
15854 -#ifdef CONFIG_X86_64
15855 - :init
15856 -#endif
15857 + . = ALIGN(PAGE_SIZE);
15858 + init_begin = .;
15859 + .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
15860 + VMLINUX_SYMBOL(_sinittext) = .;
15861 + INIT_TEXT
15862 + VMLINUX_SYMBOL(_einittext) = .;
15863 + . = ALIGN(PAGE_SIZE);
15864 + } :text.init
15865
15866 - INIT_DATA_SECTION(16)
15867 + /*
15868 + * .exit.text is discard at runtime, not link time, to deal with
15869 + * references from .altinstructions and .eh_frame
15870 + */
15871 + .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
15872 + EXIT_TEXT
15873 + . = ALIGN(16);
15874 + } :text.exit
15875 + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
15876 +
15877 + . = ALIGN(PAGE_SIZE);
15878 + INIT_DATA_SECTION(16) :init
15879
15880 /*
15881 * Code and data for a variety of lowlevel trampolines, to be
15882 @@ -306,19 +382,12 @@ SECTIONS
15883 }
15884
15885 . = ALIGN(8);
15886 - /*
15887 - * .exit.text is discard at runtime, not link time, to deal with
15888 - * references from .altinstructions and .eh_frame
15889 - */
15890 - .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
15891 - EXIT_TEXT
15892 - }
15893
15894 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
15895 EXIT_DATA
15896 }
15897
15898 -#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
15899 +#ifndef CONFIG_SMP
15900 PERCPU(INTERNODE_CACHE_BYTES, PAGE_SIZE)
15901 #endif
15902
15903 @@ -337,16 +406,10 @@ SECTIONS
15904 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
15905 __smp_locks = .;
15906 *(.smp_locks)
15907 - . = ALIGN(PAGE_SIZE);
15908 __smp_locks_end = .;
15909 + . = ALIGN(PAGE_SIZE);
15910 }
15911
15912 -#ifdef CONFIG_X86_64
15913 - .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
15914 - NOSAVE_DATA
15915 - }
15916 -#endif
15917 -
15918 /* BSS */
15919 . = ALIGN(PAGE_SIZE);
15920 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
15921 @@ -362,6 +425,7 @@ SECTIONS
15922 __brk_base = .;
15923 . += 64 * 1024; /* 64k alignment slop space */
15924 *(.brk_reservation) /* areas brk users have reserved */
15925 + . = ALIGN(HPAGE_SIZE);
15926 __brk_limit = .;
15927 }
15928
15929 @@ -388,13 +452,12 @@ SECTIONS
15930 * for the boot processor.
15931 */
15932 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
15933 -INIT_PER_CPU(gdt_page);
15934 INIT_PER_CPU(irq_stack_union);
15935
15936 /*
15937 * Build-time check on the image size:
15938 */
15939 -. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
15940 +. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
15941 "kernel image bigger than KERNEL_IMAGE_SIZE");
15942
15943 #ifdef CONFIG_SMP
15944 diff -urNp linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c
15945 --- linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c 2011-05-19 00:06:34.000000000 -0400
15946 +++ linux-2.6.39.4/arch/x86/kernel/vsyscall_64.c 2011-08-05 19:44:35.000000000 -0400
15947 @@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
15948
15949 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
15950 /* copy vsyscall data */
15951 + strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
15952 vsyscall_gtod_data.clock.vread = clock->vread;
15953 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
15954 vsyscall_gtod_data.clock.mask = clock->mask;
15955 @@ -208,7 +209,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
15956 We do this here because otherwise user space would do it on
15957 its own in a likely inferior way (no access to jiffies).
15958 If you don't like it pass NULL. */
15959 - if (tcache && tcache->blob[0] == (j = __jiffies)) {
15960 + if (tcache && tcache->blob[0] == (j = jiffies)) {
15961 p = tcache->blob[1];
15962 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
15963 /* Load per CPU data from RDTSCP */
15964 diff -urNp linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c
15965 --- linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c 2011-05-19 00:06:34.000000000 -0400
15966 +++ linux-2.6.39.4/arch/x86/kernel/x8664_ksyms_64.c 2011-08-05 19:44:35.000000000 -0400
15967 @@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8);
15968 EXPORT_SYMBOL(copy_user_generic_string);
15969 EXPORT_SYMBOL(copy_user_generic_unrolled);
15970 EXPORT_SYMBOL(__copy_user_nocache);
15971 -EXPORT_SYMBOL(_copy_from_user);
15972 -EXPORT_SYMBOL(_copy_to_user);
15973
15974 EXPORT_SYMBOL(copy_page);
15975 EXPORT_SYMBOL(clear_page);
15976 diff -urNp linux-2.6.39.4/arch/x86/kernel/xsave.c linux-2.6.39.4/arch/x86/kernel/xsave.c
15977 --- linux-2.6.39.4/arch/x86/kernel/xsave.c 2011-05-19 00:06:34.000000000 -0400
15978 +++ linux-2.6.39.4/arch/x86/kernel/xsave.c 2011-08-05 19:44:35.000000000 -0400
15979 @@ -130,7 +130,7 @@ int check_for_xstate(struct i387_fxsave_
15980 fx_sw_user->xstate_size > fx_sw_user->extended_size)
15981 return -EINVAL;
15982
15983 - err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
15984 + err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
15985 fx_sw_user->extended_size -
15986 FP_XSTATE_MAGIC2_SIZE));
15987 if (err)
15988 @@ -267,7 +267,7 @@ fx_only:
15989 * the other extended state.
15990 */
15991 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
15992 - return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
15993 + return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
15994 }
15995
15996 /*
15997 @@ -299,7 +299,7 @@ int restore_i387_xstate(void __user *buf
15998 if (use_xsave())
15999 err = restore_user_xstate(buf);
16000 else
16001 - err = fxrstor_checking((__force struct i387_fxsave_struct *)
16002 + err = fxrstor_checking((struct i387_fxsave_struct __user *)
16003 buf);
16004 if (unlikely(err)) {
16005 /*
16006 diff -urNp linux-2.6.39.4/arch/x86/kvm/emulate.c linux-2.6.39.4/arch/x86/kvm/emulate.c
16007 --- linux-2.6.39.4/arch/x86/kvm/emulate.c 2011-05-19 00:06:34.000000000 -0400
16008 +++ linux-2.6.39.4/arch/x86/kvm/emulate.c 2011-08-05 19:44:35.000000000 -0400
16009 @@ -89,7 +89,7 @@
16010 #define Src2ImmByte (2<<29)
16011 #define Src2One (3<<29)
16012 #define Src2Imm (4<<29)
16013 -#define Src2Mask (7<<29)
16014 +#define Src2Mask (7U<<29)
16015
16016 #define X2(x...) x, x
16017 #define X3(x...) X2(x), x
16018 @@ -190,6 +190,7 @@ struct group_dual {
16019
16020 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \
16021 do { \
16022 + unsigned long _tmp; \
16023 __asm__ __volatile__ ( \
16024 _PRE_EFLAGS("0", "4", "2") \
16025 _op _suffix " %"_x"3,%1; " \
16026 @@ -203,8 +204,6 @@ struct group_dual {
16027 /* Raw emulation: instruction has two explicit operands. */
16028 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
16029 do { \
16030 - unsigned long _tmp; \
16031 - \
16032 switch ((_dst).bytes) { \
16033 case 2: \
16034 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\
16035 @@ -220,7 +219,6 @@ struct group_dual {
16036
16037 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
16038 do { \
16039 - unsigned long _tmp; \
16040 switch ((_dst).bytes) { \
16041 case 1: \
16042 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \
16043 diff -urNp linux-2.6.39.4/arch/x86/kvm/lapic.c linux-2.6.39.4/arch/x86/kvm/lapic.c
16044 --- linux-2.6.39.4/arch/x86/kvm/lapic.c 2011-05-19 00:06:34.000000000 -0400
16045 +++ linux-2.6.39.4/arch/x86/kvm/lapic.c 2011-08-05 19:44:35.000000000 -0400
16046 @@ -53,7 +53,7 @@
16047 #define APIC_BUS_CYCLE_NS 1
16048
16049 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
16050 -#define apic_debug(fmt, arg...)
16051 +#define apic_debug(fmt, arg...) do {} while (0)
16052
16053 #define APIC_LVT_NUM 6
16054 /* 14 is the version for Xeon and Pentium 8.4.8*/
16055 diff -urNp linux-2.6.39.4/arch/x86/kvm/mmu.c linux-2.6.39.4/arch/x86/kvm/mmu.c
16056 --- linux-2.6.39.4/arch/x86/kvm/mmu.c 2011-05-19 00:06:34.000000000 -0400
16057 +++ linux-2.6.39.4/arch/x86/kvm/mmu.c 2011-08-05 19:44:35.000000000 -0400
16058 @@ -3240,7 +3240,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16059
16060 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
16061
16062 - invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
16063 + invlpg_counter = atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter);
16064
16065 /*
16066 * Assume that the pte write on a page table of the same type
16067 @@ -3275,7 +3275,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *
16068 smp_rmb();
16069
16070 spin_lock(&vcpu->kvm->mmu_lock);
16071 - if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16072 + if (atomic_read_unchecked(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
16073 gentry = 0;
16074 kvm_mmu_free_some_pages(vcpu);
16075 ++vcpu->kvm->stat.mmu_pte_write;
16076 diff -urNp linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h
16077 --- linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h 2011-05-19 00:06:34.000000000 -0400
16078 +++ linux-2.6.39.4/arch/x86/kvm/paging_tmpl.h 2011-08-05 19:44:35.000000000 -0400
16079 @@ -552,6 +552,8 @@ static int FNAME(page_fault)(struct kvm_
16080 unsigned long mmu_seq;
16081 bool map_writable;
16082
16083 + pax_track_stack();
16084 +
16085 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
16086
16087 r = mmu_topup_memory_caches(vcpu);
16088 @@ -672,7 +674,7 @@ static void FNAME(invlpg)(struct kvm_vcp
16089 if (need_flush)
16090 kvm_flush_remote_tlbs(vcpu->kvm);
16091
16092 - atomic_inc(&vcpu->kvm->arch.invlpg_counter);
16093 + atomic_inc_unchecked(&vcpu->kvm->arch.invlpg_counter);
16094
16095 spin_unlock(&vcpu->kvm->mmu_lock);
16096
16097 diff -urNp linux-2.6.39.4/arch/x86/kvm/svm.c linux-2.6.39.4/arch/x86/kvm/svm.c
16098 --- linux-2.6.39.4/arch/x86/kvm/svm.c 2011-05-19 00:06:34.000000000 -0400
16099 +++ linux-2.6.39.4/arch/x86/kvm/svm.c 2011-08-05 20:34:06.000000000 -0400
16100 @@ -3278,7 +3278,11 @@ static void reload_tss(struct kvm_vcpu *
16101 int cpu = raw_smp_processor_id();
16102
16103 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
16104 +
16105 + pax_open_kernel();
16106 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
16107 + pax_close_kernel();
16108 +
16109 load_TR_desc();
16110 }
16111
16112 @@ -3656,6 +3660,10 @@ static void svm_vcpu_run(struct kvm_vcpu
16113 #endif
16114 #endif
16115
16116 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16117 + __set_fs(current_thread_info()->addr_limit);
16118 +#endif
16119 +
16120 reload_tss(vcpu);
16121
16122 local_irq_disable();
16123 diff -urNp linux-2.6.39.4/arch/x86/kvm/vmx.c linux-2.6.39.4/arch/x86/kvm/vmx.c
16124 --- linux-2.6.39.4/arch/x86/kvm/vmx.c 2011-05-19 00:06:34.000000000 -0400
16125 +++ linux-2.6.39.4/arch/x86/kvm/vmx.c 2011-08-05 20:34:06.000000000 -0400
16126 @@ -725,7 +725,11 @@ static void reload_tss(void)
16127 struct desc_struct *descs;
16128
16129 descs = (void *)gdt->address;
16130 +
16131 + pax_open_kernel();
16132 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
16133 + pax_close_kernel();
16134 +
16135 load_TR_desc();
16136 }
16137
16138 @@ -1648,8 +1652,11 @@ static __init int hardware_setup(void)
16139 if (!cpu_has_vmx_flexpriority())
16140 flexpriority_enabled = 0;
16141
16142 - if (!cpu_has_vmx_tpr_shadow())
16143 - kvm_x86_ops->update_cr8_intercept = NULL;
16144 + if (!cpu_has_vmx_tpr_shadow()) {
16145 + pax_open_kernel();
16146 + *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
16147 + pax_close_kernel();
16148 + }
16149
16150 if (enable_ept && !cpu_has_vmx_ept_2m_page())
16151 kvm_disable_largepages();
16152 @@ -2693,7 +2700,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
16153 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
16154
16155 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
16156 - vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
16157 + vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
16158 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
16159 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
16160 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host));
16161 @@ -4068,6 +4075,12 @@ static void __noclone vmx_vcpu_run(struc
16162 "jmp .Lkvm_vmx_return \n\t"
16163 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
16164 ".Lkvm_vmx_return: "
16165 +
16166 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16167 + "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
16168 + ".Lkvm_vmx_return2: "
16169 +#endif
16170 +
16171 /* Save guest registers, load host registers, keep flags */
16172 "mov %0, %c[wordsize](%%"R"sp) \n\t"
16173 "pop %0 \n\t"
16174 @@ -4116,6 +4129,11 @@ static void __noclone vmx_vcpu_run(struc
16175 #endif
16176 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
16177 [wordsize]"i"(sizeof(ulong))
16178 +
16179 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16180 + ,[cs]"i"(__KERNEL_CS)
16181 +#endif
16182 +
16183 : "cc", "memory"
16184 , R"ax", R"bx", R"di", R"si"
16185 #ifdef CONFIG_X86_64
16186 @@ -4130,7 +4148,16 @@ static void __noclone vmx_vcpu_run(struc
16187
16188 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
16189
16190 - asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
16191 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
16192 +
16193 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16194 + loadsegment(fs, __KERNEL_PERCPU);
16195 +#endif
16196 +
16197 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
16198 + __set_fs(current_thread_info()->addr_limit);
16199 +#endif
16200 +
16201 vmx->launched = 1;
16202
16203 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
16204 diff -urNp linux-2.6.39.4/arch/x86/kvm/x86.c linux-2.6.39.4/arch/x86/kvm/x86.c
16205 --- linux-2.6.39.4/arch/x86/kvm/x86.c 2011-05-19 00:06:34.000000000 -0400
16206 +++ linux-2.6.39.4/arch/x86/kvm/x86.c 2011-08-05 20:34:06.000000000 -0400
16207 @@ -2050,6 +2050,8 @@ long kvm_arch_dev_ioctl(struct file *fil
16208 if (n < msr_list.nmsrs)
16209 goto out;
16210 r = -EFAULT;
16211 + if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
16212 + goto out;
16213 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
16214 num_msrs_to_save * sizeof(u32)))
16215 goto out;
16216 @@ -2217,15 +2219,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
16217 struct kvm_cpuid2 *cpuid,
16218 struct kvm_cpuid_entry2 __user *entries)
16219 {
16220 - int r;
16221 + int r, i;
16222
16223 r = -E2BIG;
16224 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
16225 goto out;
16226 r = -EFAULT;
16227 - if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
16228 - cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16229 + if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
16230 goto out;
16231 + for (i = 0; i < cpuid->nent; ++i) {
16232 + struct kvm_cpuid_entry2 cpuid_entry;
16233 + if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
16234 + goto out;
16235 + vcpu->arch.cpuid_entries[i] = cpuid_entry;
16236 + }
16237 vcpu->arch.cpuid_nent = cpuid->nent;
16238 kvm_apic_set_version(vcpu);
16239 kvm_x86_ops->cpuid_update(vcpu);
16240 @@ -2240,15 +2247,19 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
16241 struct kvm_cpuid2 *cpuid,
16242 struct kvm_cpuid_entry2 __user *entries)
16243 {
16244 - int r;
16245 + int r, i;
16246
16247 r = -E2BIG;
16248 if (cpuid->nent < vcpu->arch.cpuid_nent)
16249 goto out;
16250 r = -EFAULT;
16251 - if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
16252 - vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16253 + if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
16254 goto out;
16255 + for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
16256 + struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
16257 + if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
16258 + goto out;
16259 + }
16260 return 0;
16261
16262 out:
16263 @@ -2526,7 +2537,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
16264 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
16265 struct kvm_interrupt *irq)
16266 {
16267 - if (irq->irq < 0 || irq->irq >= 256)
16268 + if (irq->irq >= 256)
16269 return -EINVAL;
16270 if (irqchip_in_kernel(vcpu->kvm))
16271 return -ENXIO;
16272 @@ -4690,7 +4701,7 @@ void kvm_after_handle_nmi(struct kvm_vcp
16273 }
16274 EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);
16275
16276 -int kvm_arch_init(void *opaque)
16277 +int kvm_arch_init(const void *opaque)
16278 {
16279 int r;
16280 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
16281 diff -urNp linux-2.6.39.4/arch/x86/lguest/boot.c linux-2.6.39.4/arch/x86/lguest/boot.c
16282 --- linux-2.6.39.4/arch/x86/lguest/boot.c 2011-06-25 12:55:22.000000000 -0400
16283 +++ linux-2.6.39.4/arch/x86/lguest/boot.c 2011-08-05 20:34:06.000000000 -0400
16284 @@ -1178,9 +1178,10 @@ static __init int early_put_chars(u32 vt
16285 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
16286 * Launcher to reboot us.
16287 */
16288 -static void lguest_restart(char *reason)
16289 +static __noreturn void lguest_restart(char *reason)
16290 {
16291 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
16292 + BUG();
16293 }
16294
16295 /*G:050
16296 diff -urNp linux-2.6.39.4/arch/x86/lib/atomic64_32.c linux-2.6.39.4/arch/x86/lib/atomic64_32.c
16297 --- linux-2.6.39.4/arch/x86/lib/atomic64_32.c 2011-05-19 00:06:34.000000000 -0400
16298 +++ linux-2.6.39.4/arch/x86/lib/atomic64_32.c 2011-08-05 19:44:35.000000000 -0400
16299 @@ -8,18 +8,30 @@
16300
16301 long long atomic64_read_cx8(long long, const atomic64_t *v);
16302 EXPORT_SYMBOL(atomic64_read_cx8);
16303 +long long atomic64_read_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16304 +EXPORT_SYMBOL(atomic64_read_unchecked_cx8);
16305 long long atomic64_set_cx8(long long, const atomic64_t *v);
16306 EXPORT_SYMBOL(atomic64_set_cx8);
16307 +long long atomic64_set_unchecked_cx8(long long, const atomic64_unchecked_t *v);
16308 +EXPORT_SYMBOL(atomic64_set_unchecked_cx8);
16309 long long atomic64_xchg_cx8(long long, unsigned high);
16310 EXPORT_SYMBOL(atomic64_xchg_cx8);
16311 long long atomic64_add_return_cx8(long long a, atomic64_t *v);
16312 EXPORT_SYMBOL(atomic64_add_return_cx8);
16313 +long long atomic64_add_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16314 +EXPORT_SYMBOL(atomic64_add_return_unchecked_cx8);
16315 long long atomic64_sub_return_cx8(long long a, atomic64_t *v);
16316 EXPORT_SYMBOL(atomic64_sub_return_cx8);
16317 +long long atomic64_sub_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16318 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_cx8);
16319 long long atomic64_inc_return_cx8(long long a, atomic64_t *v);
16320 EXPORT_SYMBOL(atomic64_inc_return_cx8);
16321 +long long atomic64_inc_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16322 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_cx8);
16323 long long atomic64_dec_return_cx8(long long a, atomic64_t *v);
16324 EXPORT_SYMBOL(atomic64_dec_return_cx8);
16325 +long long atomic64_dec_return_unchecked_cx8(long long a, atomic64_unchecked_t *v);
16326 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_cx8);
16327 long long atomic64_dec_if_positive_cx8(atomic64_t *v);
16328 EXPORT_SYMBOL(atomic64_dec_if_positive_cx8);
16329 int atomic64_inc_not_zero_cx8(atomic64_t *v);
16330 @@ -30,26 +42,46 @@ EXPORT_SYMBOL(atomic64_add_unless_cx8);
16331 #ifndef CONFIG_X86_CMPXCHG64
16332 long long atomic64_read_386(long long, const atomic64_t *v);
16333 EXPORT_SYMBOL(atomic64_read_386);
16334 +long long atomic64_read_unchecked_386(long long, const atomic64_unchecked_t *v);
16335 +EXPORT_SYMBOL(atomic64_read_unchecked_386);
16336 long long atomic64_set_386(long long, const atomic64_t *v);
16337 EXPORT_SYMBOL(atomic64_set_386);
16338 +long long atomic64_set_unchecked_386(long long, const atomic64_unchecked_t *v);
16339 +EXPORT_SYMBOL(atomic64_set_unchecked_386);
16340 long long atomic64_xchg_386(long long, unsigned high);
16341 EXPORT_SYMBOL(atomic64_xchg_386);
16342 long long atomic64_add_return_386(long long a, atomic64_t *v);
16343 EXPORT_SYMBOL(atomic64_add_return_386);
16344 +long long atomic64_add_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16345 +EXPORT_SYMBOL(atomic64_add_return_unchecked_386);
16346 long long atomic64_sub_return_386(long long a, atomic64_t *v);
16347 EXPORT_SYMBOL(atomic64_sub_return_386);
16348 +long long atomic64_sub_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16349 +EXPORT_SYMBOL(atomic64_sub_return_unchecked_386);
16350 long long atomic64_inc_return_386(long long a, atomic64_t *v);
16351 EXPORT_SYMBOL(atomic64_inc_return_386);
16352 +long long atomic64_inc_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16353 +EXPORT_SYMBOL(atomic64_inc_return_unchecked_386);
16354 long long atomic64_dec_return_386(long long a, atomic64_t *v);
16355 EXPORT_SYMBOL(atomic64_dec_return_386);
16356 +long long atomic64_dec_return_unchecked_386(long long a, atomic64_unchecked_t *v);
16357 +EXPORT_SYMBOL(atomic64_dec_return_unchecked_386);
16358 long long atomic64_add_386(long long a, atomic64_t *v);
16359 EXPORT_SYMBOL(atomic64_add_386);
16360 +long long atomic64_add_unchecked_386(long long a, atomic64_unchecked_t *v);
16361 +EXPORT_SYMBOL(atomic64_add_unchecked_386);
16362 long long atomic64_sub_386(long long a, atomic64_t *v);
16363 EXPORT_SYMBOL(atomic64_sub_386);
16364 +long long atomic64_sub_unchecked_386(long long a, atomic64_unchecked_t *v);
16365 +EXPORT_SYMBOL(atomic64_sub_unchecked_386);
16366 long long atomic64_inc_386(long long a, atomic64_t *v);
16367 EXPORT_SYMBOL(atomic64_inc_386);
16368 +long long atomic64_inc_unchecked_386(long long a, atomic64_unchecked_t *v);
16369 +EXPORT_SYMBOL(atomic64_inc_unchecked_386);
16370 long long atomic64_dec_386(long long a, atomic64_t *v);
16371 EXPORT_SYMBOL(atomic64_dec_386);
16372 +long long atomic64_dec_unchecked_386(long long a, atomic64_unchecked_t *v);
16373 +EXPORT_SYMBOL(atomic64_dec_unchecked_386);
16374 long long atomic64_dec_if_positive_386(atomic64_t *v);
16375 EXPORT_SYMBOL(atomic64_dec_if_positive_386);
16376 int atomic64_inc_not_zero_386(atomic64_t *v);
16377 diff -urNp linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S
16378 --- linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S 2011-05-19 00:06:34.000000000 -0400
16379 +++ linux-2.6.39.4/arch/x86/lib/atomic64_386_32.S 2011-08-05 19:44:35.000000000 -0400
16380 @@ -48,6 +48,10 @@ BEGIN(read)
16381 movl (v), %eax
16382 movl 4(v), %edx
16383 RET_ENDP
16384 +BEGIN(read_unchecked)
16385 + movl (v), %eax
16386 + movl 4(v), %edx
16387 +RET_ENDP
16388 #undef v
16389
16390 #define v %esi
16391 @@ -55,6 +59,10 @@ BEGIN(set)
16392 movl %ebx, (v)
16393 movl %ecx, 4(v)
16394 RET_ENDP
16395 +BEGIN(set_unchecked)
16396 + movl %ebx, (v)
16397 + movl %ecx, 4(v)
16398 +RET_ENDP
16399 #undef v
16400
16401 #define v %esi
16402 @@ -70,6 +78,20 @@ RET_ENDP
16403 BEGIN(add)
16404 addl %eax, (v)
16405 adcl %edx, 4(v)
16406 +
16407 +#ifdef CONFIG_PAX_REFCOUNT
16408 + jno 0f
16409 + subl %eax, (v)
16410 + sbbl %edx, 4(v)
16411 + int $4
16412 +0:
16413 + _ASM_EXTABLE(0b, 0b)
16414 +#endif
16415 +
16416 +RET_ENDP
16417 +BEGIN(add_unchecked)
16418 + addl %eax, (v)
16419 + adcl %edx, 4(v)
16420 RET_ENDP
16421 #undef v
16422
16423 @@ -77,6 +99,24 @@ RET_ENDP
16424 BEGIN(add_return)
16425 addl (v), %eax
16426 adcl 4(v), %edx
16427 +
16428 +#ifdef CONFIG_PAX_REFCOUNT
16429 + into
16430 +1234:
16431 + _ASM_EXTABLE(1234b, 2f)
16432 +#endif
16433 +
16434 + movl %eax, (v)
16435 + movl %edx, 4(v)
16436 +
16437 +#ifdef CONFIG_PAX_REFCOUNT
16438 +2:
16439 +#endif
16440 +
16441 +RET_ENDP
16442 +BEGIN(add_return_unchecked)
16443 + addl (v), %eax
16444 + adcl 4(v), %edx
16445 movl %eax, (v)
16446 movl %edx, 4(v)
16447 RET_ENDP
16448 @@ -86,6 +126,20 @@ RET_ENDP
16449 BEGIN(sub)
16450 subl %eax, (v)
16451 sbbl %edx, 4(v)
16452 +
16453 +#ifdef CONFIG_PAX_REFCOUNT
16454 + jno 0f
16455 + addl %eax, (v)
16456 + adcl %edx, 4(v)
16457 + int $4
16458 +0:
16459 + _ASM_EXTABLE(0b, 0b)
16460 +#endif
16461 +
16462 +RET_ENDP
16463 +BEGIN(sub_unchecked)
16464 + subl %eax, (v)
16465 + sbbl %edx, 4(v)
16466 RET_ENDP
16467 #undef v
16468
16469 @@ -96,6 +150,27 @@ BEGIN(sub_return)
16470 sbbl $0, %edx
16471 addl (v), %eax
16472 adcl 4(v), %edx
16473 +
16474 +#ifdef CONFIG_PAX_REFCOUNT
16475 + into
16476 +1234:
16477 + _ASM_EXTABLE(1234b, 2f)
16478 +#endif
16479 +
16480 + movl %eax, (v)
16481 + movl %edx, 4(v)
16482 +
16483 +#ifdef CONFIG_PAX_REFCOUNT
16484 +2:
16485 +#endif
16486 +
16487 +RET_ENDP
16488 +BEGIN(sub_return_unchecked)
16489 + negl %edx
16490 + negl %eax
16491 + sbbl $0, %edx
16492 + addl (v), %eax
16493 + adcl 4(v), %edx
16494 movl %eax, (v)
16495 movl %edx, 4(v)
16496 RET_ENDP
16497 @@ -105,6 +180,20 @@ RET_ENDP
16498 BEGIN(inc)
16499 addl $1, (v)
16500 adcl $0, 4(v)
16501 +
16502 +#ifdef CONFIG_PAX_REFCOUNT
16503 + jno 0f
16504 + subl $1, (v)
16505 + sbbl $0, 4(v)
16506 + int $4
16507 +0:
16508 + _ASM_EXTABLE(0b, 0b)
16509 +#endif
16510 +
16511 +RET_ENDP
16512 +BEGIN(inc_unchecked)
16513 + addl $1, (v)
16514 + adcl $0, 4(v)
16515 RET_ENDP
16516 #undef v
16517
16518 @@ -114,6 +203,26 @@ BEGIN(inc_return)
16519 movl 4(v), %edx
16520 addl $1, %eax
16521 adcl $0, %edx
16522 +
16523 +#ifdef CONFIG_PAX_REFCOUNT
16524 + into
16525 +1234:
16526 + _ASM_EXTABLE(1234b, 2f)
16527 +#endif
16528 +
16529 + movl %eax, (v)
16530 + movl %edx, 4(v)
16531 +
16532 +#ifdef CONFIG_PAX_REFCOUNT
16533 +2:
16534 +#endif
16535 +
16536 +RET_ENDP
16537 +BEGIN(inc_return_unchecked)
16538 + movl (v), %eax
16539 + movl 4(v), %edx
16540 + addl $1, %eax
16541 + adcl $0, %edx
16542 movl %eax, (v)
16543 movl %edx, 4(v)
16544 RET_ENDP
16545 @@ -123,6 +232,20 @@ RET_ENDP
16546 BEGIN(dec)
16547 subl $1, (v)
16548 sbbl $0, 4(v)
16549 +
16550 +#ifdef CONFIG_PAX_REFCOUNT
16551 + jno 0f
16552 + addl $1, (v)
16553 + adcl $0, 4(v)
16554 + int $4
16555 +0:
16556 + _ASM_EXTABLE(0b, 0b)
16557 +#endif
16558 +
16559 +RET_ENDP
16560 +BEGIN(dec_unchecked)
16561 + subl $1, (v)
16562 + sbbl $0, 4(v)
16563 RET_ENDP
16564 #undef v
16565
16566 @@ -132,6 +255,26 @@ BEGIN(dec_return)
16567 movl 4(v), %edx
16568 subl $1, %eax
16569 sbbl $0, %edx
16570 +
16571 +#ifdef CONFIG_PAX_REFCOUNT
16572 + into
16573 +1234:
16574 + _ASM_EXTABLE(1234b, 2f)
16575 +#endif
16576 +
16577 + movl %eax, (v)
16578 + movl %edx, 4(v)
16579 +
16580 +#ifdef CONFIG_PAX_REFCOUNT
16581 +2:
16582 +#endif
16583 +
16584 +RET_ENDP
16585 +BEGIN(dec_return_unchecked)
16586 + movl (v), %eax
16587 + movl 4(v), %edx
16588 + subl $1, %eax
16589 + sbbl $0, %edx
16590 movl %eax, (v)
16591 movl %edx, 4(v)
16592 RET_ENDP
16593 @@ -143,6 +286,13 @@ BEGIN(add_unless)
16594 adcl %edx, %edi
16595 addl (v), %eax
16596 adcl 4(v), %edx
16597 +
16598 +#ifdef CONFIG_PAX_REFCOUNT
16599 + into
16600 +1234:
16601 + _ASM_EXTABLE(1234b, 2f)
16602 +#endif
16603 +
16604 cmpl %eax, %esi
16605 je 3f
16606 1:
16607 @@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
16608 1:
16609 addl $1, %eax
16610 adcl $0, %edx
16611 +
16612 +#ifdef CONFIG_PAX_REFCOUNT
16613 + into
16614 +1234:
16615 + _ASM_EXTABLE(1234b, 2f)
16616 +#endif
16617 +
16618 movl %eax, (v)
16619 movl %edx, 4(v)
16620 movl $1, %eax
16621 @@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
16622 movl 4(v), %edx
16623 subl $1, %eax
16624 sbbl $0, %edx
16625 +
16626 +#ifdef CONFIG_PAX_REFCOUNT
16627 + into
16628 +1234:
16629 + _ASM_EXTABLE(1234b, 1f)
16630 +#endif
16631 +
16632 js 1f
16633 movl %eax, (v)
16634 movl %edx, 4(v)
16635 diff -urNp linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S
16636 --- linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S 2011-05-19 00:06:34.000000000 -0400
16637 +++ linux-2.6.39.4/arch/x86/lib/atomic64_cx8_32.S 2011-08-05 19:44:35.000000000 -0400
16638 @@ -39,6 +39,14 @@ ENTRY(atomic64_read_cx8)
16639 CFI_ENDPROC
16640 ENDPROC(atomic64_read_cx8)
16641
16642 +ENTRY(atomic64_read_unchecked_cx8)
16643 + CFI_STARTPROC
16644 +
16645 + read64 %ecx
16646 + ret
16647 + CFI_ENDPROC
16648 +ENDPROC(atomic64_read_unchecked_cx8)
16649 +
16650 ENTRY(atomic64_set_cx8)
16651 CFI_STARTPROC
16652
16653 @@ -52,6 +60,19 @@ ENTRY(atomic64_set_cx8)
16654 CFI_ENDPROC
16655 ENDPROC(atomic64_set_cx8)
16656
16657 +ENTRY(atomic64_set_unchecked_cx8)
16658 + CFI_STARTPROC
16659 +
16660 +1:
16661 +/* we don't need LOCK_PREFIX since aligned 64-bit writes
16662 + * are atomic on 586 and newer */
16663 + cmpxchg8b (%esi)
16664 + jne 1b
16665 +
16666 + ret
16667 + CFI_ENDPROC
16668 +ENDPROC(atomic64_set_unchecked_cx8)
16669 +
16670 ENTRY(atomic64_xchg_cx8)
16671 CFI_STARTPROC
16672
16673 @@ -66,8 +87,8 @@ ENTRY(atomic64_xchg_cx8)
16674 CFI_ENDPROC
16675 ENDPROC(atomic64_xchg_cx8)
16676
16677 -.macro addsub_return func ins insc
16678 -ENTRY(atomic64_\func\()_return_cx8)
16679 +.macro addsub_return func ins insc unchecked=""
16680 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16681 CFI_STARTPROC
16682 SAVE ebp
16683 SAVE ebx
16684 @@ -84,27 +105,43 @@ ENTRY(atomic64_\func\()_return_cx8)
16685 movl %edx, %ecx
16686 \ins\()l %esi, %ebx
16687 \insc\()l %edi, %ecx
16688 +
16689 +.ifb \unchecked
16690 +#ifdef CONFIG_PAX_REFCOUNT
16691 + into
16692 +2:
16693 + _ASM_EXTABLE(2b, 3f)
16694 +#endif
16695 +.endif
16696 +
16697 LOCK_PREFIX
16698 cmpxchg8b (%ebp)
16699 jne 1b
16700 -
16701 -10:
16702 movl %ebx, %eax
16703 movl %ecx, %edx
16704 +
16705 +.ifb \unchecked
16706 +#ifdef CONFIG_PAX_REFCOUNT
16707 +3:
16708 +#endif
16709 +.endif
16710 +
16711 RESTORE edi
16712 RESTORE esi
16713 RESTORE ebx
16714 RESTORE ebp
16715 ret
16716 CFI_ENDPROC
16717 -ENDPROC(atomic64_\func\()_return_cx8)
16718 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16719 .endm
16720
16721 addsub_return add add adc
16722 addsub_return sub sub sbb
16723 +addsub_return add add adc _unchecked
16724 +addsub_return sub sub sbb _unchecked
16725
16726 -.macro incdec_return func ins insc
16727 -ENTRY(atomic64_\func\()_return_cx8)
16728 +.macro incdec_return func ins insc unchecked
16729 +ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
16730 CFI_STARTPROC
16731 SAVE ebx
16732
16733 @@ -114,21 +151,38 @@ ENTRY(atomic64_\func\()_return_cx8)
16734 movl %edx, %ecx
16735 \ins\()l $1, %ebx
16736 \insc\()l $0, %ecx
16737 +
16738 +.ifb \unchecked
16739 +#ifdef CONFIG_PAX_REFCOUNT
16740 + into
16741 +2:
16742 + _ASM_EXTABLE(2b, 3f)
16743 +#endif
16744 +.endif
16745 +
16746 LOCK_PREFIX
16747 cmpxchg8b (%esi)
16748 jne 1b
16749
16750 -10:
16751 movl %ebx, %eax
16752 movl %ecx, %edx
16753 +
16754 +.ifb \unchecked
16755 +#ifdef CONFIG_PAX_REFCOUNT
16756 +3:
16757 +#endif
16758 +.endif
16759 +
16760 RESTORE ebx
16761 ret
16762 CFI_ENDPROC
16763 -ENDPROC(atomic64_\func\()_return_cx8)
16764 +ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
16765 .endm
16766
16767 incdec_return inc add adc
16768 incdec_return dec sub sbb
16769 +incdec_return inc add adc _unchecked
16770 +incdec_return dec sub sbb _unchecked
16771
16772 ENTRY(atomic64_dec_if_positive_cx8)
16773 CFI_STARTPROC
16774 @@ -140,6 +194,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
16775 movl %edx, %ecx
16776 subl $1, %ebx
16777 sbb $0, %ecx
16778 +
16779 +#ifdef CONFIG_PAX_REFCOUNT
16780 + into
16781 +1234:
16782 + _ASM_EXTABLE(1234b, 2f)
16783 +#endif
16784 +
16785 js 2f
16786 LOCK_PREFIX
16787 cmpxchg8b (%esi)
16788 @@ -174,6 +235,13 @@ ENTRY(atomic64_add_unless_cx8)
16789 movl %edx, %ecx
16790 addl %esi, %ebx
16791 adcl %edi, %ecx
16792 +
16793 +#ifdef CONFIG_PAX_REFCOUNT
16794 + into
16795 +1234:
16796 + _ASM_EXTABLE(1234b, 3f)
16797 +#endif
16798 +
16799 LOCK_PREFIX
16800 cmpxchg8b (%ebp)
16801 jne 1b
16802 @@ -206,6 +274,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
16803 movl %edx, %ecx
16804 addl $1, %ebx
16805 adcl $0, %ecx
16806 +
16807 +#ifdef CONFIG_PAX_REFCOUNT
16808 + into
16809 +1234:
16810 + _ASM_EXTABLE(1234b, 3f)
16811 +#endif
16812 +
16813 LOCK_PREFIX
16814 cmpxchg8b (%esi)
16815 jne 1b
16816 diff -urNp linux-2.6.39.4/arch/x86/lib/checksum_32.S linux-2.6.39.4/arch/x86/lib/checksum_32.S
16817 --- linux-2.6.39.4/arch/x86/lib/checksum_32.S 2011-05-19 00:06:34.000000000 -0400
16818 +++ linux-2.6.39.4/arch/x86/lib/checksum_32.S 2011-08-05 19:44:35.000000000 -0400
16819 @@ -28,7 +28,8 @@
16820 #include <linux/linkage.h>
16821 #include <asm/dwarf2.h>
16822 #include <asm/errno.h>
16823 -
16824 +#include <asm/segment.h>
16825 +
16826 /*
16827 * computes a partial checksum, e.g. for TCP/UDP fragments
16828 */
16829 @@ -296,9 +297,24 @@ unsigned int csum_partial_copy_generic (
16830
16831 #define ARGBASE 16
16832 #define FP 12
16833 -
16834 -ENTRY(csum_partial_copy_generic)
16835 +
16836 +ENTRY(csum_partial_copy_generic_to_user)
16837 CFI_STARTPROC
16838 +
16839 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16840 + pushl_cfi %gs
16841 + popl_cfi %es
16842 + jmp csum_partial_copy_generic
16843 +#endif
16844 +
16845 +ENTRY(csum_partial_copy_generic_from_user)
16846 +
16847 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16848 + pushl_cfi %gs
16849 + popl_cfi %ds
16850 +#endif
16851 +
16852 +ENTRY(csum_partial_copy_generic)
16853 subl $4,%esp
16854 CFI_ADJUST_CFA_OFFSET 4
16855 pushl_cfi %edi
16856 @@ -320,7 +336,7 @@ ENTRY(csum_partial_copy_generic)
16857 jmp 4f
16858 SRC(1: movw (%esi), %bx )
16859 addl $2, %esi
16860 -DST( movw %bx, (%edi) )
16861 +DST( movw %bx, %es:(%edi) )
16862 addl $2, %edi
16863 addw %bx, %ax
16864 adcl $0, %eax
16865 @@ -332,30 +348,30 @@ DST( movw %bx, (%edi) )
16866 SRC(1: movl (%esi), %ebx )
16867 SRC( movl 4(%esi), %edx )
16868 adcl %ebx, %eax
16869 -DST( movl %ebx, (%edi) )
16870 +DST( movl %ebx, %es:(%edi) )
16871 adcl %edx, %eax
16872 -DST( movl %edx, 4(%edi) )
16873 +DST( movl %edx, %es:4(%edi) )
16874
16875 SRC( movl 8(%esi), %ebx )
16876 SRC( movl 12(%esi), %edx )
16877 adcl %ebx, %eax
16878 -DST( movl %ebx, 8(%edi) )
16879 +DST( movl %ebx, %es:8(%edi) )
16880 adcl %edx, %eax
16881 -DST( movl %edx, 12(%edi) )
16882 +DST( movl %edx, %es:12(%edi) )
16883
16884 SRC( movl 16(%esi), %ebx )
16885 SRC( movl 20(%esi), %edx )
16886 adcl %ebx, %eax
16887 -DST( movl %ebx, 16(%edi) )
16888 +DST( movl %ebx, %es:16(%edi) )
16889 adcl %edx, %eax
16890 -DST( movl %edx, 20(%edi) )
16891 +DST( movl %edx, %es:20(%edi) )
16892
16893 SRC( movl 24(%esi), %ebx )
16894 SRC( movl 28(%esi), %edx )
16895 adcl %ebx, %eax
16896 -DST( movl %ebx, 24(%edi) )
16897 +DST( movl %ebx, %es:24(%edi) )
16898 adcl %edx, %eax
16899 -DST( movl %edx, 28(%edi) )
16900 +DST( movl %edx, %es:28(%edi) )
16901
16902 lea 32(%esi), %esi
16903 lea 32(%edi), %edi
16904 @@ -369,7 +385,7 @@ DST( movl %edx, 28(%edi) )
16905 shrl $2, %edx # This clears CF
16906 SRC(3: movl (%esi), %ebx )
16907 adcl %ebx, %eax
16908 -DST( movl %ebx, (%edi) )
16909 +DST( movl %ebx, %es:(%edi) )
16910 lea 4(%esi), %esi
16911 lea 4(%edi), %edi
16912 dec %edx
16913 @@ -381,12 +397,12 @@ DST( movl %ebx, (%edi) )
16914 jb 5f
16915 SRC( movw (%esi), %cx )
16916 leal 2(%esi), %esi
16917 -DST( movw %cx, (%edi) )
16918 +DST( movw %cx, %es:(%edi) )
16919 leal 2(%edi), %edi
16920 je 6f
16921 shll $16,%ecx
16922 SRC(5: movb (%esi), %cl )
16923 -DST( movb %cl, (%edi) )
16924 +DST( movb %cl, %es:(%edi) )
16925 6: addl %ecx, %eax
16926 adcl $0, %eax
16927 7:
16928 @@ -397,7 +413,7 @@ DST( movb %cl, (%edi) )
16929
16930 6001:
16931 movl ARGBASE+20(%esp), %ebx # src_err_ptr
16932 - movl $-EFAULT, (%ebx)
16933 + movl $-EFAULT, %ss:(%ebx)
16934
16935 # zero the complete destination - computing the rest
16936 # is too much work
16937 @@ -410,11 +426,15 @@ DST( movb %cl, (%edi) )
16938
16939 6002:
16940 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
16941 - movl $-EFAULT,(%ebx)
16942 + movl $-EFAULT,%ss:(%ebx)
16943 jmp 5000b
16944
16945 .previous
16946
16947 + pushl_cfi %ss
16948 + popl_cfi %ds
16949 + pushl_cfi %ss
16950 + popl_cfi %es
16951 popl_cfi %ebx
16952 CFI_RESTORE ebx
16953 popl_cfi %esi
16954 @@ -424,26 +444,43 @@ DST( movb %cl, (%edi) )
16955 popl_cfi %ecx # equivalent to addl $4,%esp
16956 ret
16957 CFI_ENDPROC
16958 -ENDPROC(csum_partial_copy_generic)
16959 +ENDPROC(csum_partial_copy_generic_to_user)
16960
16961 #else
16962
16963 /* Version for PentiumII/PPro */
16964
16965 #define ROUND1(x) \
16966 + nop; nop; nop; \
16967 SRC(movl x(%esi), %ebx ) ; \
16968 addl %ebx, %eax ; \
16969 - DST(movl %ebx, x(%edi) ) ;
16970 + DST(movl %ebx, %es:x(%edi)) ;
16971
16972 #define ROUND(x) \
16973 + nop; nop; nop; \
16974 SRC(movl x(%esi), %ebx ) ; \
16975 adcl %ebx, %eax ; \
16976 - DST(movl %ebx, x(%edi) ) ;
16977 + DST(movl %ebx, %es:x(%edi)) ;
16978
16979 #define ARGBASE 12
16980 -
16981 -ENTRY(csum_partial_copy_generic)
16982 +
16983 +ENTRY(csum_partial_copy_generic_to_user)
16984 CFI_STARTPROC
16985 +
16986 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16987 + pushl_cfi %gs
16988 + popl_cfi %es
16989 + jmp csum_partial_copy_generic
16990 +#endif
16991 +
16992 +ENTRY(csum_partial_copy_generic_from_user)
16993 +
16994 +#ifdef CONFIG_PAX_MEMORY_UDEREF
16995 + pushl_cfi %gs
16996 + popl_cfi %ds
16997 +#endif
16998 +
16999 +ENTRY(csum_partial_copy_generic)
17000 pushl_cfi %ebx
17001 CFI_REL_OFFSET ebx, 0
17002 pushl_cfi %edi
17003 @@ -464,7 +501,7 @@ ENTRY(csum_partial_copy_generic)
17004 subl %ebx, %edi
17005 lea -1(%esi),%edx
17006 andl $-32,%edx
17007 - lea 3f(%ebx,%ebx), %ebx
17008 + lea 3f(%ebx,%ebx,2), %ebx
17009 testl %esi, %esi
17010 jmp *%ebx
17011 1: addl $64,%esi
17012 @@ -485,19 +522,19 @@ ENTRY(csum_partial_copy_generic)
17013 jb 5f
17014 SRC( movw (%esi), %dx )
17015 leal 2(%esi), %esi
17016 -DST( movw %dx, (%edi) )
17017 +DST( movw %dx, %es:(%edi) )
17018 leal 2(%edi), %edi
17019 je 6f
17020 shll $16,%edx
17021 5:
17022 SRC( movb (%esi), %dl )
17023 -DST( movb %dl, (%edi) )
17024 +DST( movb %dl, %es:(%edi) )
17025 6: addl %edx, %eax
17026 adcl $0, %eax
17027 7:
17028 .section .fixup, "ax"
17029 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
17030 - movl $-EFAULT, (%ebx)
17031 + movl $-EFAULT, %ss:(%ebx)
17032 # zero the complete destination (computing the rest is too much work)
17033 movl ARGBASE+8(%esp),%edi # dst
17034 movl ARGBASE+12(%esp),%ecx # len
17035 @@ -505,10 +542,17 @@ DST( movb %dl, (%edi) )
17036 rep; stosb
17037 jmp 7b
17038 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
17039 - movl $-EFAULT, (%ebx)
17040 + movl $-EFAULT, %ss:(%ebx)
17041 jmp 7b
17042 .previous
17043
17044 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17045 + pushl_cfi %ss
17046 + popl_cfi %ds
17047 + pushl_cfi %ss
17048 + popl_cfi %es
17049 +#endif
17050 +
17051 popl_cfi %esi
17052 CFI_RESTORE esi
17053 popl_cfi %edi
17054 @@ -517,7 +561,7 @@ DST( movb %dl, (%edi) )
17055 CFI_RESTORE ebx
17056 ret
17057 CFI_ENDPROC
17058 -ENDPROC(csum_partial_copy_generic)
17059 +ENDPROC(csum_partial_copy_generic_to_user)
17060
17061 #undef ROUND
17062 #undef ROUND1
17063 diff -urNp linux-2.6.39.4/arch/x86/lib/clear_page_64.S linux-2.6.39.4/arch/x86/lib/clear_page_64.S
17064 --- linux-2.6.39.4/arch/x86/lib/clear_page_64.S 2011-05-19 00:06:34.000000000 -0400
17065 +++ linux-2.6.39.4/arch/x86/lib/clear_page_64.S 2011-08-05 19:44:35.000000000 -0400
17066 @@ -43,7 +43,7 @@ ENDPROC(clear_page)
17067
17068 #include <asm/cpufeature.h>
17069
17070 - .section .altinstr_replacement,"ax"
17071 + .section .altinstr_replacement,"a"
17072 1: .byte 0xeb /* jmp <disp8> */
17073 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
17074 2:
17075 diff -urNp linux-2.6.39.4/arch/x86/lib/copy_page_64.S linux-2.6.39.4/arch/x86/lib/copy_page_64.S
17076 --- linux-2.6.39.4/arch/x86/lib/copy_page_64.S 2011-05-19 00:06:34.000000000 -0400
17077 +++ linux-2.6.39.4/arch/x86/lib/copy_page_64.S 2011-08-05 19:44:35.000000000 -0400
17078 @@ -104,7 +104,7 @@ ENDPROC(copy_page)
17079
17080 #include <asm/cpufeature.h>
17081
17082 - .section .altinstr_replacement,"ax"
17083 + .section .altinstr_replacement,"a"
17084 1: .byte 0xeb /* jmp <disp8> */
17085 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
17086 2:
17087 diff -urNp linux-2.6.39.4/arch/x86/lib/copy_user_64.S linux-2.6.39.4/arch/x86/lib/copy_user_64.S
17088 --- linux-2.6.39.4/arch/x86/lib/copy_user_64.S 2011-06-03 00:04:13.000000000 -0400
17089 +++ linux-2.6.39.4/arch/x86/lib/copy_user_64.S 2011-08-05 19:44:35.000000000 -0400
17090 @@ -15,13 +15,14 @@
17091 #include <asm/asm-offsets.h>
17092 #include <asm/thread_info.h>
17093 #include <asm/cpufeature.h>
17094 +#include <asm/pgtable.h>
17095
17096 .macro ALTERNATIVE_JUMP feature,orig,alt
17097 0:
17098 .byte 0xe9 /* 32bit jump */
17099 .long \orig-1f /* by default jump to orig */
17100 1:
17101 - .section .altinstr_replacement,"ax"
17102 + .section .altinstr_replacement,"a"
17103 2: .byte 0xe9 /* near jump with 32bit immediate */
17104 .long \alt-1b /* offset */ /* or alternatively to alt */
17105 .previous
17106 @@ -64,37 +65,13 @@
17107 #endif
17108 .endm
17109
17110 -/* Standard copy_to_user with segment limit checking */
17111 -ENTRY(_copy_to_user)
17112 - CFI_STARTPROC
17113 - GET_THREAD_INFO(%rax)
17114 - movq %rdi,%rcx
17115 - addq %rdx,%rcx
17116 - jc bad_to_user
17117 - cmpq TI_addr_limit(%rax),%rcx
17118 - ja bad_to_user
17119 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
17120 - CFI_ENDPROC
17121 -ENDPROC(_copy_to_user)
17122 -
17123 -/* Standard copy_from_user with segment limit checking */
17124 -ENTRY(_copy_from_user)
17125 - CFI_STARTPROC
17126 - GET_THREAD_INFO(%rax)
17127 - movq %rsi,%rcx
17128 - addq %rdx,%rcx
17129 - jc bad_from_user
17130 - cmpq TI_addr_limit(%rax),%rcx
17131 - ja bad_from_user
17132 - ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
17133 - CFI_ENDPROC
17134 -ENDPROC(_copy_from_user)
17135 -
17136 .section .fixup,"ax"
17137 /* must zero dest */
17138 ENTRY(bad_from_user)
17139 bad_from_user:
17140 CFI_STARTPROC
17141 + testl %edx,%edx
17142 + js bad_to_user
17143 movl %edx,%ecx
17144 xorl %eax,%eax
17145 rep
17146 diff -urNp linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S
17147 --- linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S 2011-05-19 00:06:34.000000000 -0400
17148 +++ linux-2.6.39.4/arch/x86/lib/copy_user_nocache_64.S 2011-08-05 19:44:35.000000000 -0400
17149 @@ -14,6 +14,7 @@
17150 #include <asm/current.h>
17151 #include <asm/asm-offsets.h>
17152 #include <asm/thread_info.h>
17153 +#include <asm/pgtable.h>
17154
17155 .macro ALIGN_DESTINATION
17156 #ifdef FIX_ALIGNMENT
17157 @@ -50,6 +51,15 @@
17158 */
17159 ENTRY(__copy_user_nocache)
17160 CFI_STARTPROC
17161 +
17162 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17163 + mov $PAX_USER_SHADOW_BASE,%rcx
17164 + cmp %rcx,%rsi
17165 + jae 1f
17166 + add %rcx,%rsi
17167 +1:
17168 +#endif
17169 +
17170 cmpl $8,%edx
17171 jb 20f /* less then 8 bytes, go to byte copy loop */
17172 ALIGN_DESTINATION
17173 diff -urNp linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c
17174 --- linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c 2011-05-19 00:06:34.000000000 -0400
17175 +++ linux-2.6.39.4/arch/x86/lib/csum-wrappers_64.c 2011-08-05 19:44:35.000000000 -0400
17176 @@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
17177 len -= 2;
17178 }
17179 }
17180 +
17181 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17182 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
17183 + src += PAX_USER_SHADOW_BASE;
17184 +#endif
17185 +
17186 isum = csum_partial_copy_generic((__force const void *)src,
17187 dst, len, isum, errp, NULL);
17188 if (unlikely(*errp))
17189 @@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
17190 }
17191
17192 *errp = 0;
17193 +
17194 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17195 + if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
17196 + dst += PAX_USER_SHADOW_BASE;
17197 +#endif
17198 +
17199 return csum_partial_copy_generic(src, (void __force *)dst,
17200 len, isum, NULL, errp);
17201 }
17202 diff -urNp linux-2.6.39.4/arch/x86/lib/getuser.S linux-2.6.39.4/arch/x86/lib/getuser.S
17203 --- linux-2.6.39.4/arch/x86/lib/getuser.S 2011-05-19 00:06:34.000000000 -0400
17204 +++ linux-2.6.39.4/arch/x86/lib/getuser.S 2011-08-05 19:44:35.000000000 -0400
17205 @@ -33,14 +33,35 @@
17206 #include <asm/asm-offsets.h>
17207 #include <asm/thread_info.h>
17208 #include <asm/asm.h>
17209 +#include <asm/segment.h>
17210 +#include <asm/pgtable.h>
17211 +
17212 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17213 +#define __copyuser_seg gs;
17214 +#else
17215 +#define __copyuser_seg
17216 +#endif
17217
17218 .text
17219 ENTRY(__get_user_1)
17220 CFI_STARTPROC
17221 +
17222 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17223 GET_THREAD_INFO(%_ASM_DX)
17224 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17225 jae bad_get_user
17226 -1: movzb (%_ASM_AX),%edx
17227 +
17228 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17229 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17230 + cmp %_ASM_DX,%_ASM_AX
17231 + jae 1234f
17232 + add %_ASM_DX,%_ASM_AX
17233 +1234:
17234 +#endif
17235 +
17236 +#endif
17237 +
17238 +1: __copyuser_seg movzb (%_ASM_AX),%edx
17239 xor %eax,%eax
17240 ret
17241 CFI_ENDPROC
17242 @@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
17243 ENTRY(__get_user_2)
17244 CFI_STARTPROC
17245 add $1,%_ASM_AX
17246 +
17247 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17248 jc bad_get_user
17249 GET_THREAD_INFO(%_ASM_DX)
17250 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17251 jae bad_get_user
17252 -2: movzwl -1(%_ASM_AX),%edx
17253 +
17254 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17255 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17256 + cmp %_ASM_DX,%_ASM_AX
17257 + jae 1234f
17258 + add %_ASM_DX,%_ASM_AX
17259 +1234:
17260 +#endif
17261 +
17262 +#endif
17263 +
17264 +2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
17265 xor %eax,%eax
17266 ret
17267 CFI_ENDPROC
17268 @@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
17269 ENTRY(__get_user_4)
17270 CFI_STARTPROC
17271 add $3,%_ASM_AX
17272 +
17273 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17274 jc bad_get_user
17275 GET_THREAD_INFO(%_ASM_DX)
17276 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17277 jae bad_get_user
17278 -3: mov -3(%_ASM_AX),%edx
17279 +
17280 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17281 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17282 + cmp %_ASM_DX,%_ASM_AX
17283 + jae 1234f
17284 + add %_ASM_DX,%_ASM_AX
17285 +1234:
17286 +#endif
17287 +
17288 +#endif
17289 +
17290 +3: __copyuser_seg mov -3(%_ASM_AX),%edx
17291 xor %eax,%eax
17292 ret
17293 CFI_ENDPROC
17294 @@ -80,6 +127,15 @@ ENTRY(__get_user_8)
17295 GET_THREAD_INFO(%_ASM_DX)
17296 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
17297 jae bad_get_user
17298 +
17299 +#ifdef CONFIG_PAX_MEMORY_UDEREF
17300 + mov $PAX_USER_SHADOW_BASE,%_ASM_DX
17301 + cmp %_ASM_DX,%_ASM_AX
17302 + jae 1234f
17303 + add %_ASM_DX,%_ASM_AX
17304 +1234:
17305 +#endif
17306 +
17307 4: movq -7(%_ASM_AX),%_ASM_DX
17308 xor %eax,%eax
17309 ret
17310 diff -urNp linux-2.6.39.4/arch/x86/lib/insn.c linux-2.6.39.4/arch/x86/lib/insn.c
17311 --- linux-2.6.39.4/arch/x86/lib/insn.c 2011-05-19 00:06:34.000000000 -0400
17312 +++ linux-2.6.39.4/arch/x86/lib/insn.c 2011-08-05 19:44:35.000000000 -0400
17313 @@ -21,6 +21,11 @@
17314 #include <linux/string.h>
17315 #include <asm/inat.h>
17316 #include <asm/insn.h>
17317 +#ifdef __KERNEL__
17318 +#include <asm/pgtable_types.h>
17319 +#else
17320 +#define ktla_ktva(addr) addr
17321 +#endif
17322
17323 #define get_next(t, insn) \
17324 ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
17325 @@ -40,8 +45,8 @@
17326 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
17327 {
17328 memset(insn, 0, sizeof(*insn));
17329 - insn->kaddr = kaddr;
17330 - insn->next_byte = kaddr;
17331 + insn->kaddr = ktla_ktva(kaddr);
17332 + insn->next_byte = ktla_ktva(kaddr);
17333 insn->x86_64 = x86_64 ? 1 : 0;
17334 insn->opnd_bytes = 4;
17335 if (x86_64)
17336 diff -urNp linux-2.6.39.4/arch/x86/lib/mmx_32.c linux-2.6.39.4/arch/x86/lib/mmx_32.c
17337 --- linux-2.6.39.4/arch/x86/lib/mmx_32.c 2011-05-19 00:06:34.000000000 -0400
17338 +++ linux-2.6.39.4/arch/x86/lib/mmx_32.c 2011-08-05 19:44:35.000000000 -0400
17339 @@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
17340 {
17341 void *p;
17342 int i;
17343 + unsigned long cr0;
17344
17345 if (unlikely(in_interrupt()))
17346 return __memcpy(to, from, len);
17347 @@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
17348 kernel_fpu_begin();
17349
17350 __asm__ __volatile__ (
17351 - "1: prefetch (%0)\n" /* This set is 28 bytes */
17352 - " prefetch 64(%0)\n"
17353 - " prefetch 128(%0)\n"
17354 - " prefetch 192(%0)\n"
17355 - " prefetch 256(%0)\n"
17356 + "1: prefetch (%1)\n" /* This set is 28 bytes */
17357 + " prefetch 64(%1)\n"
17358 + " prefetch 128(%1)\n"
17359 + " prefetch 192(%1)\n"
17360 + " prefetch 256(%1)\n"
17361 "2: \n"
17362 ".section .fixup, \"ax\"\n"
17363 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17364 + "3: \n"
17365 +
17366 +#ifdef CONFIG_PAX_KERNEXEC
17367 + " movl %%cr0, %0\n"
17368 + " movl %0, %%eax\n"
17369 + " andl $0xFFFEFFFF, %%eax\n"
17370 + " movl %%eax, %%cr0\n"
17371 +#endif
17372 +
17373 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17374 +
17375 +#ifdef CONFIG_PAX_KERNEXEC
17376 + " movl %0, %%cr0\n"
17377 +#endif
17378 +
17379 " jmp 2b\n"
17380 ".previous\n"
17381 _ASM_EXTABLE(1b, 3b)
17382 - : : "r" (from));
17383 + : "=&r" (cr0) : "r" (from) : "ax");
17384
17385 for ( ; i > 5; i--) {
17386 __asm__ __volatile__ (
17387 - "1: prefetch 320(%0)\n"
17388 - "2: movq (%0), %%mm0\n"
17389 - " movq 8(%0), %%mm1\n"
17390 - " movq 16(%0), %%mm2\n"
17391 - " movq 24(%0), %%mm3\n"
17392 - " movq %%mm0, (%1)\n"
17393 - " movq %%mm1, 8(%1)\n"
17394 - " movq %%mm2, 16(%1)\n"
17395 - " movq %%mm3, 24(%1)\n"
17396 - " movq 32(%0), %%mm0\n"
17397 - " movq 40(%0), %%mm1\n"
17398 - " movq 48(%0), %%mm2\n"
17399 - " movq 56(%0), %%mm3\n"
17400 - " movq %%mm0, 32(%1)\n"
17401 - " movq %%mm1, 40(%1)\n"
17402 - " movq %%mm2, 48(%1)\n"
17403 - " movq %%mm3, 56(%1)\n"
17404 + "1: prefetch 320(%1)\n"
17405 + "2: movq (%1), %%mm0\n"
17406 + " movq 8(%1), %%mm1\n"
17407 + " movq 16(%1), %%mm2\n"
17408 + " movq 24(%1), %%mm3\n"
17409 + " movq %%mm0, (%2)\n"
17410 + " movq %%mm1, 8(%2)\n"
17411 + " movq %%mm2, 16(%2)\n"
17412 + " movq %%mm3, 24(%2)\n"
17413 + " movq 32(%1), %%mm0\n"
17414 + " movq 40(%1), %%mm1\n"
17415 + " movq 48(%1), %%mm2\n"
17416 + " movq 56(%1), %%mm3\n"
17417 + " movq %%mm0, 32(%2)\n"
17418 + " movq %%mm1, 40(%2)\n"
17419 + " movq %%mm2, 48(%2)\n"
17420 + " movq %%mm3, 56(%2)\n"
17421 ".section .fixup, \"ax\"\n"
17422 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17423 + "3:\n"
17424 +
17425 +#ifdef CONFIG_PAX_KERNEXEC
17426 + " movl %%cr0, %0\n"
17427 + " movl %0, %%eax\n"
17428 + " andl $0xFFFEFFFF, %%eax\n"
17429 + " movl %%eax, %%cr0\n"
17430 +#endif
17431 +
17432 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17433 +
17434 +#ifdef CONFIG_PAX_KERNEXEC
17435 + " movl %0, %%cr0\n"
17436 +#endif
17437 +
17438 " jmp 2b\n"
17439 ".previous\n"
17440 _ASM_EXTABLE(1b, 3b)
17441 - : : "r" (from), "r" (to) : "memory");
17442 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17443
17444 from += 64;
17445 to += 64;
17446 @@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
17447 static void fast_copy_page(void *to, void *from)
17448 {
17449 int i;
17450 + unsigned long cr0;
17451
17452 kernel_fpu_begin();
17453
17454 @@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
17455 * but that is for later. -AV
17456 */
17457 __asm__ __volatile__(
17458 - "1: prefetch (%0)\n"
17459 - " prefetch 64(%0)\n"
17460 - " prefetch 128(%0)\n"
17461 - " prefetch 192(%0)\n"
17462 - " prefetch 256(%0)\n"
17463 + "1: prefetch (%1)\n"
17464 + " prefetch 64(%1)\n"
17465 + " prefetch 128(%1)\n"
17466 + " prefetch 192(%1)\n"
17467 + " prefetch 256(%1)\n"
17468 "2: \n"
17469 ".section .fixup, \"ax\"\n"
17470 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17471 + "3: \n"
17472 +
17473 +#ifdef CONFIG_PAX_KERNEXEC
17474 + " movl %%cr0, %0\n"
17475 + " movl %0, %%eax\n"
17476 + " andl $0xFFFEFFFF, %%eax\n"
17477 + " movl %%eax, %%cr0\n"
17478 +#endif
17479 +
17480 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17481 +
17482 +#ifdef CONFIG_PAX_KERNEXEC
17483 + " movl %0, %%cr0\n"
17484 +#endif
17485 +
17486 " jmp 2b\n"
17487 ".previous\n"
17488 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
17489 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17490
17491 for (i = 0; i < (4096-320)/64; i++) {
17492 __asm__ __volatile__ (
17493 - "1: prefetch 320(%0)\n"
17494 - "2: movq (%0), %%mm0\n"
17495 - " movntq %%mm0, (%1)\n"
17496 - " movq 8(%0), %%mm1\n"
17497 - " movntq %%mm1, 8(%1)\n"
17498 - " movq 16(%0), %%mm2\n"
17499 - " movntq %%mm2, 16(%1)\n"
17500 - " movq 24(%0), %%mm3\n"
17501 - " movntq %%mm3, 24(%1)\n"
17502 - " movq 32(%0), %%mm4\n"
17503 - " movntq %%mm4, 32(%1)\n"
17504 - " movq 40(%0), %%mm5\n"
17505 - " movntq %%mm5, 40(%1)\n"
17506 - " movq 48(%0), %%mm6\n"
17507 - " movntq %%mm6, 48(%1)\n"
17508 - " movq 56(%0), %%mm7\n"
17509 - " movntq %%mm7, 56(%1)\n"
17510 + "1: prefetch 320(%1)\n"
17511 + "2: movq (%1), %%mm0\n"
17512 + " movntq %%mm0, (%2)\n"
17513 + " movq 8(%1), %%mm1\n"
17514 + " movntq %%mm1, 8(%2)\n"
17515 + " movq 16(%1), %%mm2\n"
17516 + " movntq %%mm2, 16(%2)\n"
17517 + " movq 24(%1), %%mm3\n"
17518 + " movntq %%mm3, 24(%2)\n"
17519 + " movq 32(%1), %%mm4\n"
17520 + " movntq %%mm4, 32(%2)\n"
17521 + " movq 40(%1), %%mm5\n"
17522 + " movntq %%mm5, 40(%2)\n"
17523 + " movq 48(%1), %%mm6\n"
17524 + " movntq %%mm6, 48(%2)\n"
17525 + " movq 56(%1), %%mm7\n"
17526 + " movntq %%mm7, 56(%2)\n"
17527 ".section .fixup, \"ax\"\n"
17528 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17529 + "3:\n"
17530 +
17531 +#ifdef CONFIG_PAX_KERNEXEC
17532 + " movl %%cr0, %0\n"
17533 + " movl %0, %%eax\n"
17534 + " andl $0xFFFEFFFF, %%eax\n"
17535 + " movl %%eax, %%cr0\n"
17536 +#endif
17537 +
17538 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17539 +
17540 +#ifdef CONFIG_PAX_KERNEXEC
17541 + " movl %0, %%cr0\n"
17542 +#endif
17543 +
17544 " jmp 2b\n"
17545 ".previous\n"
17546 - _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
17547 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17548
17549 from += 64;
17550 to += 64;
17551 @@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
17552 static void fast_copy_page(void *to, void *from)
17553 {
17554 int i;
17555 + unsigned long cr0;
17556
17557 kernel_fpu_begin();
17558
17559 __asm__ __volatile__ (
17560 - "1: prefetch (%0)\n"
17561 - " prefetch 64(%0)\n"
17562 - " prefetch 128(%0)\n"
17563 - " prefetch 192(%0)\n"
17564 - " prefetch 256(%0)\n"
17565 + "1: prefetch (%1)\n"
17566 + " prefetch 64(%1)\n"
17567 + " prefetch 128(%1)\n"
17568 + " prefetch 192(%1)\n"
17569 + " prefetch 256(%1)\n"
17570 "2: \n"
17571 ".section .fixup, \"ax\"\n"
17572 - "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17573 + "3: \n"
17574 +
17575 +#ifdef CONFIG_PAX_KERNEXEC
17576 + " movl %%cr0, %0\n"
17577 + " movl %0, %%eax\n"
17578 + " andl $0xFFFEFFFF, %%eax\n"
17579 + " movl %%eax, %%cr0\n"
17580 +#endif
17581 +
17582 + " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
17583 +
17584 +#ifdef CONFIG_PAX_KERNEXEC
17585 + " movl %0, %%cr0\n"
17586 +#endif
17587 +
17588 " jmp 2b\n"
17589 ".previous\n"
17590 - _ASM_EXTABLE(1b, 3b) : : "r" (from));
17591 + _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
17592
17593 for (i = 0; i < 4096/64; i++) {
17594 __asm__ __volatile__ (
17595 - "1: prefetch 320(%0)\n"
17596 - "2: movq (%0), %%mm0\n"
17597 - " movq 8(%0), %%mm1\n"
17598 - " movq 16(%0), %%mm2\n"
17599 - " movq 24(%0), %%mm3\n"
17600 - " movq %%mm0, (%1)\n"
17601 - " movq %%mm1, 8(%1)\n"
17602 - " movq %%mm2, 16(%1)\n"
17603 - " movq %%mm3, 24(%1)\n"
17604 - " movq 32(%0), %%mm0\n"
17605 - " movq 40(%0), %%mm1\n"
17606 - " movq 48(%0), %%mm2\n"
17607 - " movq 56(%0), %%mm3\n"
17608 - " movq %%mm0, 32(%1)\n"
17609 - " movq %%mm1, 40(%1)\n"
17610 - " movq %%mm2, 48(%1)\n"
17611 - " movq %%mm3, 56(%1)\n"
17612 + "1: prefetch 320(%1)\n"
17613 + "2: movq (%1), %%mm0\n"
17614 + " movq 8(%1), %%mm1\n"
17615 + " movq 16(%1), %%mm2\n"
17616 + " movq 24(%1), %%mm3\n"
17617 + " movq %%mm0, (%2)\n"
17618 + " movq %%mm1, 8(%2)\n"
17619 + " movq %%mm2, 16(%2)\n"
17620 + " movq %%mm3, 24(%2)\n"
17621 + " movq 32(%1), %%mm0\n"
17622 + " movq 40(%1), %%mm1\n"
17623 + " movq 48(%1), %%mm2\n"
17624 + " movq 56(%1), %%mm3\n"
17625 + " movq %%mm0, 32(%2)\n"
17626 + " movq %%mm1, 40(%2)\n"
17627 + " movq %%mm2, 48(%2)\n"
17628 + " movq %%mm3, 56(%2)\n"
17629 ".section .fixup, \"ax\"\n"
17630 - "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17631 + "3:\n"
17632 +
17633 +#ifdef CONFIG_PAX_KERNEXEC
17634 + " movl %%cr0, %0\n"
17635 + " movl %0, %%eax\n"
17636 + " andl $0xFFFEFFFF, %%eax\n"
17637 + " movl %%eax, %%cr0\n"
17638 +#endif
17639 +
17640 + " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
17641 +
17642 +#ifdef CONFIG_PAX_KERNEXEC
17643 + " movl %0, %%cr0\n"
17644 +#endif
17645 +
17646 " jmp 2b\n"
17647 ".previous\n"
17648 _ASM_EXTABLE(1b, 3b)
17649 - : : "r" (from), "r" (to) : "memory");
17650 + : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
17651
17652 from += 64;
17653 to += 64;
17654 diff -urNp linux-2.6.39.4/arch/x86/lib/putuser.S linux-2.6.39.4/arch/x86/lib/putuser.S
17655 --- linux-2.6.39.4/arch/x86/lib/putuser.S 2011-05-19 00:06:34.000000000 -0400
17656 +++ linux-2.6.39.4/arch/x86/lib/putuser.S 2011-08-05 19:44:35.000000000 -0400
17657 @@ -15,7 +15,8 @@
17658 #include <asm/thread_info.h>
17659 #include <asm/errno.h>
17660 #include <asm/asm.h>
17661 -
17662 +#include <asm/segment.h>
17663 +#include <asm/pgtable.h>
17664
17665 /*
17666 * __put_user_X
17667 @@ -29,52 +30,119 @@
17668 * as they get called from within inline assembly.
17669 */
17670
17671 -#define ENTER CFI_STARTPROC ; \
17672 - GET_THREAD_INFO(%_ASM_BX)
17673 +#define ENTER CFI_STARTPROC
17674 #define EXIT ret ; \
17675 CFI_ENDPROC
17676
17677 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17678 +#define _DEST %_ASM_CX,%_ASM_BX
17679 +#else
17680 +#define _DEST %_ASM_CX
17681 +#endif
17682 +
17683 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
17684 +#define __copyuser_seg gs;
17685 +#else
17686 +#define __copyuser_seg
17687 +#endif
17688 +
17689 .text
17690 ENTRY(__put_user_1)
17691 ENTER
17692 +
17693 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17694 + GET_THREAD_INFO(%_ASM_BX)
17695 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
17696 jae bad_put_user
17697 -1: movb %al,(%_ASM_CX)
17698 +
17699 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17700 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17701 + cmp %_ASM_BX,%_ASM_CX
17702 + jb 1234f
17703 + xor %ebx,%ebx
17704 +1234:
17705 +#endif
17706 +
17707 +#endif
17708 +
17709 +1: __copyuser_seg movb %al,(_DEST)
17710 xor %eax,%eax
17711 EXIT
17712 ENDPROC(__put_user_1)
17713
17714 ENTRY(__put_user_2)
17715 ENTER
17716 +
17717 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17718 + GET_THREAD_INFO(%_ASM_BX)
17719 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17720 sub $1,%_ASM_BX
17721 cmp %_ASM_BX,%_ASM_CX
17722 jae bad_put_user
17723 -2: movw %ax,(%_ASM_CX)
17724 +
17725 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17726 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17727 + cmp %_ASM_BX,%_ASM_CX
17728 + jb 1234f
17729 + xor %ebx,%ebx
17730 +1234:
17731 +#endif
17732 +
17733 +#endif
17734 +
17735 +2: __copyuser_seg movw %ax,(_DEST)
17736 xor %eax,%eax
17737 EXIT
17738 ENDPROC(__put_user_2)
17739
17740 ENTRY(__put_user_4)
17741 ENTER
17742 +
17743 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17744 + GET_THREAD_INFO(%_ASM_BX)
17745 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17746 sub $3,%_ASM_BX
17747 cmp %_ASM_BX,%_ASM_CX
17748 jae bad_put_user
17749 -3: movl %eax,(%_ASM_CX)
17750 +
17751 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17752 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17753 + cmp %_ASM_BX,%_ASM_CX
17754 + jb 1234f
17755 + xor %ebx,%ebx
17756 +1234:
17757 +#endif
17758 +
17759 +#endif
17760 +
17761 +3: __copyuser_seg movl %eax,(_DEST)
17762 xor %eax,%eax
17763 EXIT
17764 ENDPROC(__put_user_4)
17765
17766 ENTRY(__put_user_8)
17767 ENTER
17768 +
17769 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
17770 + GET_THREAD_INFO(%_ASM_BX)
17771 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
17772 sub $7,%_ASM_BX
17773 cmp %_ASM_BX,%_ASM_CX
17774 jae bad_put_user
17775 -4: mov %_ASM_AX,(%_ASM_CX)
17776 +
17777 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
17778 + mov $PAX_USER_SHADOW_BASE,%_ASM_BX
17779 + cmp %_ASM_BX,%_ASM_CX
17780 + jb 1234f
17781 + xor %ebx,%ebx
17782 +1234:
17783 +#endif
17784 +
17785 +#endif
17786 +
17787 +4: __copyuser_seg mov %_ASM_AX,(_DEST)
17788 #ifdef CONFIG_X86_32
17789 -5: movl %edx,4(%_ASM_CX)
17790 +5: __copyuser_seg movl %edx,4(_DEST)
17791 #endif
17792 xor %eax,%eax
17793 EXIT
17794 diff -urNp linux-2.6.39.4/arch/x86/lib/usercopy_32.c linux-2.6.39.4/arch/x86/lib/usercopy_32.c
17795 --- linux-2.6.39.4/arch/x86/lib/usercopy_32.c 2011-05-19 00:06:34.000000000 -0400
17796 +++ linux-2.6.39.4/arch/x86/lib/usercopy_32.c 2011-08-05 19:44:35.000000000 -0400
17797 @@ -43,7 +43,7 @@ do { \
17798 __asm__ __volatile__( \
17799 " testl %1,%1\n" \
17800 " jz 2f\n" \
17801 - "0: lodsb\n" \
17802 + "0: "__copyuser_seg"lodsb\n" \
17803 " stosb\n" \
17804 " testb %%al,%%al\n" \
17805 " jz 1f\n" \
17806 @@ -128,10 +128,12 @@ do { \
17807 int __d0; \
17808 might_fault(); \
17809 __asm__ __volatile__( \
17810 + __COPYUSER_SET_ES \
17811 "0: rep; stosl\n" \
17812 " movl %2,%0\n" \
17813 "1: rep; stosb\n" \
17814 "2:\n" \
17815 + __COPYUSER_RESTORE_ES \
17816 ".section .fixup,\"ax\"\n" \
17817 "3: lea 0(%2,%0,4),%0\n" \
17818 " jmp 2b\n" \
17819 @@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
17820 might_fault();
17821
17822 __asm__ __volatile__(
17823 + __COPYUSER_SET_ES
17824 " testl %0, %0\n"
17825 " jz 3f\n"
17826 " andl %0,%%ecx\n"
17827 @@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
17828 " subl %%ecx,%0\n"
17829 " addl %0,%%eax\n"
17830 "1:\n"
17831 + __COPYUSER_RESTORE_ES
17832 ".section .fixup,\"ax\"\n"
17833 "2: xorl %%eax,%%eax\n"
17834 " jmp 1b\n"
17835 @@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
17836
17837 #ifdef CONFIG_X86_INTEL_USERCOPY
17838 static unsigned long
17839 -__copy_user_intel(void __user *to, const void *from, unsigned long size)
17840 +__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
17841 {
17842 int d0, d1;
17843 __asm__ __volatile__(
17844 @@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
17845 " .align 2,0x90\n"
17846 "3: movl 0(%4), %%eax\n"
17847 "4: movl 4(%4), %%edx\n"
17848 - "5: movl %%eax, 0(%3)\n"
17849 - "6: movl %%edx, 4(%3)\n"
17850 + "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
17851 + "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
17852 "7: movl 8(%4), %%eax\n"
17853 "8: movl 12(%4),%%edx\n"
17854 - "9: movl %%eax, 8(%3)\n"
17855 - "10: movl %%edx, 12(%3)\n"
17856 + "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
17857 + "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
17858 "11: movl 16(%4), %%eax\n"
17859 "12: movl 20(%4), %%edx\n"
17860 - "13: movl %%eax, 16(%3)\n"
17861 - "14: movl %%edx, 20(%3)\n"
17862 + "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
17863 + "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
17864 "15: movl 24(%4), %%eax\n"
17865 "16: movl 28(%4), %%edx\n"
17866 - "17: movl %%eax, 24(%3)\n"
17867 - "18: movl %%edx, 28(%3)\n"
17868 + "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
17869 + "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
17870 "19: movl 32(%4), %%eax\n"
17871 "20: movl 36(%4), %%edx\n"
17872 - "21: movl %%eax, 32(%3)\n"
17873 - "22: movl %%edx, 36(%3)\n"
17874 + "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
17875 + "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
17876 "23: movl 40(%4), %%eax\n"
17877 "24: movl 44(%4), %%edx\n"
17878 - "25: movl %%eax, 40(%3)\n"
17879 - "26: movl %%edx, 44(%3)\n"
17880 + "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
17881 + "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
17882 "27: movl 48(%4), %%eax\n"
17883 "28: movl 52(%4), %%edx\n"
17884 - "29: movl %%eax, 48(%3)\n"
17885 - "30: movl %%edx, 52(%3)\n"
17886 + "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
17887 + "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
17888 "31: movl 56(%4), %%eax\n"
17889 "32: movl 60(%4), %%edx\n"
17890 - "33: movl %%eax, 56(%3)\n"
17891 - "34: movl %%edx, 60(%3)\n"
17892 + "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
17893 + "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
17894 " addl $-64, %0\n"
17895 " addl $64, %4\n"
17896 " addl $64, %3\n"
17897 @@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
17898 " shrl $2, %0\n"
17899 " andl $3, %%eax\n"
17900 " cld\n"
17901 + __COPYUSER_SET_ES
17902 "99: rep; movsl\n"
17903 "36: movl %%eax, %0\n"
17904 "37: rep; movsb\n"
17905 "100:\n"
17906 + __COPYUSER_RESTORE_ES
17907 + ".section .fixup,\"ax\"\n"
17908 + "101: lea 0(%%eax,%0,4),%0\n"
17909 + " jmp 100b\n"
17910 + ".previous\n"
17911 + ".section __ex_table,\"a\"\n"
17912 + " .align 4\n"
17913 + " .long 1b,100b\n"
17914 + " .long 2b,100b\n"
17915 + " .long 3b,100b\n"
17916 + " .long 4b,100b\n"
17917 + " .long 5b,100b\n"
17918 + " .long 6b,100b\n"
17919 + " .long 7b,100b\n"
17920 + " .long 8b,100b\n"
17921 + " .long 9b,100b\n"
17922 + " .long 10b,100b\n"
17923 + " .long 11b,100b\n"
17924 + " .long 12b,100b\n"
17925 + " .long 13b,100b\n"
17926 + " .long 14b,100b\n"
17927 + " .long 15b,100b\n"
17928 + " .long 16b,100b\n"
17929 + " .long 17b,100b\n"
17930 + " .long 18b,100b\n"
17931 + " .long 19b,100b\n"
17932 + " .long 20b,100b\n"
17933 + " .long 21b,100b\n"
17934 + " .long 22b,100b\n"
17935 + " .long 23b,100b\n"
17936 + " .long 24b,100b\n"
17937 + " .long 25b,100b\n"
17938 + " .long 26b,100b\n"
17939 + " .long 27b,100b\n"
17940 + " .long 28b,100b\n"
17941 + " .long 29b,100b\n"
17942 + " .long 30b,100b\n"
17943 + " .long 31b,100b\n"
17944 + " .long 32b,100b\n"
17945 + " .long 33b,100b\n"
17946 + " .long 34b,100b\n"
17947 + " .long 35b,100b\n"
17948 + " .long 36b,100b\n"
17949 + " .long 37b,100b\n"
17950 + " .long 99b,101b\n"
17951 + ".previous"
17952 + : "=&c"(size), "=&D" (d0), "=&S" (d1)
17953 + : "1"(to), "2"(from), "0"(size)
17954 + : "eax", "edx", "memory");
17955 + return size;
17956 +}
17957 +
17958 +static unsigned long
17959 +__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
17960 +{
17961 + int d0, d1;
17962 + __asm__ __volatile__(
17963 + " .align 2,0x90\n"
17964 + "1: "__copyuser_seg" movl 32(%4), %%eax\n"
17965 + " cmpl $67, %0\n"
17966 + " jbe 3f\n"
17967 + "2: "__copyuser_seg" movl 64(%4), %%eax\n"
17968 + " .align 2,0x90\n"
17969 + "3: "__copyuser_seg" movl 0(%4), %%eax\n"
17970 + "4: "__copyuser_seg" movl 4(%4), %%edx\n"
17971 + "5: movl %%eax, 0(%3)\n"
17972 + "6: movl %%edx, 4(%3)\n"
17973 + "7: "__copyuser_seg" movl 8(%4), %%eax\n"
17974 + "8: "__copyuser_seg" movl 12(%4),%%edx\n"
17975 + "9: movl %%eax, 8(%3)\n"
17976 + "10: movl %%edx, 12(%3)\n"
17977 + "11: "__copyuser_seg" movl 16(%4), %%eax\n"
17978 + "12: "__copyuser_seg" movl 20(%4), %%edx\n"
17979 + "13: movl %%eax, 16(%3)\n"
17980 + "14: movl %%edx, 20(%3)\n"
17981 + "15: "__copyuser_seg" movl 24(%4), %%eax\n"
17982 + "16: "__copyuser_seg" movl 28(%4), %%edx\n"
17983 + "17: movl %%eax, 24(%3)\n"
17984 + "18: movl %%edx, 28(%3)\n"
17985 + "19: "__copyuser_seg" movl 32(%4), %%eax\n"
17986 + "20: "__copyuser_seg" movl 36(%4), %%edx\n"
17987 + "21: movl %%eax, 32(%3)\n"
17988 + "22: movl %%edx, 36(%3)\n"
17989 + "23: "__copyuser_seg" movl 40(%4), %%eax\n"
17990 + "24: "__copyuser_seg" movl 44(%4), %%edx\n"
17991 + "25: movl %%eax, 40(%3)\n"
17992 + "26: movl %%edx, 44(%3)\n"
17993 + "27: "__copyuser_seg" movl 48(%4), %%eax\n"
17994 + "28: "__copyuser_seg" movl 52(%4), %%edx\n"
17995 + "29: movl %%eax, 48(%3)\n"
17996 + "30: movl %%edx, 52(%3)\n"
17997 + "31: "__copyuser_seg" movl 56(%4), %%eax\n"
17998 + "32: "__copyuser_seg" movl 60(%4), %%edx\n"
17999 + "33: movl %%eax, 56(%3)\n"
18000 + "34: movl %%edx, 60(%3)\n"
18001 + " addl $-64, %0\n"
18002 + " addl $64, %4\n"
18003 + " addl $64, %3\n"
18004 + " cmpl $63, %0\n"
18005 + " ja 1b\n"
18006 + "35: movl %0, %%eax\n"
18007 + " shrl $2, %0\n"
18008 + " andl $3, %%eax\n"
18009 + " cld\n"
18010 + "99: rep; "__copyuser_seg" movsl\n"
18011 + "36: movl %%eax, %0\n"
18012 + "37: rep; "__copyuser_seg" movsb\n"
18013 + "100:\n"
18014 ".section .fixup,\"ax\"\n"
18015 "101: lea 0(%%eax,%0,4),%0\n"
18016 " jmp 100b\n"
18017 @@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
18018 int d0, d1;
18019 __asm__ __volatile__(
18020 " .align 2,0x90\n"
18021 - "0: movl 32(%4), %%eax\n"
18022 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18023 " cmpl $67, %0\n"
18024 " jbe 2f\n"
18025 - "1: movl 64(%4), %%eax\n"
18026 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18027 " .align 2,0x90\n"
18028 - "2: movl 0(%4), %%eax\n"
18029 - "21: movl 4(%4), %%edx\n"
18030 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18031 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18032 " movl %%eax, 0(%3)\n"
18033 " movl %%edx, 4(%3)\n"
18034 - "3: movl 8(%4), %%eax\n"
18035 - "31: movl 12(%4),%%edx\n"
18036 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18037 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18038 " movl %%eax, 8(%3)\n"
18039 " movl %%edx, 12(%3)\n"
18040 - "4: movl 16(%4), %%eax\n"
18041 - "41: movl 20(%4), %%edx\n"
18042 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18043 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18044 " movl %%eax, 16(%3)\n"
18045 " movl %%edx, 20(%3)\n"
18046 - "10: movl 24(%4), %%eax\n"
18047 - "51: movl 28(%4), %%edx\n"
18048 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18049 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18050 " movl %%eax, 24(%3)\n"
18051 " movl %%edx, 28(%3)\n"
18052 - "11: movl 32(%4), %%eax\n"
18053 - "61: movl 36(%4), %%edx\n"
18054 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18055 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18056 " movl %%eax, 32(%3)\n"
18057 " movl %%edx, 36(%3)\n"
18058 - "12: movl 40(%4), %%eax\n"
18059 - "71: movl 44(%4), %%edx\n"
18060 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18061 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18062 " movl %%eax, 40(%3)\n"
18063 " movl %%edx, 44(%3)\n"
18064 - "13: movl 48(%4), %%eax\n"
18065 - "81: movl 52(%4), %%edx\n"
18066 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18067 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18068 " movl %%eax, 48(%3)\n"
18069 " movl %%edx, 52(%3)\n"
18070 - "14: movl 56(%4), %%eax\n"
18071 - "91: movl 60(%4), %%edx\n"
18072 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18073 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18074 " movl %%eax, 56(%3)\n"
18075 " movl %%edx, 60(%3)\n"
18076 " addl $-64, %0\n"
18077 @@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
18078 " shrl $2, %0\n"
18079 " andl $3, %%eax\n"
18080 " cld\n"
18081 - "6: rep; movsl\n"
18082 + "6: rep; "__copyuser_seg" movsl\n"
18083 " movl %%eax,%0\n"
18084 - "7: rep; movsb\n"
18085 + "7: rep; "__copyuser_seg" movsb\n"
18086 "8:\n"
18087 ".section .fixup,\"ax\"\n"
18088 "9: lea 0(%%eax,%0,4),%0\n"
18089 @@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
18090
18091 __asm__ __volatile__(
18092 " .align 2,0x90\n"
18093 - "0: movl 32(%4), %%eax\n"
18094 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18095 " cmpl $67, %0\n"
18096 " jbe 2f\n"
18097 - "1: movl 64(%4), %%eax\n"
18098 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18099 " .align 2,0x90\n"
18100 - "2: movl 0(%4), %%eax\n"
18101 - "21: movl 4(%4), %%edx\n"
18102 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18103 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18104 " movnti %%eax, 0(%3)\n"
18105 " movnti %%edx, 4(%3)\n"
18106 - "3: movl 8(%4), %%eax\n"
18107 - "31: movl 12(%4),%%edx\n"
18108 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18109 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18110 " movnti %%eax, 8(%3)\n"
18111 " movnti %%edx, 12(%3)\n"
18112 - "4: movl 16(%4), %%eax\n"
18113 - "41: movl 20(%4), %%edx\n"
18114 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18115 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18116 " movnti %%eax, 16(%3)\n"
18117 " movnti %%edx, 20(%3)\n"
18118 - "10: movl 24(%4), %%eax\n"
18119 - "51: movl 28(%4), %%edx\n"
18120 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18121 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18122 " movnti %%eax, 24(%3)\n"
18123 " movnti %%edx, 28(%3)\n"
18124 - "11: movl 32(%4), %%eax\n"
18125 - "61: movl 36(%4), %%edx\n"
18126 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18127 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18128 " movnti %%eax, 32(%3)\n"
18129 " movnti %%edx, 36(%3)\n"
18130 - "12: movl 40(%4), %%eax\n"
18131 - "71: movl 44(%4), %%edx\n"
18132 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18133 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18134 " movnti %%eax, 40(%3)\n"
18135 " movnti %%edx, 44(%3)\n"
18136 - "13: movl 48(%4), %%eax\n"
18137 - "81: movl 52(%4), %%edx\n"
18138 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18139 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18140 " movnti %%eax, 48(%3)\n"
18141 " movnti %%edx, 52(%3)\n"
18142 - "14: movl 56(%4), %%eax\n"
18143 - "91: movl 60(%4), %%edx\n"
18144 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18145 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18146 " movnti %%eax, 56(%3)\n"
18147 " movnti %%edx, 60(%3)\n"
18148 " addl $-64, %0\n"
18149 @@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
18150 " shrl $2, %0\n"
18151 " andl $3, %%eax\n"
18152 " cld\n"
18153 - "6: rep; movsl\n"
18154 + "6: rep; "__copyuser_seg" movsl\n"
18155 " movl %%eax,%0\n"
18156 - "7: rep; movsb\n"
18157 + "7: rep; "__copyuser_seg" movsb\n"
18158 "8:\n"
18159 ".section .fixup,\"ax\"\n"
18160 "9: lea 0(%%eax,%0,4),%0\n"
18161 @@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
18162
18163 __asm__ __volatile__(
18164 " .align 2,0x90\n"
18165 - "0: movl 32(%4), %%eax\n"
18166 + "0: "__copyuser_seg" movl 32(%4), %%eax\n"
18167 " cmpl $67, %0\n"
18168 " jbe 2f\n"
18169 - "1: movl 64(%4), %%eax\n"
18170 + "1: "__copyuser_seg" movl 64(%4), %%eax\n"
18171 " .align 2,0x90\n"
18172 - "2: movl 0(%4), %%eax\n"
18173 - "21: movl 4(%4), %%edx\n"
18174 + "2: "__copyuser_seg" movl 0(%4), %%eax\n"
18175 + "21: "__copyuser_seg" movl 4(%4), %%edx\n"
18176 " movnti %%eax, 0(%3)\n"
18177 " movnti %%edx, 4(%3)\n"
18178 - "3: movl 8(%4), %%eax\n"
18179 - "31: movl 12(%4),%%edx\n"
18180 + "3: "__copyuser_seg" movl 8(%4), %%eax\n"
18181 + "31: "__copyuser_seg" movl 12(%4),%%edx\n"
18182 " movnti %%eax, 8(%3)\n"
18183 " movnti %%edx, 12(%3)\n"
18184 - "4: movl 16(%4), %%eax\n"
18185 - "41: movl 20(%4), %%edx\n"
18186 + "4: "__copyuser_seg" movl 16(%4), %%eax\n"
18187 + "41: "__copyuser_seg" movl 20(%4), %%edx\n"
18188 " movnti %%eax, 16(%3)\n"
18189 " movnti %%edx, 20(%3)\n"
18190 - "10: movl 24(%4), %%eax\n"
18191 - "51: movl 28(%4), %%edx\n"
18192 + "10: "__copyuser_seg" movl 24(%4), %%eax\n"
18193 + "51: "__copyuser_seg" movl 28(%4), %%edx\n"
18194 " movnti %%eax, 24(%3)\n"
18195 " movnti %%edx, 28(%3)\n"
18196 - "11: movl 32(%4), %%eax\n"
18197 - "61: movl 36(%4), %%edx\n"
18198 + "11: "__copyuser_seg" movl 32(%4), %%eax\n"
18199 + "61: "__copyuser_seg" movl 36(%4), %%edx\n"
18200 " movnti %%eax, 32(%3)\n"
18201 " movnti %%edx, 36(%3)\n"
18202 - "12: movl 40(%4), %%eax\n"
18203 - "71: movl 44(%4), %%edx\n"
18204 + "12: "__copyuser_seg" movl 40(%4), %%eax\n"
18205 + "71: "__copyuser_seg" movl 44(%4), %%edx\n"
18206 " movnti %%eax, 40(%3)\n"
18207 " movnti %%edx, 44(%3)\n"
18208 - "13: movl 48(%4), %%eax\n"
18209 - "81: movl 52(%4), %%edx\n"
18210 + "13: "__copyuser_seg" movl 48(%4), %%eax\n"
18211 + "81: "__copyuser_seg" movl 52(%4), %%edx\n"
18212 " movnti %%eax, 48(%3)\n"
18213 " movnti %%edx, 52(%3)\n"
18214 - "14: movl 56(%4), %%eax\n"
18215 - "91: movl 60(%4), %%edx\n"
18216 + "14: "__copyuser_seg" movl 56(%4), %%eax\n"
18217 + "91: "__copyuser_seg" movl 60(%4), %%edx\n"
18218 " movnti %%eax, 56(%3)\n"
18219 " movnti %%edx, 60(%3)\n"
18220 " addl $-64, %0\n"
18221 @@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
18222 " shrl $2, %0\n"
18223 " andl $3, %%eax\n"
18224 " cld\n"
18225 - "6: rep; movsl\n"
18226 + "6: rep; "__copyuser_seg" movsl\n"
18227 " movl %%eax,%0\n"
18228 - "7: rep; movsb\n"
18229 + "7: rep; "__copyuser_seg" movsb\n"
18230 "8:\n"
18231 ".section .fixup,\"ax\"\n"
18232 "9: lea 0(%%eax,%0,4),%0\n"
18233 @@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
18234 */
18235 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
18236 unsigned long size);
18237 -unsigned long __copy_user_intel(void __user *to, const void *from,
18238 +unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
18239 + unsigned long size);
18240 +unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
18241 unsigned long size);
18242 unsigned long __copy_user_zeroing_intel_nocache(void *to,
18243 const void __user *from, unsigned long size);
18244 #endif /* CONFIG_X86_INTEL_USERCOPY */
18245
18246 /* Generic arbitrary sized copy. */
18247 -#define __copy_user(to, from, size) \
18248 +#define __copy_user(to, from, size, prefix, set, restore) \
18249 do { \
18250 int __d0, __d1, __d2; \
18251 __asm__ __volatile__( \
18252 + set \
18253 " cmp $7,%0\n" \
18254 " jbe 1f\n" \
18255 " movl %1,%0\n" \
18256 " negl %0\n" \
18257 " andl $7,%0\n" \
18258 " subl %0,%3\n" \
18259 - "4: rep; movsb\n" \
18260 + "4: rep; "prefix"movsb\n" \
18261 " movl %3,%0\n" \
18262 " shrl $2,%0\n" \
18263 " andl $3,%3\n" \
18264 " .align 2,0x90\n" \
18265 - "0: rep; movsl\n" \
18266 + "0: rep; "prefix"movsl\n" \
18267 " movl %3,%0\n" \
18268 - "1: rep; movsb\n" \
18269 + "1: rep; "prefix"movsb\n" \
18270 "2:\n" \
18271 + restore \
18272 ".section .fixup,\"ax\"\n" \
18273 "5: addl %3,%0\n" \
18274 " jmp 2b\n" \
18275 @@ -682,14 +799,14 @@ do { \
18276 " negl %0\n" \
18277 " andl $7,%0\n" \
18278 " subl %0,%3\n" \
18279 - "4: rep; movsb\n" \
18280 + "4: rep; "__copyuser_seg"movsb\n" \
18281 " movl %3,%0\n" \
18282 " shrl $2,%0\n" \
18283 " andl $3,%3\n" \
18284 " .align 2,0x90\n" \
18285 - "0: rep; movsl\n" \
18286 + "0: rep; "__copyuser_seg"movsl\n" \
18287 " movl %3,%0\n" \
18288 - "1: rep; movsb\n" \
18289 + "1: rep; "__copyuser_seg"movsb\n" \
18290 "2:\n" \
18291 ".section .fixup,\"ax\"\n" \
18292 "5: addl %3,%0\n" \
18293 @@ -775,9 +892,9 @@ survive:
18294 }
18295 #endif
18296 if (movsl_is_ok(to, from, n))
18297 - __copy_user(to, from, n);
18298 + __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
18299 else
18300 - n = __copy_user_intel(to, from, n);
18301 + n = __generic_copy_to_user_intel(to, from, n);
18302 return n;
18303 }
18304 EXPORT_SYMBOL(__copy_to_user_ll);
18305 @@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
18306 unsigned long n)
18307 {
18308 if (movsl_is_ok(to, from, n))
18309 - __copy_user(to, from, n);
18310 + __copy_user(to, from, n, __copyuser_seg, "", "");
18311 else
18312 - n = __copy_user_intel((void __user *)to,
18313 - (const void *)from, n);
18314 + n = __generic_copy_from_user_intel(to, from, n);
18315 return n;
18316 }
18317 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
18318 @@ -827,65 +943,50 @@ unsigned long __copy_from_user_ll_nocach
18319 if (n > 64 && cpu_has_xmm2)
18320 n = __copy_user_intel_nocache(to, from, n);
18321 else
18322 - __copy_user(to, from, n);
18323 + __copy_user(to, from, n, __copyuser_seg, "", "");
18324 #else
18325 - __copy_user(to, from, n);
18326 + __copy_user(to, from, n, __copyuser_seg, "", "");
18327 #endif
18328 return n;
18329 }
18330 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
18331
18332 -/**
18333 - * copy_to_user: - Copy a block of data into user space.
18334 - * @to: Destination address, in user space.
18335 - * @from: Source address, in kernel space.
18336 - * @n: Number of bytes to copy.
18337 - *
18338 - * Context: User context only. This function may sleep.
18339 - *
18340 - * Copy data from kernel space to user space.
18341 - *
18342 - * Returns number of bytes that could not be copied.
18343 - * On success, this will be zero.
18344 - */
18345 -unsigned long
18346 -copy_to_user(void __user *to, const void *from, unsigned long n)
18347 +void copy_from_user_overflow(void)
18348 {
18349 - if (access_ok(VERIFY_WRITE, to, n))
18350 - n = __copy_to_user(to, from, n);
18351 - return n;
18352 + WARN(1, "Buffer overflow detected!\n");
18353 }
18354 -EXPORT_SYMBOL(copy_to_user);
18355 +EXPORT_SYMBOL(copy_from_user_overflow);
18356
18357 -/**
18358 - * copy_from_user: - Copy a block of data from user space.
18359 - * @to: Destination address, in kernel space.
18360 - * @from: Source address, in user space.
18361 - * @n: Number of bytes to copy.
18362 - *
18363 - * Context: User context only. This function may sleep.
18364 - *
18365 - * Copy data from user space to kernel space.
18366 - *
18367 - * Returns number of bytes that could not be copied.
18368 - * On success, this will be zero.
18369 - *
18370 - * If some data could not be copied, this function will pad the copied
18371 - * data to the requested size using zero bytes.
18372 - */
18373 -unsigned long
18374 -_copy_from_user(void *to, const void __user *from, unsigned long n)
18375 +void copy_to_user_overflow(void)
18376 {
18377 - if (access_ok(VERIFY_READ, from, n))
18378 - n = __copy_from_user(to, from, n);
18379 - else
18380 - memset(to, 0, n);
18381 - return n;
18382 + WARN(1, "Buffer overflow detected!\n");
18383 }
18384 -EXPORT_SYMBOL(_copy_from_user);
18385 +EXPORT_SYMBOL(copy_to_user_overflow);
18386
18387 -void copy_from_user_overflow(void)
18388 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18389 +void __set_fs(mm_segment_t x)
18390 {
18391 - WARN(1, "Buffer overflow detected!\n");
18392 + switch (x.seg) {
18393 + case 0:
18394 + loadsegment(gs, 0);
18395 + break;
18396 + case TASK_SIZE_MAX:
18397 + loadsegment(gs, __USER_DS);
18398 + break;
18399 + case -1UL:
18400 + loadsegment(gs, __KERNEL_DS);
18401 + break;
18402 + default:
18403 + BUG();
18404 + }
18405 + return;
18406 }
18407 -EXPORT_SYMBOL(copy_from_user_overflow);
18408 +EXPORT_SYMBOL(__set_fs);
18409 +
18410 +void set_fs(mm_segment_t x)
18411 +{
18412 + current_thread_info()->addr_limit = x;
18413 + __set_fs(x);
18414 +}
18415 +EXPORT_SYMBOL(set_fs);
18416 +#endif
18417 diff -urNp linux-2.6.39.4/arch/x86/lib/usercopy_64.c linux-2.6.39.4/arch/x86/lib/usercopy_64.c
18418 --- linux-2.6.39.4/arch/x86/lib/usercopy_64.c 2011-05-19 00:06:34.000000000 -0400
18419 +++ linux-2.6.39.4/arch/x86/lib/usercopy_64.c 2011-08-05 19:44:35.000000000 -0400
18420 @@ -42,6 +42,12 @@ long
18421 __strncpy_from_user(char *dst, const char __user *src, long count)
18422 {
18423 long res;
18424 +
18425 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18426 + if ((unsigned long)src < PAX_USER_SHADOW_BASE)
18427 + src += PAX_USER_SHADOW_BASE;
18428 +#endif
18429 +
18430 __do_strncpy_from_user(dst, src, count, res);
18431 return res;
18432 }
18433 @@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
18434 {
18435 long __d0;
18436 might_fault();
18437 +
18438 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18439 + if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
18440 + addr += PAX_USER_SHADOW_BASE;
18441 +#endif
18442 +
18443 /* no memory constraint because it doesn't change any memory gcc knows
18444 about */
18445 asm volatile(
18446 @@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
18447
18448 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
18449 {
18450 - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18451 + if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
18452 +
18453 +#ifdef CONFIG_PAX_MEMORY_UDEREF
18454 + if ((unsigned long)to < PAX_USER_SHADOW_BASE)
18455 + to += PAX_USER_SHADOW_BASE;
18456 + if ((unsigned long)from < PAX_USER_SHADOW_BASE)
18457 + from += PAX_USER_SHADOW_BASE;
18458 +#endif
18459 +
18460 return copy_user_generic((__force void *)to, (__force void *)from, len);
18461 - }
18462 - return len;
18463 + }
18464 + return len;
18465 }
18466 EXPORT_SYMBOL(copy_in_user);
18467
18468 diff -urNp linux-2.6.39.4/arch/x86/Makefile linux-2.6.39.4/arch/x86/Makefile
18469 --- linux-2.6.39.4/arch/x86/Makefile 2011-05-19 00:06:34.000000000 -0400
18470 +++ linux-2.6.39.4/arch/x86/Makefile 2011-08-05 19:44:35.000000000 -0400
18471 @@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
18472 else
18473 BITS := 64
18474 UTS_MACHINE := x86_64
18475 + biarch := $(call cc-option,-m64)
18476 CHECKFLAGS += -D__x86_64__ -m64
18477
18478 KBUILD_AFLAGS += -m64
18479 @@ -195,3 +196,12 @@ define archhelp
18480 echo ' FDARGS="..." arguments for the booted kernel'
18481 echo ' FDINITRD=file initrd for the booted kernel'
18482 endef
18483 +
18484 +define OLD_LD
18485 +
18486 +*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
18487 +*** Please upgrade your binutils to 2.18 or newer
18488 +endef
18489 +
18490 +archprepare:
18491 + $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
18492 diff -urNp linux-2.6.39.4/arch/x86/mm/extable.c linux-2.6.39.4/arch/x86/mm/extable.c
18493 --- linux-2.6.39.4/arch/x86/mm/extable.c 2011-05-19 00:06:34.000000000 -0400
18494 +++ linux-2.6.39.4/arch/x86/mm/extable.c 2011-08-05 19:44:35.000000000 -0400
18495 @@ -1,14 +1,71 @@
18496 #include <linux/module.h>
18497 #include <linux/spinlock.h>
18498 +#include <linux/sort.h>
18499 #include <asm/uaccess.h>
18500 +#include <asm/pgtable.h>
18501
18502 +/*
18503 + * The exception table needs to be sorted so that the binary
18504 + * search that we use to find entries in it works properly.
18505 + * This is used both for the kernel exception table and for
18506 + * the exception tables of modules that get loaded.
18507 + */
18508 +static int cmp_ex(const void *a, const void *b)
18509 +{
18510 + const struct exception_table_entry *x = a, *y = b;
18511 +
18512 + /* avoid overflow */
18513 + if (x->insn > y->insn)
18514 + return 1;
18515 + if (x->insn < y->insn)
18516 + return -1;
18517 + return 0;
18518 +}
18519 +
18520 +static void swap_ex(void *a, void *b, int size)
18521 +{
18522 + struct exception_table_entry t, *x = a, *y = b;
18523 +
18524 + t = *x;
18525 +
18526 + pax_open_kernel();
18527 + *x = *y;
18528 + *y = t;
18529 + pax_close_kernel();
18530 +}
18531 +
18532 +void sort_extable(struct exception_table_entry *start,
18533 + struct exception_table_entry *finish)
18534 +{
18535 + sort(start, finish - start, sizeof(struct exception_table_entry),
18536 + cmp_ex, swap_ex);
18537 +}
18538 +
18539 +#ifdef CONFIG_MODULES
18540 +/*
18541 + * If the exception table is sorted, any referring to the module init
18542 + * will be at the beginning or the end.
18543 + */
18544 +void trim_init_extable(struct module *m)
18545 +{
18546 + /*trim the beginning*/
18547 + while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
18548 + m->extable++;
18549 + m->num_exentries--;
18550 + }
18551 + /*trim the end*/
18552 + while (m->num_exentries &&
18553 + within_module_init(m->extable[m->num_exentries-1].insn, m))
18554 + m->num_exentries--;
18555 +}
18556 +#endif /* CONFIG_MODULES */
18557
18558 int fixup_exception(struct pt_regs *regs)
18559 {
18560 const struct exception_table_entry *fixup;
18561
18562 #ifdef CONFIG_PNPBIOS
18563 - if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
18564 + if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
18565 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
18566 extern u32 pnp_bios_is_utter_crap;
18567 pnp_bios_is_utter_crap = 1;
18568 diff -urNp linux-2.6.39.4/arch/x86/mm/fault.c linux-2.6.39.4/arch/x86/mm/fault.c
18569 --- linux-2.6.39.4/arch/x86/mm/fault.c 2011-05-19 00:06:34.000000000 -0400
18570 +++ linux-2.6.39.4/arch/x86/mm/fault.c 2011-08-17 20:06:06.000000000 -0400
18571 @@ -12,10 +12,18 @@
18572 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
18573 #include <linux/perf_event.h> /* perf_sw_event */
18574 #include <linux/hugetlb.h> /* hstate_index_to_shift */
18575 +#include <linux/unistd.h>
18576 +#include <linux/compiler.h>
18577
18578 #include <asm/traps.h> /* dotraplinkage, ... */
18579 #include <asm/pgalloc.h> /* pgd_*(), ... */
18580 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
18581 +#include <asm/vsyscall.h>
18582 +#include <asm/tlbflush.h>
18583 +
18584 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18585 +#include <asm/stacktrace.h>
18586 +#endif
18587
18588 /*
18589 * Page fault error code bits:
18590 @@ -53,7 +61,7 @@ static inline int __kprobes notify_page_
18591 int ret = 0;
18592
18593 /* kprobe_running() needs smp_processor_id() */
18594 - if (kprobes_built_in() && !user_mode_vm(regs)) {
18595 + if (kprobes_built_in() && !user_mode(regs)) {
18596 preempt_disable();
18597 if (kprobe_running() && kprobe_fault_handler(regs, 14))
18598 ret = 1;
18599 @@ -114,7 +122,10 @@ check_prefetch_opcode(struct pt_regs *re
18600 return !instr_lo || (instr_lo>>1) == 1;
18601 case 0x00:
18602 /* Prefetch instruction is 0x0F0D or 0x0F18 */
18603 - if (probe_kernel_address(instr, opcode))
18604 + if (user_mode(regs)) {
18605 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18606 + return 0;
18607 + } else if (probe_kernel_address(instr, opcode))
18608 return 0;
18609
18610 *prefetch = (instr_lo == 0xF) &&
18611 @@ -148,7 +159,10 @@ is_prefetch(struct pt_regs *regs, unsign
18612 while (instr < max_instr) {
18613 unsigned char opcode;
18614
18615 - if (probe_kernel_address(instr, opcode))
18616 + if (user_mode(regs)) {
18617 + if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
18618 + break;
18619 + } else if (probe_kernel_address(instr, opcode))
18620 break;
18621
18622 instr++;
18623 @@ -179,6 +193,30 @@ force_sig_info_fault(int si_signo, int s
18624 force_sig_info(si_signo, &info, tsk);
18625 }
18626
18627 +#ifdef CONFIG_PAX_EMUTRAMP
18628 +static int pax_handle_fetch_fault(struct pt_regs *regs);
18629 +#endif
18630 +
18631 +#ifdef CONFIG_PAX_PAGEEXEC
18632 +static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
18633 +{
18634 + pgd_t *pgd;
18635 + pud_t *pud;
18636 + pmd_t *pmd;
18637 +
18638 + pgd = pgd_offset(mm, address);
18639 + if (!pgd_present(*pgd))
18640 + return NULL;
18641 + pud = pud_offset(pgd, address);
18642 + if (!pud_present(*pud))
18643 + return NULL;
18644 + pmd = pmd_offset(pud, address);
18645 + if (!pmd_present(*pmd))
18646 + return NULL;
18647 + return pmd;
18648 +}
18649 +#endif
18650 +
18651 DEFINE_SPINLOCK(pgd_lock);
18652 LIST_HEAD(pgd_list);
18653
18654 @@ -229,10 +267,22 @@ void vmalloc_sync_all(void)
18655 for (address = VMALLOC_START & PMD_MASK;
18656 address >= TASK_SIZE && address < FIXADDR_TOP;
18657 address += PMD_SIZE) {
18658 +
18659 +#ifdef CONFIG_PAX_PER_CPU_PGD
18660 + unsigned long cpu;
18661 +#else
18662 struct page *page;
18663 +#endif
18664
18665 spin_lock(&pgd_lock);
18666 +
18667 +#ifdef CONFIG_PAX_PER_CPU_PGD
18668 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
18669 + pgd_t *pgd = get_cpu_pgd(cpu);
18670 + pmd_t *ret;
18671 +#else
18672 list_for_each_entry(page, &pgd_list, lru) {
18673 + pgd_t *pgd = page_address(page);
18674 spinlock_t *pgt_lock;
18675 pmd_t *ret;
18676
18677 @@ -240,8 +290,13 @@ void vmalloc_sync_all(void)
18678 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
18679
18680 spin_lock(pgt_lock);
18681 - ret = vmalloc_sync_one(page_address(page), address);
18682 +#endif
18683 +
18684 + ret = vmalloc_sync_one(pgd, address);
18685 +
18686 +#ifndef CONFIG_PAX_PER_CPU_PGD
18687 spin_unlock(pgt_lock);
18688 +#endif
18689
18690 if (!ret)
18691 break;
18692 @@ -275,6 +330,11 @@ static noinline __kprobes int vmalloc_fa
18693 * an interrupt in the middle of a task switch..
18694 */
18695 pgd_paddr = read_cr3();
18696 +
18697 +#ifdef CONFIG_PAX_PER_CPU_PGD
18698 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
18699 +#endif
18700 +
18701 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
18702 if (!pmd_k)
18703 return -1;
18704 @@ -370,7 +430,14 @@ static noinline __kprobes int vmalloc_fa
18705 * happen within a race in page table update. In the later
18706 * case just flush:
18707 */
18708 +
18709 +#ifdef CONFIG_PAX_PER_CPU_PGD
18710 + BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
18711 + pgd = pgd_offset_cpu(smp_processor_id(), address);
18712 +#else
18713 pgd = pgd_offset(current->active_mm, address);
18714 +#endif
18715 +
18716 pgd_ref = pgd_offset_k(address);
18717 if (pgd_none(*pgd_ref))
18718 return -1;
18719 @@ -532,7 +599,7 @@ static int is_errata93(struct pt_regs *r
18720 static int is_errata100(struct pt_regs *regs, unsigned long address)
18721 {
18722 #ifdef CONFIG_X86_64
18723 - if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
18724 + if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
18725 return 1;
18726 #endif
18727 return 0;
18728 @@ -559,7 +626,7 @@ static int is_f00f_bug(struct pt_regs *r
18729 }
18730
18731 static const char nx_warning[] = KERN_CRIT
18732 -"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
18733 +"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
18734
18735 static void
18736 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
18737 @@ -568,15 +635,26 @@ show_fault_oops(struct pt_regs *regs, un
18738 if (!oops_may_print())
18739 return;
18740
18741 - if (error_code & PF_INSTR) {
18742 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
18743 unsigned int level;
18744
18745 pte_t *pte = lookup_address(address, &level);
18746
18747 if (pte && pte_present(*pte) && !pte_exec(*pte))
18748 - printk(nx_warning, current_uid());
18749 + printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
18750 }
18751
18752 +#ifdef CONFIG_PAX_KERNEXEC
18753 + if (init_mm.start_code <= address && address < init_mm.end_code) {
18754 + if (current->signal->curr_ip)
18755 + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18756 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
18757 + else
18758 + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
18759 + current->comm, task_pid_nr(current), current_uid(), current_euid());
18760 + }
18761 +#endif
18762 +
18763 printk(KERN_ALERT "BUG: unable to handle kernel ");
18764 if (address < PAGE_SIZE)
18765 printk(KERN_CONT "NULL pointer dereference");
18766 @@ -701,6 +779,70 @@ __bad_area_nosemaphore(struct pt_regs *r
18767 unsigned long address, int si_code)
18768 {
18769 struct task_struct *tsk = current;
18770 +#if defined(CONFIG_X86_64) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18771 + struct mm_struct *mm = tsk->mm;
18772 +#endif
18773 +
18774 +#ifdef CONFIG_X86_64
18775 + if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
18776 + if (regs->ip == (unsigned long)vgettimeofday) {
18777 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
18778 + return;
18779 + } else if (regs->ip == (unsigned long)vtime) {
18780 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
18781 + return;
18782 + } else if (regs->ip == (unsigned long)vgetcpu) {
18783 + regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
18784 + return;
18785 + }
18786 + }
18787 +#endif
18788 +
18789 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18790 + if (mm && (error_code & PF_USER)) {
18791 + unsigned long ip = regs->ip;
18792 +
18793 + if (v8086_mode(regs))
18794 + ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
18795 +
18796 + /*
18797 + * It's possible to have interrupts off here:
18798 + */
18799 + local_irq_enable();
18800 +
18801 +#ifdef CONFIG_PAX_PAGEEXEC
18802 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
18803 + (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
18804 +
18805 +#ifdef CONFIG_PAX_EMUTRAMP
18806 + switch (pax_handle_fetch_fault(regs)) {
18807 + case 2:
18808 + return;
18809 + }
18810 +#endif
18811 +
18812 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18813 + do_group_exit(SIGKILL);
18814 + }
18815 +#endif
18816 +
18817 +#ifdef CONFIG_PAX_SEGMEXEC
18818 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
18819 +
18820 +#ifdef CONFIG_PAX_EMUTRAMP
18821 + switch (pax_handle_fetch_fault(regs)) {
18822 + case 2:
18823 + return;
18824 + }
18825 +#endif
18826 +
18827 + pax_report_fault(regs, (void *)ip, (void *)regs->sp);
18828 + do_group_exit(SIGKILL);
18829 + }
18830 +#endif
18831 +
18832 + }
18833 +#endif
18834
18835 /* User mode accesses just cause a SIGSEGV */
18836 if (error_code & PF_USER) {
18837 @@ -855,6 +997,99 @@ static int spurious_fault_check(unsigned
18838 return 1;
18839 }
18840
18841 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18842 +static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
18843 +{
18844 + pte_t *pte;
18845 + pmd_t *pmd;
18846 + spinlock_t *ptl;
18847 + unsigned char pte_mask;
18848 +
18849 + if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
18850 + !(mm->pax_flags & MF_PAX_PAGEEXEC))
18851 + return 0;
18852 +
18853 + /* PaX: it's our fault, let's handle it if we can */
18854 +
18855 + /* PaX: take a look at read faults before acquiring any locks */
18856 + if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
18857 + /* instruction fetch attempt from a protected page in user mode */
18858 + up_read(&mm->mmap_sem);
18859 +
18860 +#ifdef CONFIG_PAX_EMUTRAMP
18861 + switch (pax_handle_fetch_fault(regs)) {
18862 + case 2:
18863 + return 1;
18864 + }
18865 +#endif
18866 +
18867 + pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
18868 + do_group_exit(SIGKILL);
18869 + }
18870 +
18871 + pmd = pax_get_pmd(mm, address);
18872 + if (unlikely(!pmd))
18873 + return 0;
18874 +
18875 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
18876 + if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
18877 + pte_unmap_unlock(pte, ptl);
18878 + return 0;
18879 + }
18880 +
18881 + if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
18882 + /* write attempt to a protected page in user mode */
18883 + pte_unmap_unlock(pte, ptl);
18884 + return 0;
18885 + }
18886 +
18887 +#ifdef CONFIG_SMP
18888 + if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
18889 +#else
18890 + if (likely(address > get_limit(regs->cs)))
18891 +#endif
18892 + {
18893 + set_pte(pte, pte_mkread(*pte));
18894 + __flush_tlb_one(address);
18895 + pte_unmap_unlock(pte, ptl);
18896 + up_read(&mm->mmap_sem);
18897 + return 1;
18898 + }
18899 +
18900 + pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
18901 +
18902 + /*
18903 + * PaX: fill DTLB with user rights and retry
18904 + */
18905 + __asm__ __volatile__ (
18906 + "orb %2,(%1)\n"
18907 +#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
18908 +/*
18909 + * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
18910 + * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
18911 + * page fault when examined during a TLB load attempt. this is true not only
18912 + * for PTEs holding a non-present entry but also present entries that will
18913 + * raise a page fault (such as those set up by PaX, or the copy-on-write
18914 + * mechanism). in effect it means that we do *not* need to flush the TLBs
18915 + * for our target pages since their PTEs are simply not in the TLBs at all.
18916 +
18917 + * the best thing in omitting it is that we gain around 15-20% speed in the
18918 + * fast path of the page fault handler and can get rid of tracing since we
18919 + * can no longer flush unintended entries.
18920 + */
18921 + "invlpg (%0)\n"
18922 +#endif
18923 + __copyuser_seg"testb $0,(%0)\n"
18924 + "xorb %3,(%1)\n"
18925 + :
18926 + : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
18927 + : "memory", "cc");
18928 + pte_unmap_unlock(pte, ptl);
18929 + up_read(&mm->mmap_sem);
18930 + return 1;
18931 +}
18932 +#endif
18933 +
18934 /*
18935 * Handle a spurious fault caused by a stale TLB entry.
18936 *
18937 @@ -927,6 +1162,9 @@ int show_unhandled_signals = 1;
18938 static inline int
18939 access_error(unsigned long error_code, struct vm_area_struct *vma)
18940 {
18941 + if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
18942 + return 1;
18943 +
18944 if (error_code & PF_WRITE) {
18945 /* write, present and write, not present: */
18946 if (unlikely(!(vma->vm_flags & VM_WRITE)))
18947 @@ -960,19 +1198,33 @@ do_page_fault(struct pt_regs *regs, unsi
18948 {
18949 struct vm_area_struct *vma;
18950 struct task_struct *tsk;
18951 - unsigned long address;
18952 struct mm_struct *mm;
18953 int fault;
18954 int write = error_code & PF_WRITE;
18955 unsigned int flags = FAULT_FLAG_ALLOW_RETRY |
18956 (write ? FAULT_FLAG_WRITE : 0);
18957
18958 + /* Get the faulting address: */
18959 + unsigned long address = read_cr2();
18960 +
18961 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
18962 + if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
18963 + if (!search_exception_tables(regs->ip)) {
18964 + bad_area_nosemaphore(regs, error_code, address);
18965 + return;
18966 + }
18967 + if (address < PAX_USER_SHADOW_BASE) {
18968 + printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
18969 + printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
18970 + show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
18971 + } else
18972 + address -= PAX_USER_SHADOW_BASE;
18973 + }
18974 +#endif
18975 +
18976 tsk = current;
18977 mm = tsk->mm;
18978
18979 - /* Get the faulting address: */
18980 - address = read_cr2();
18981 -
18982 /*
18983 * Detect and handle instructions that would cause a page fault for
18984 * both a tracked kernel page and a userspace page.
18985 @@ -1032,7 +1284,7 @@ do_page_fault(struct pt_regs *regs, unsi
18986 * User-mode registers count as a user access even for any
18987 * potential system fault or CPU buglet:
18988 */
18989 - if (user_mode_vm(regs)) {
18990 + if (user_mode(regs)) {
18991 local_irq_enable();
18992 error_code |= PF_USER;
18993 } else {
18994 @@ -1087,6 +1339,11 @@ retry:
18995 might_sleep();
18996 }
18997
18998 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
18999 + if (pax_handle_pageexec_fault(regs, mm, address, error_code))
19000 + return;
19001 +#endif
19002 +
19003 vma = find_vma(mm, address);
19004 if (unlikely(!vma)) {
19005 bad_area(regs, error_code, address);
19006 @@ -1098,18 +1355,24 @@ retry:
19007 bad_area(regs, error_code, address);
19008 return;
19009 }
19010 - if (error_code & PF_USER) {
19011 - /*
19012 - * Accessing the stack below %sp is always a bug.
19013 - * The large cushion allows instructions like enter
19014 - * and pusha to work. ("enter $65535, $31" pushes
19015 - * 32 pointers and then decrements %sp by 65535.)
19016 - */
19017 - if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
19018 - bad_area(regs, error_code, address);
19019 - return;
19020 - }
19021 + /*
19022 + * Accessing the stack below %sp is always a bug.
19023 + * The large cushion allows instructions like enter
19024 + * and pusha to work. ("enter $65535, $31" pushes
19025 + * 32 pointers and then decrements %sp by 65535.)
19026 + */
19027 + if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
19028 + bad_area(regs, error_code, address);
19029 + return;
19030 + }
19031 +
19032 +#ifdef CONFIG_PAX_SEGMEXEC
19033 + if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
19034 + bad_area(regs, error_code, address);
19035 + return;
19036 }
19037 +#endif
19038 +
19039 if (unlikely(expand_stack(vma, address))) {
19040 bad_area(regs, error_code, address);
19041 return;
19042 @@ -1164,3 +1427,199 @@ good_area:
19043
19044 up_read(&mm->mmap_sem);
19045 }
19046 +
19047 +#ifdef CONFIG_PAX_EMUTRAMP
19048 +static int pax_handle_fetch_fault_32(struct pt_regs *regs)
19049 +{
19050 + int err;
19051 +
19052 + do { /* PaX: gcc trampoline emulation #1 */
19053 + unsigned char mov1, mov2;
19054 + unsigned short jmp;
19055 + unsigned int addr1, addr2;
19056 +
19057 +#ifdef CONFIG_X86_64
19058 + if ((regs->ip + 11) >> 32)
19059 + break;
19060 +#endif
19061 +
19062 + err = get_user(mov1, (unsigned char __user *)regs->ip);
19063 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19064 + err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
19065 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19066 + err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
19067 +
19068 + if (err)
19069 + break;
19070 +
19071 + if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
19072 + regs->cx = addr1;
19073 + regs->ax = addr2;
19074 + regs->ip = addr2;
19075 + return 2;
19076 + }
19077 + } while (0);
19078 +
19079 + do { /* PaX: gcc trampoline emulation #2 */
19080 + unsigned char mov, jmp;
19081 + unsigned int addr1, addr2;
19082 +
19083 +#ifdef CONFIG_X86_64
19084 + if ((regs->ip + 9) >> 32)
19085 + break;
19086 +#endif
19087 +
19088 + err = get_user(mov, (unsigned char __user *)regs->ip);
19089 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
19090 + err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
19091 + err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
19092 +
19093 + if (err)
19094 + break;
19095 +
19096 + if (mov == 0xB9 && jmp == 0xE9) {
19097 + regs->cx = addr1;
19098 + regs->ip = (unsigned int)(regs->ip + addr2 + 10);
19099 + return 2;
19100 + }
19101 + } while (0);
19102 +
19103 + return 1; /* PaX in action */
19104 +}
19105 +
19106 +#ifdef CONFIG_X86_64
19107 +static int pax_handle_fetch_fault_64(struct pt_regs *regs)
19108 +{
19109 + int err;
19110 +
19111 + do { /* PaX: gcc trampoline emulation #1 */
19112 + unsigned short mov1, mov2, jmp1;
19113 + unsigned char jmp2;
19114 + unsigned int addr1;
19115 + unsigned long addr2;
19116 +
19117 + err = get_user(mov1, (unsigned short __user *)regs->ip);
19118 + err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
19119 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
19120 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
19121 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
19122 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
19123 +
19124 + if (err)
19125 + break;
19126 +
19127 + if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19128 + regs->r11 = addr1;
19129 + regs->r10 = addr2;
19130 + regs->ip = addr1;
19131 + return 2;
19132 + }
19133 + } while (0);
19134 +
19135 + do { /* PaX: gcc trampoline emulation #2 */
19136 + unsigned short mov1, mov2, jmp1;
19137 + unsigned char jmp2;
19138 + unsigned long addr1, addr2;
19139 +
19140 + err = get_user(mov1, (unsigned short __user *)regs->ip);
19141 + err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
19142 + err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
19143 + err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
19144 + err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
19145 + err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
19146 +
19147 + if (err)
19148 + break;
19149 +
19150 + if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
19151 + regs->r11 = addr1;
19152 + regs->r10 = addr2;
19153 + regs->ip = addr1;
19154 + return 2;
19155 + }
19156 + } while (0);
19157 +
19158 + return 1; /* PaX in action */
19159 +}
19160 +#endif
19161 +
19162 +/*
19163 + * PaX: decide what to do with offenders (regs->ip = fault address)
19164 + *
19165 + * returns 1 when task should be killed
19166 + * 2 when gcc trampoline was detected
19167 + */
19168 +static int pax_handle_fetch_fault(struct pt_regs *regs)
19169 +{
19170 + if (v8086_mode(regs))
19171 + return 1;
19172 +
19173 + if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
19174 + return 1;
19175 +
19176 +#ifdef CONFIG_X86_32
19177 + return pax_handle_fetch_fault_32(regs);
19178 +#else
19179 + if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
19180 + return pax_handle_fetch_fault_32(regs);
19181 + else
19182 + return pax_handle_fetch_fault_64(regs);
19183 +#endif
19184 +}
19185 +#endif
19186 +
19187 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19188 +void pax_report_insns(void *pc, void *sp)
19189 +{
19190 + long i;
19191 +
19192 + printk(KERN_ERR "PAX: bytes at PC: ");
19193 + for (i = 0; i < 20; i++) {
19194 + unsigned char c;
19195 + if (get_user(c, (__force unsigned char __user *)pc+i))
19196 + printk(KERN_CONT "?? ");
19197 + else
19198 + printk(KERN_CONT "%02x ", c);
19199 + }
19200 + printk("\n");
19201 +
19202 + printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
19203 + for (i = -1; i < 80 / (long)sizeof(long); i++) {
19204 + unsigned long c;
19205 + if (get_user(c, (__force unsigned long __user *)sp+i))
19206 +#ifdef CONFIG_X86_32
19207 + printk(KERN_CONT "???????? ");
19208 +#else
19209 + printk(KERN_CONT "???????????????? ");
19210 +#endif
19211 + else
19212 + printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
19213 + }
19214 + printk("\n");
19215 +}
19216 +#endif
19217 +
19218 +/**
19219 + * probe_kernel_write(): safely attempt to write to a location
19220 + * @dst: address to write to
19221 + * @src: pointer to the data that shall be written
19222 + * @size: size of the data chunk
19223 + *
19224 + * Safely write to address @dst from the buffer at @src. If a kernel fault
19225 + * happens, handle that and return -EFAULT.
19226 + */
19227 +long notrace probe_kernel_write(void *dst, const void *src, size_t size)
19228 +{
19229 + long ret;
19230 + mm_segment_t old_fs = get_fs();
19231 +
19232 + set_fs(KERNEL_DS);
19233 + pagefault_disable();
19234 + pax_open_kernel();
19235 + ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
19236 + pax_close_kernel();
19237 + pagefault_enable();
19238 + set_fs(old_fs);
19239 +
19240 + return ret ? -EFAULT : 0;
19241 +}
19242 diff -urNp linux-2.6.39.4/arch/x86/mm/gup.c linux-2.6.39.4/arch/x86/mm/gup.c
19243 --- linux-2.6.39.4/arch/x86/mm/gup.c 2011-05-19 00:06:34.000000000 -0400
19244 +++ linux-2.6.39.4/arch/x86/mm/gup.c 2011-08-05 19:44:35.000000000 -0400
19245 @@ -263,7 +263,7 @@ int __get_user_pages_fast(unsigned long
19246 addr = start;
19247 len = (unsigned long) nr_pages << PAGE_SHIFT;
19248 end = start + len;
19249 - if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19250 + if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
19251 (void __user *)start, len)))
19252 return 0;
19253
19254 diff -urNp linux-2.6.39.4/arch/x86/mm/highmem_32.c linux-2.6.39.4/arch/x86/mm/highmem_32.c
19255 --- linux-2.6.39.4/arch/x86/mm/highmem_32.c 2011-05-19 00:06:34.000000000 -0400
19256 +++ linux-2.6.39.4/arch/x86/mm/highmem_32.c 2011-08-05 19:44:35.000000000 -0400
19257 @@ -44,7 +44,10 @@ void *kmap_atomic_prot(struct page *page
19258 idx = type + KM_TYPE_NR*smp_processor_id();
19259 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
19260 BUG_ON(!pte_none(*(kmap_pte-idx)));
19261 +
19262 + pax_open_kernel();
19263 set_pte(kmap_pte-idx, mk_pte(page, prot));
19264 + pax_close_kernel();
19265
19266 return (void *)vaddr;
19267 }
19268 diff -urNp linux-2.6.39.4/arch/x86/mm/hugetlbpage.c linux-2.6.39.4/arch/x86/mm/hugetlbpage.c
19269 --- linux-2.6.39.4/arch/x86/mm/hugetlbpage.c 2011-05-19 00:06:34.000000000 -0400
19270 +++ linux-2.6.39.4/arch/x86/mm/hugetlbpage.c 2011-08-05 19:44:35.000000000 -0400
19271 @@ -266,13 +266,20 @@ static unsigned long hugetlb_get_unmappe
19272 struct hstate *h = hstate_file(file);
19273 struct mm_struct *mm = current->mm;
19274 struct vm_area_struct *vma;
19275 - unsigned long start_addr;
19276 + unsigned long start_addr, pax_task_size = TASK_SIZE;
19277 +
19278 +#ifdef CONFIG_PAX_SEGMEXEC
19279 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19280 + pax_task_size = SEGMEXEC_TASK_SIZE;
19281 +#endif
19282 +
19283 + pax_task_size -= PAGE_SIZE;
19284
19285 if (len > mm->cached_hole_size) {
19286 - start_addr = mm->free_area_cache;
19287 + start_addr = mm->free_area_cache;
19288 } else {
19289 - start_addr = TASK_UNMAPPED_BASE;
19290 - mm->cached_hole_size = 0;
19291 + start_addr = mm->mmap_base;
19292 + mm->cached_hole_size = 0;
19293 }
19294
19295 full_search:
19296 @@ -280,26 +287,27 @@ full_search:
19297
19298 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19299 /* At this point: (!vma || addr < vma->vm_end). */
19300 - if (TASK_SIZE - len < addr) {
19301 + if (pax_task_size - len < addr) {
19302 /*
19303 * Start a new search - just in case we missed
19304 * some holes.
19305 */
19306 - if (start_addr != TASK_UNMAPPED_BASE) {
19307 - start_addr = TASK_UNMAPPED_BASE;
19308 + if (start_addr != mm->mmap_base) {
19309 + start_addr = mm->mmap_base;
19310 mm->cached_hole_size = 0;
19311 goto full_search;
19312 }
19313 return -ENOMEM;
19314 }
19315 - if (!vma || addr + len <= vma->vm_start) {
19316 - mm->free_area_cache = addr + len;
19317 - return addr;
19318 - }
19319 + if (check_heap_stack_gap(vma, addr, len))
19320 + break;
19321 if (addr + mm->cached_hole_size < vma->vm_start)
19322 mm->cached_hole_size = vma->vm_start - addr;
19323 addr = ALIGN(vma->vm_end, huge_page_size(h));
19324 }
19325 +
19326 + mm->free_area_cache = addr + len;
19327 + return addr;
19328 }
19329
19330 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
19331 @@ -308,10 +316,9 @@ static unsigned long hugetlb_get_unmappe
19332 {
19333 struct hstate *h = hstate_file(file);
19334 struct mm_struct *mm = current->mm;
19335 - struct vm_area_struct *vma, *prev_vma;
19336 - unsigned long base = mm->mmap_base, addr = addr0;
19337 + struct vm_area_struct *vma;
19338 + unsigned long base = mm->mmap_base, addr;
19339 unsigned long largest_hole = mm->cached_hole_size;
19340 - int first_time = 1;
19341
19342 /* don't allow allocations above current base */
19343 if (mm->free_area_cache > base)
19344 @@ -321,64 +328,63 @@ static unsigned long hugetlb_get_unmappe
19345 largest_hole = 0;
19346 mm->free_area_cache = base;
19347 }
19348 -try_again:
19349 +
19350 /* make sure it can fit in the remaining address space */
19351 if (mm->free_area_cache < len)
19352 goto fail;
19353
19354 /* either no address requested or can't fit in requested address hole */
19355 - addr = (mm->free_area_cache - len) & huge_page_mask(h);
19356 + addr = (mm->free_area_cache - len);
19357 do {
19358 + addr &= huge_page_mask(h);
19359 + vma = find_vma(mm, addr);
19360 /*
19361 * Lookup failure means no vma is above this address,
19362 * i.e. return with success:
19363 - */
19364 - if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
19365 - return addr;
19366 -
19367 - /*
19368 * new region fits between prev_vma->vm_end and
19369 * vma->vm_start, use it:
19370 */
19371 - if (addr + len <= vma->vm_start &&
19372 - (!prev_vma || (addr >= prev_vma->vm_end))) {
19373 + if (check_heap_stack_gap(vma, addr, len)) {
19374 /* remember the address as a hint for next time */
19375 - mm->cached_hole_size = largest_hole;
19376 - return (mm->free_area_cache = addr);
19377 - } else {
19378 - /* pull free_area_cache down to the first hole */
19379 - if (mm->free_area_cache == vma->vm_end) {
19380 - mm->free_area_cache = vma->vm_start;
19381 - mm->cached_hole_size = largest_hole;
19382 - }
19383 + mm->cached_hole_size = largest_hole;
19384 + return (mm->free_area_cache = addr);
19385 + }
19386 + /* pull free_area_cache down to the first hole */
19387 + if (mm->free_area_cache == vma->vm_end) {
19388 + mm->free_area_cache = vma->vm_start;
19389 + mm->cached_hole_size = largest_hole;
19390 }
19391
19392 /* remember the largest hole we saw so far */
19393 if (addr + largest_hole < vma->vm_start)
19394 - largest_hole = vma->vm_start - addr;
19395 + largest_hole = vma->vm_start - addr;
19396
19397 /* try just below the current vma->vm_start */
19398 - addr = (vma->vm_start - len) & huge_page_mask(h);
19399 - } while (len <= vma->vm_start);
19400 + addr = skip_heap_stack_gap(vma, len);
19401 + } while (!IS_ERR_VALUE(addr));
19402
19403 fail:
19404 /*
19405 - * if hint left us with no space for the requested
19406 - * mapping then try again:
19407 - */
19408 - if (first_time) {
19409 - mm->free_area_cache = base;
19410 - largest_hole = 0;
19411 - first_time = 0;
19412 - goto try_again;
19413 - }
19414 - /*
19415 * A failed mmap() very likely causes application failure,
19416 * so fall back to the bottom-up function here. This scenario
19417 * can happen with large stack limits and large mmap()
19418 * allocations.
19419 */
19420 - mm->free_area_cache = TASK_UNMAPPED_BASE;
19421 +
19422 +#ifdef CONFIG_PAX_SEGMEXEC
19423 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19424 + mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19425 + else
19426 +#endif
19427 +
19428 + mm->mmap_base = TASK_UNMAPPED_BASE;
19429 +
19430 +#ifdef CONFIG_PAX_RANDMMAP
19431 + if (mm->pax_flags & MF_PAX_RANDMMAP)
19432 + mm->mmap_base += mm->delta_mmap;
19433 +#endif
19434 +
19435 + mm->free_area_cache = mm->mmap_base;
19436 mm->cached_hole_size = ~0UL;
19437 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
19438 len, pgoff, flags);
19439 @@ -386,6 +392,7 @@ fail:
19440 /*
19441 * Restore the topdown base:
19442 */
19443 + mm->mmap_base = base;
19444 mm->free_area_cache = base;
19445 mm->cached_hole_size = ~0UL;
19446
19447 @@ -399,10 +406,19 @@ hugetlb_get_unmapped_area(struct file *f
19448 struct hstate *h = hstate_file(file);
19449 struct mm_struct *mm = current->mm;
19450 struct vm_area_struct *vma;
19451 + unsigned long pax_task_size = TASK_SIZE;
19452
19453 if (len & ~huge_page_mask(h))
19454 return -EINVAL;
19455 - if (len > TASK_SIZE)
19456 +
19457 +#ifdef CONFIG_PAX_SEGMEXEC
19458 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
19459 + pax_task_size = SEGMEXEC_TASK_SIZE;
19460 +#endif
19461 +
19462 + pax_task_size -= PAGE_SIZE;
19463 +
19464 + if (len > pax_task_size)
19465 return -ENOMEM;
19466
19467 if (flags & MAP_FIXED) {
19468 @@ -414,8 +430,7 @@ hugetlb_get_unmapped_area(struct file *f
19469 if (addr) {
19470 addr = ALIGN(addr, huge_page_size(h));
19471 vma = find_vma(mm, addr);
19472 - if (TASK_SIZE - len >= addr &&
19473 - (!vma || addr + len <= vma->vm_start))
19474 + if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
19475 return addr;
19476 }
19477 if (mm->get_unmapped_area == arch_get_unmapped_area)
19478 diff -urNp linux-2.6.39.4/arch/x86/mm/init_32.c linux-2.6.39.4/arch/x86/mm/init_32.c
19479 --- linux-2.6.39.4/arch/x86/mm/init_32.c 2011-05-19 00:06:34.000000000 -0400
19480 +++ linux-2.6.39.4/arch/x86/mm/init_32.c 2011-08-05 19:44:35.000000000 -0400
19481 @@ -74,36 +74,6 @@ static __init void *alloc_low_page(void)
19482 }
19483
19484 /*
19485 - * Creates a middle page table and puts a pointer to it in the
19486 - * given global directory entry. This only returns the gd entry
19487 - * in non-PAE compilation mode, since the middle layer is folded.
19488 - */
19489 -static pmd_t * __init one_md_table_init(pgd_t *pgd)
19490 -{
19491 - pud_t *pud;
19492 - pmd_t *pmd_table;
19493 -
19494 -#ifdef CONFIG_X86_PAE
19495 - if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
19496 - if (after_bootmem)
19497 - pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
19498 - else
19499 - pmd_table = (pmd_t *)alloc_low_page();
19500 - paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
19501 - set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
19502 - pud = pud_offset(pgd, 0);
19503 - BUG_ON(pmd_table != pmd_offset(pud, 0));
19504 -
19505 - return pmd_table;
19506 - }
19507 -#endif
19508 - pud = pud_offset(pgd, 0);
19509 - pmd_table = pmd_offset(pud, 0);
19510 -
19511 - return pmd_table;
19512 -}
19513 -
19514 -/*
19515 * Create a page table and place a pointer to it in a middle page
19516 * directory entry:
19517 */
19518 @@ -123,13 +93,28 @@ static pte_t * __init one_page_table_ini
19519 page_table = (pte_t *)alloc_low_page();
19520
19521 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
19522 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
19523 + set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
19524 +#else
19525 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
19526 +#endif
19527 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
19528 }
19529
19530 return pte_offset_kernel(pmd, 0);
19531 }
19532
19533 +static pmd_t * __init one_md_table_init(pgd_t *pgd)
19534 +{
19535 + pud_t *pud;
19536 + pmd_t *pmd_table;
19537 +
19538 + pud = pud_offset(pgd, 0);
19539 + pmd_table = pmd_offset(pud, 0);
19540 +
19541 + return pmd_table;
19542 +}
19543 +
19544 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
19545 {
19546 int pgd_idx = pgd_index(vaddr);
19547 @@ -203,6 +188,7 @@ page_table_range_init(unsigned long star
19548 int pgd_idx, pmd_idx;
19549 unsigned long vaddr;
19550 pgd_t *pgd;
19551 + pud_t *pud;
19552 pmd_t *pmd;
19553 pte_t *pte = NULL;
19554
19555 @@ -212,8 +198,13 @@ page_table_range_init(unsigned long star
19556 pgd = pgd_base + pgd_idx;
19557
19558 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
19559 - pmd = one_md_table_init(pgd);
19560 - pmd = pmd + pmd_index(vaddr);
19561 + pud = pud_offset(pgd, vaddr);
19562 + pmd = pmd_offset(pud, vaddr);
19563 +
19564 +#ifdef CONFIG_X86_PAE
19565 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19566 +#endif
19567 +
19568 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
19569 pmd++, pmd_idx++) {
19570 pte = page_table_kmap_check(one_page_table_init(pmd),
19571 @@ -225,11 +216,20 @@ page_table_range_init(unsigned long star
19572 }
19573 }
19574
19575 -static inline int is_kernel_text(unsigned long addr)
19576 +static inline int is_kernel_text(unsigned long start, unsigned long end)
19577 {
19578 - if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
19579 - return 1;
19580 - return 0;
19581 + if ((start > ktla_ktva((unsigned long)_etext) ||
19582 + end <= ktla_ktva((unsigned long)_stext)) &&
19583 + (start > ktla_ktva((unsigned long)_einittext) ||
19584 + end <= ktla_ktva((unsigned long)_sinittext)) &&
19585 +
19586 +#ifdef CONFIG_ACPI_SLEEP
19587 + (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
19588 +#endif
19589 +
19590 + (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
19591 + return 0;
19592 + return 1;
19593 }
19594
19595 /*
19596 @@ -246,9 +246,10 @@ kernel_physical_mapping_init(unsigned lo
19597 unsigned long last_map_addr = end;
19598 unsigned long start_pfn, end_pfn;
19599 pgd_t *pgd_base = swapper_pg_dir;
19600 - int pgd_idx, pmd_idx, pte_ofs;
19601 + unsigned int pgd_idx, pmd_idx, pte_ofs;
19602 unsigned long pfn;
19603 pgd_t *pgd;
19604 + pud_t *pud;
19605 pmd_t *pmd;
19606 pte_t *pte;
19607 unsigned pages_2m, pages_4k;
19608 @@ -281,8 +282,13 @@ repeat:
19609 pfn = start_pfn;
19610 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19611 pgd = pgd_base + pgd_idx;
19612 - for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
19613 - pmd = one_md_table_init(pgd);
19614 + for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
19615 + pud = pud_offset(pgd, 0);
19616 + pmd = pmd_offset(pud, 0);
19617 +
19618 +#ifdef CONFIG_X86_PAE
19619 + paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
19620 +#endif
19621
19622 if (pfn >= end_pfn)
19623 continue;
19624 @@ -294,14 +300,13 @@ repeat:
19625 #endif
19626 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
19627 pmd++, pmd_idx++) {
19628 - unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
19629 + unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
19630
19631 /*
19632 * Map with big pages if possible, otherwise
19633 * create normal page tables:
19634 */
19635 if (use_pse) {
19636 - unsigned int addr2;
19637 pgprot_t prot = PAGE_KERNEL_LARGE;
19638 /*
19639 * first pass will use the same initial
19640 @@ -311,11 +316,7 @@ repeat:
19641 __pgprot(PTE_IDENT_ATTR |
19642 _PAGE_PSE);
19643
19644 - addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
19645 - PAGE_OFFSET + PAGE_SIZE-1;
19646 -
19647 - if (is_kernel_text(addr) ||
19648 - is_kernel_text(addr2))
19649 + if (is_kernel_text(address, address + PMD_SIZE))
19650 prot = PAGE_KERNEL_LARGE_EXEC;
19651
19652 pages_2m++;
19653 @@ -332,7 +333,7 @@ repeat:
19654 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
19655 pte += pte_ofs;
19656 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
19657 - pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
19658 + pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
19659 pgprot_t prot = PAGE_KERNEL;
19660 /*
19661 * first pass will use the same initial
19662 @@ -340,7 +341,7 @@ repeat:
19663 */
19664 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
19665
19666 - if (is_kernel_text(addr))
19667 + if (is_kernel_text(address, address + PAGE_SIZE))
19668 prot = PAGE_KERNEL_EXEC;
19669
19670 pages_4k++;
19671 @@ -472,7 +473,7 @@ void __init native_pagetable_setup_start
19672
19673 pud = pud_offset(pgd, va);
19674 pmd = pmd_offset(pud, va);
19675 - if (!pmd_present(*pmd))
19676 + if (!pmd_present(*pmd) || pmd_huge(*pmd))
19677 break;
19678
19679 pte = pte_offset_kernel(pmd, va);
19680 @@ -524,12 +525,10 @@ void __init early_ioremap_page_table_ran
19681
19682 static void __init pagetable_init(void)
19683 {
19684 - pgd_t *pgd_base = swapper_pg_dir;
19685 -
19686 - permanent_kmaps_init(pgd_base);
19687 + permanent_kmaps_init(swapper_pg_dir);
19688 }
19689
19690 -pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19691 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
19692 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19693
19694 /* user-defined highmem size */
19695 @@ -754,6 +753,12 @@ void __init mem_init(void)
19696
19697 pci_iommu_alloc();
19698
19699 +#ifdef CONFIG_PAX_PER_CPU_PGD
19700 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19701 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19702 + KERNEL_PGD_PTRS);
19703 +#endif
19704 +
19705 #ifdef CONFIG_FLATMEM
19706 BUG_ON(!mem_map);
19707 #endif
19708 @@ -771,7 +776,7 @@ void __init mem_init(void)
19709 set_highmem_pages_init();
19710
19711 codesize = (unsigned long) &_etext - (unsigned long) &_text;
19712 - datasize = (unsigned long) &_edata - (unsigned long) &_etext;
19713 + datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
19714 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
19715
19716 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
19717 @@ -812,10 +817,10 @@ void __init mem_init(void)
19718 ((unsigned long)&__init_end -
19719 (unsigned long)&__init_begin) >> 10,
19720
19721 - (unsigned long)&_etext, (unsigned long)&_edata,
19722 - ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
19723 + (unsigned long)&_sdata, (unsigned long)&_edata,
19724 + ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
19725
19726 - (unsigned long)&_text, (unsigned long)&_etext,
19727 + ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
19728 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
19729
19730 /*
19731 @@ -893,6 +898,7 @@ void set_kernel_text_rw(void)
19732 if (!kernel_set_to_readonly)
19733 return;
19734
19735 + start = ktla_ktva(start);
19736 pr_debug("Set kernel text: %lx - %lx for read write\n",
19737 start, start+size);
19738
19739 @@ -907,6 +913,7 @@ void set_kernel_text_ro(void)
19740 if (!kernel_set_to_readonly)
19741 return;
19742
19743 + start = ktla_ktva(start);
19744 pr_debug("Set kernel text: %lx - %lx for read only\n",
19745 start, start+size);
19746
19747 @@ -935,6 +942,7 @@ void mark_rodata_ro(void)
19748 unsigned long start = PFN_ALIGN(_text);
19749 unsigned long size = PFN_ALIGN(_etext) - start;
19750
19751 + start = ktla_ktva(start);
19752 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
19753 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
19754 size >> 10);
19755 diff -urNp linux-2.6.39.4/arch/x86/mm/init_64.c linux-2.6.39.4/arch/x86/mm/init_64.c
19756 --- linux-2.6.39.4/arch/x86/mm/init_64.c 2011-05-19 00:06:34.000000000 -0400
19757 +++ linux-2.6.39.4/arch/x86/mm/init_64.c 2011-08-05 19:44:35.000000000 -0400
19758 @@ -74,7 +74,7 @@ early_param("gbpages", parse_direct_gbpa
19759 * around without checking the pgd every time.
19760 */
19761
19762 -pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
19763 +pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
19764 EXPORT_SYMBOL_GPL(__supported_pte_mask);
19765
19766 int force_personality32;
19767 @@ -107,12 +107,22 @@ void sync_global_pgds(unsigned long star
19768
19769 for (address = start; address <= end; address += PGDIR_SIZE) {
19770 const pgd_t *pgd_ref = pgd_offset_k(address);
19771 +
19772 +#ifdef CONFIG_PAX_PER_CPU_PGD
19773 + unsigned long cpu;
19774 +#else
19775 struct page *page;
19776 +#endif
19777
19778 if (pgd_none(*pgd_ref))
19779 continue;
19780
19781 spin_lock(&pgd_lock);
19782 +
19783 +#ifdef CONFIG_PAX_PER_CPU_PGD
19784 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
19785 + pgd_t *pgd = pgd_offset_cpu(cpu, address);
19786 +#else
19787 list_for_each_entry(page, &pgd_list, lru) {
19788 pgd_t *pgd;
19789 spinlock_t *pgt_lock;
19790 @@ -121,6 +131,7 @@ void sync_global_pgds(unsigned long star
19791 /* the pgt_lock only for Xen */
19792 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
19793 spin_lock(pgt_lock);
19794 +#endif
19795
19796 if (pgd_none(*pgd))
19797 set_pgd(pgd, *pgd_ref);
19798 @@ -128,7 +139,10 @@ void sync_global_pgds(unsigned long star
19799 BUG_ON(pgd_page_vaddr(*pgd)
19800 != pgd_page_vaddr(*pgd_ref));
19801
19802 +#ifndef CONFIG_PAX_PER_CPU_PGD
19803 spin_unlock(pgt_lock);
19804 +#endif
19805 +
19806 }
19807 spin_unlock(&pgd_lock);
19808 }
19809 @@ -202,7 +216,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
19810 pmd = fill_pmd(pud, vaddr);
19811 pte = fill_pte(pmd, vaddr);
19812
19813 + pax_open_kernel();
19814 set_pte(pte, new_pte);
19815 + pax_close_kernel();
19816
19817 /*
19818 * It's enough to flush this one mapping.
19819 @@ -261,14 +277,12 @@ static void __init __init_extra_mapping(
19820 pgd = pgd_offset_k((unsigned long)__va(phys));
19821 if (pgd_none(*pgd)) {
19822 pud = (pud_t *) spp_getpage();
19823 - set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
19824 - _PAGE_USER));
19825 + set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
19826 }
19827 pud = pud_offset(pgd, (unsigned long)__va(phys));
19828 if (pud_none(*pud)) {
19829 pmd = (pmd_t *) spp_getpage();
19830 - set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
19831 - _PAGE_USER));
19832 + set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
19833 }
19834 pmd = pmd_offset(pud, phys);
19835 BUG_ON(!pmd_none(*pmd));
19836 @@ -698,6 +712,12 @@ void __init mem_init(void)
19837
19838 pci_iommu_alloc();
19839
19840 +#ifdef CONFIG_PAX_PER_CPU_PGD
19841 + clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
19842 + swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19843 + KERNEL_PGD_PTRS);
19844 +#endif
19845 +
19846 /* clear_bss() already clear the empty_zero_page */
19847
19848 reservedpages = 0;
19849 @@ -858,8 +878,8 @@ int kern_addr_valid(unsigned long addr)
19850 static struct vm_area_struct gate_vma = {
19851 .vm_start = VSYSCALL_START,
19852 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
19853 - .vm_page_prot = PAGE_READONLY_EXEC,
19854 - .vm_flags = VM_READ | VM_EXEC
19855 + .vm_page_prot = PAGE_READONLY,
19856 + .vm_flags = VM_READ
19857 };
19858
19859 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
19860 @@ -893,7 +913,7 @@ int in_gate_area_no_mm(unsigned long add
19861
19862 const char *arch_vma_name(struct vm_area_struct *vma)
19863 {
19864 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
19865 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
19866 return "[vdso]";
19867 if (vma == &gate_vma)
19868 return "[vsyscall]";
19869 diff -urNp linux-2.6.39.4/arch/x86/mm/init.c linux-2.6.39.4/arch/x86/mm/init.c
19870 --- linux-2.6.39.4/arch/x86/mm/init.c 2011-05-19 00:06:34.000000000 -0400
19871 +++ linux-2.6.39.4/arch/x86/mm/init.c 2011-08-05 19:44:35.000000000 -0400
19872 @@ -33,7 +33,7 @@ int direct_gbpages
19873 static void __init find_early_table_space(unsigned long end, int use_pse,
19874 int use_gbpages)
19875 {
19876 - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
19877 + unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end;
19878 phys_addr_t base;
19879
19880 puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
19881 @@ -315,12 +315,34 @@ unsigned long __init_refok init_memory_m
19882 */
19883 int devmem_is_allowed(unsigned long pagenr)
19884 {
19885 - if (pagenr <= 256)
19886 +#ifdef CONFIG_GRKERNSEC_KMEM
19887 + /* allow BDA */
19888 + if (!pagenr)
19889 + return 1;
19890 + /* allow EBDA */
19891 + if ((0x9f000 >> PAGE_SHIFT) == pagenr)
19892 + return 1;
19893 +#else
19894 + if (!pagenr)
19895 + return 1;
19896 +#ifdef CONFIG_VM86
19897 + if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
19898 + return 1;
19899 +#endif
19900 +#endif
19901 +
19902 + if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
19903 return 1;
19904 +#ifdef CONFIG_GRKERNSEC_KMEM
19905 + /* throw out everything else below 1MB */
19906 + if (pagenr <= 256)
19907 + return 0;
19908 +#endif
19909 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
19910 return 0;
19911 if (!page_is_ram(pagenr))
19912 return 1;
19913 +
19914 return 0;
19915 }
19916
19917 @@ -375,6 +397,86 @@ void free_init_pages(char *what, unsigne
19918
19919 void free_initmem(void)
19920 {
19921 +
19922 +#ifdef CONFIG_PAX_KERNEXEC
19923 +#ifdef CONFIG_X86_32
19924 + /* PaX: limit KERNEL_CS to actual size */
19925 + unsigned long addr, limit;
19926 + struct desc_struct d;
19927 + int cpu;
19928 +
19929 + limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
19930 + limit = (limit - 1UL) >> PAGE_SHIFT;
19931 +
19932 + memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
19933 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
19934 + pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
19935 + write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
19936 + }
19937 +
19938 + /* PaX: make KERNEL_CS read-only */
19939 + addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
19940 + if (!paravirt_enabled())
19941 + set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
19942 +/*
19943 + for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
19944 + pgd = pgd_offset_k(addr);
19945 + pud = pud_offset(pgd, addr);
19946 + pmd = pmd_offset(pud, addr);
19947 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19948 + }
19949 +*/
19950 +#ifdef CONFIG_X86_PAE
19951 + set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
19952 +/*
19953 + for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
19954 + pgd = pgd_offset_k(addr);
19955 + pud = pud_offset(pgd, addr);
19956 + pmd = pmd_offset(pud, addr);
19957 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19958 + }
19959 +*/
19960 +#endif
19961 +
19962 +#ifdef CONFIG_MODULES
19963 + set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
19964 +#endif
19965 +
19966 +#else
19967 + pgd_t *pgd;
19968 + pud_t *pud;
19969 + pmd_t *pmd;
19970 + unsigned long addr, end;
19971 +
19972 + /* PaX: make kernel code/rodata read-only, rest non-executable */
19973 + for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
19974 + pgd = pgd_offset_k(addr);
19975 + pud = pud_offset(pgd, addr);
19976 + pmd = pmd_offset(pud, addr);
19977 + if (!pmd_present(*pmd))
19978 + continue;
19979 + if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
19980 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19981 + else
19982 + set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
19983 + }
19984 +
19985 + addr = (unsigned long)__va(__pa(__START_KERNEL_map));
19986 + end = addr + KERNEL_IMAGE_SIZE;
19987 + for (; addr < end; addr += PMD_SIZE) {
19988 + pgd = pgd_offset_k(addr);
19989 + pud = pud_offset(pgd, addr);
19990 + pmd = pmd_offset(pud, addr);
19991 + if (!pmd_present(*pmd))
19992 + continue;
19993 + if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
19994 + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
19995 + }
19996 +#endif
19997 +
19998 + flush_tlb_all();
19999 +#endif
20000 +
20001 free_init_pages("unused kernel memory",
20002 (unsigned long)(&__init_begin),
20003 (unsigned long)(&__init_end));
20004 diff -urNp linux-2.6.39.4/arch/x86/mm/iomap_32.c linux-2.6.39.4/arch/x86/mm/iomap_32.c
20005 --- linux-2.6.39.4/arch/x86/mm/iomap_32.c 2011-05-19 00:06:34.000000000 -0400
20006 +++ linux-2.6.39.4/arch/x86/mm/iomap_32.c 2011-08-05 19:44:35.000000000 -0400
20007 @@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long
20008 type = kmap_atomic_idx_push();
20009 idx = type + KM_TYPE_NR * smp_processor_id();
20010 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
20011 +
20012 + pax_open_kernel();
20013 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
20014 + pax_close_kernel();
20015 +
20016 arch_flush_lazy_mmu_mode();
20017
20018 return (void *)vaddr;
20019 diff -urNp linux-2.6.39.4/arch/x86/mm/ioremap.c linux-2.6.39.4/arch/x86/mm/ioremap.c
20020 --- linux-2.6.39.4/arch/x86/mm/ioremap.c 2011-05-19 00:06:34.000000000 -0400
20021 +++ linux-2.6.39.4/arch/x86/mm/ioremap.c 2011-08-05 19:44:35.000000000 -0400
20022 @@ -104,7 +104,7 @@ static void __iomem *__ioremap_caller(re
20023 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
20024 int is_ram = page_is_ram(pfn);
20025
20026 - if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
20027 + if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
20028 return NULL;
20029 WARN_ON_ONCE(is_ram);
20030 }
20031 @@ -344,7 +344,7 @@ static int __init early_ioremap_debug_se
20032 early_param("early_ioremap_debug", early_ioremap_debug_setup);
20033
20034 static __initdata int after_paging_init;
20035 -static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
20036 +static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
20037
20038 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
20039 {
20040 @@ -381,8 +381,7 @@ void __init early_ioremap_init(void)
20041 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
20042
20043 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
20044 - memset(bm_pte, 0, sizeof(bm_pte));
20045 - pmd_populate_kernel(&init_mm, pmd, bm_pte);
20046 + pmd_populate_user(&init_mm, pmd, bm_pte);
20047
20048 /*
20049 * The boot-ioremap range spans multiple pmds, for which
20050 diff -urNp linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c
20051 --- linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-05-19 00:06:34.000000000 -0400
20052 +++ linux-2.6.39.4/arch/x86/mm/kmemcheck/kmemcheck.c 2011-08-05 19:44:35.000000000 -0400
20053 @@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
20054 * memory (e.g. tracked pages)? For now, we need this to avoid
20055 * invoking kmemcheck for PnP BIOS calls.
20056 */
20057 - if (regs->flags & X86_VM_MASK)
20058 + if (v8086_mode(regs))
20059 return false;
20060 - if (regs->cs != __KERNEL_CS)
20061 + if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
20062 return false;
20063
20064 pte = kmemcheck_pte_lookup(address);
20065 diff -urNp linux-2.6.39.4/arch/x86/mm/mmap.c linux-2.6.39.4/arch/x86/mm/mmap.c
20066 --- linux-2.6.39.4/arch/x86/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
20067 +++ linux-2.6.39.4/arch/x86/mm/mmap.c 2011-08-05 19:44:35.000000000 -0400
20068 @@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
20069 * Leave an at least ~128 MB hole with possible stack randomization.
20070 */
20071 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
20072 -#define MAX_GAP (TASK_SIZE/6*5)
20073 +#define MAX_GAP (pax_task_size/6*5)
20074
20075 /*
20076 * True on X86_32 or when emulating IA32 on X86_64
20077 @@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
20078 return rnd << PAGE_SHIFT;
20079 }
20080
20081 -static unsigned long mmap_base(void)
20082 +static unsigned long mmap_base(struct mm_struct *mm)
20083 {
20084 unsigned long gap = rlimit(RLIMIT_STACK);
20085 + unsigned long pax_task_size = TASK_SIZE;
20086 +
20087 +#ifdef CONFIG_PAX_SEGMEXEC
20088 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20089 + pax_task_size = SEGMEXEC_TASK_SIZE;
20090 +#endif
20091
20092 if (gap < MIN_GAP)
20093 gap = MIN_GAP;
20094 else if (gap > MAX_GAP)
20095 gap = MAX_GAP;
20096
20097 - return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
20098 + return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
20099 }
20100
20101 /*
20102 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
20103 * does, but not when emulating X86_32
20104 */
20105 -static unsigned long mmap_legacy_base(void)
20106 +static unsigned long mmap_legacy_base(struct mm_struct *mm)
20107 {
20108 - if (mmap_is_ia32())
20109 + if (mmap_is_ia32()) {
20110 +
20111 +#ifdef CONFIG_PAX_SEGMEXEC
20112 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
20113 + return SEGMEXEC_TASK_UNMAPPED_BASE;
20114 + else
20115 +#endif
20116 +
20117 return TASK_UNMAPPED_BASE;
20118 - else
20119 + } else
20120 return TASK_UNMAPPED_BASE + mmap_rnd();
20121 }
20122
20123 @@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
20124 void arch_pick_mmap_layout(struct mm_struct *mm)
20125 {
20126 if (mmap_is_legacy()) {
20127 - mm->mmap_base = mmap_legacy_base();
20128 + mm->mmap_base = mmap_legacy_base(mm);
20129 +
20130 +#ifdef CONFIG_PAX_RANDMMAP
20131 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20132 + mm->mmap_base += mm->delta_mmap;
20133 +#endif
20134 +
20135 mm->get_unmapped_area = arch_get_unmapped_area;
20136 mm->unmap_area = arch_unmap_area;
20137 } else {
20138 - mm->mmap_base = mmap_base();
20139 + mm->mmap_base = mmap_base(mm);
20140 +
20141 +#ifdef CONFIG_PAX_RANDMMAP
20142 + if (mm->pax_flags & MF_PAX_RANDMMAP)
20143 + mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
20144 +#endif
20145 +
20146 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
20147 mm->unmap_area = arch_unmap_area_topdown;
20148 }
20149 diff -urNp linux-2.6.39.4/arch/x86/mm/mmio-mod.c linux-2.6.39.4/arch/x86/mm/mmio-mod.c
20150 --- linux-2.6.39.4/arch/x86/mm/mmio-mod.c 2011-05-19 00:06:34.000000000 -0400
20151 +++ linux-2.6.39.4/arch/x86/mm/mmio-mod.c 2011-08-05 19:44:35.000000000 -0400
20152 @@ -195,7 +195,7 @@ static void pre(struct kmmio_probe *p, s
20153 break;
20154 default:
20155 {
20156 - unsigned char *ip = (unsigned char *)instptr;
20157 + unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
20158 my_trace->opcode = MMIO_UNKNOWN_OP;
20159 my_trace->width = 0;
20160 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
20161 @@ -235,7 +235,7 @@ static void post(struct kmmio_probe *p,
20162 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
20163 void __iomem *addr)
20164 {
20165 - static atomic_t next_id;
20166 + static atomic_unchecked_t next_id;
20167 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
20168 /* These are page-unaligned. */
20169 struct mmiotrace_map map = {
20170 @@ -259,7 +259,7 @@ static void ioremap_trace_core(resource_
20171 .private = trace
20172 },
20173 .phys = offset,
20174 - .id = atomic_inc_return(&next_id)
20175 + .id = atomic_inc_return_unchecked(&next_id)
20176 };
20177 map.map_id = trace->id;
20178
20179 diff -urNp linux-2.6.39.4/arch/x86/mm/numa_32.c linux-2.6.39.4/arch/x86/mm/numa_32.c
20180 --- linux-2.6.39.4/arch/x86/mm/numa_32.c 2011-05-19 00:06:34.000000000 -0400
20181 +++ linux-2.6.39.4/arch/x86/mm/numa_32.c 2011-08-05 19:44:35.000000000 -0400
20182 @@ -99,7 +99,6 @@ unsigned long node_memmap_size_bytes(int
20183 }
20184 #endif
20185
20186 -extern unsigned long find_max_low_pfn(void);
20187 extern unsigned long highend_pfn, highstart_pfn;
20188
20189 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
20190 diff -urNp linux-2.6.39.4/arch/x86/mm/pageattr.c linux-2.6.39.4/arch/x86/mm/pageattr.c
20191 --- linux-2.6.39.4/arch/x86/mm/pageattr.c 2011-05-19 00:06:34.000000000 -0400
20192 +++ linux-2.6.39.4/arch/x86/mm/pageattr.c 2011-08-05 19:44:35.000000000 -0400
20193 @@ -261,7 +261,7 @@ static inline pgprot_t static_protection
20194 */
20195 #ifdef CONFIG_PCI_BIOS
20196 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
20197 - pgprot_val(forbidden) |= _PAGE_NX;
20198 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20199 #endif
20200
20201 /*
20202 @@ -269,9 +269,10 @@ static inline pgprot_t static_protection
20203 * Does not cover __inittext since that is gone later on. On
20204 * 64bit we do not enforce !NX on the low mapping
20205 */
20206 - if (within(address, (unsigned long)_text, (unsigned long)_etext))
20207 - pgprot_val(forbidden) |= _PAGE_NX;
20208 + if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
20209 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20210
20211 +#ifdef CONFIG_DEBUG_RODATA
20212 /*
20213 * The .rodata section needs to be read-only. Using the pfn
20214 * catches all aliases.
20215 @@ -279,6 +280,7 @@ static inline pgprot_t static_protection
20216 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
20217 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
20218 pgprot_val(forbidden) |= _PAGE_RW;
20219 +#endif
20220
20221 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
20222 /*
20223 @@ -317,6 +319,13 @@ static inline pgprot_t static_protection
20224 }
20225 #endif
20226
20227 +#ifdef CONFIG_PAX_KERNEXEC
20228 + if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
20229 + pgprot_val(forbidden) |= _PAGE_RW;
20230 + pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
20231 + }
20232 +#endif
20233 +
20234 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
20235
20236 return prot;
20237 @@ -369,23 +378,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
20238 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
20239 {
20240 /* change init_mm */
20241 + pax_open_kernel();
20242 set_pte_atomic(kpte, pte);
20243 +
20244 #ifdef CONFIG_X86_32
20245 if (!SHARED_KERNEL_PMD) {
20246 +
20247 +#ifdef CONFIG_PAX_PER_CPU_PGD
20248 + unsigned long cpu;
20249 +#else
20250 struct page *page;
20251 +#endif
20252
20253 +#ifdef CONFIG_PAX_PER_CPU_PGD
20254 + for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20255 + pgd_t *pgd = get_cpu_pgd(cpu);
20256 +#else
20257 list_for_each_entry(page, &pgd_list, lru) {
20258 - pgd_t *pgd;
20259 + pgd_t *pgd = (pgd_t *)page_address(page);
20260 +#endif
20261 +
20262 pud_t *pud;
20263 pmd_t *pmd;
20264
20265 - pgd = (pgd_t *)page_address(page) + pgd_index(address);
20266 + pgd += pgd_index(address);
20267 pud = pud_offset(pgd, address);
20268 pmd = pmd_offset(pud, address);
20269 set_pte_atomic((pte_t *)pmd, pte);
20270 }
20271 }
20272 #endif
20273 + pax_close_kernel();
20274 }
20275
20276 static int
20277 diff -urNp linux-2.6.39.4/arch/x86/mm/pageattr-test.c linux-2.6.39.4/arch/x86/mm/pageattr-test.c
20278 --- linux-2.6.39.4/arch/x86/mm/pageattr-test.c 2011-05-19 00:06:34.000000000 -0400
20279 +++ linux-2.6.39.4/arch/x86/mm/pageattr-test.c 2011-08-05 19:44:35.000000000 -0400
20280 @@ -36,7 +36,7 @@ enum {
20281
20282 static int pte_testbit(pte_t pte)
20283 {
20284 - return pte_flags(pte) & _PAGE_UNUSED1;
20285 + return pte_flags(pte) & _PAGE_CPA_TEST;
20286 }
20287
20288 struct split_state {
20289 diff -urNp linux-2.6.39.4/arch/x86/mm/pat.c linux-2.6.39.4/arch/x86/mm/pat.c
20290 --- linux-2.6.39.4/arch/x86/mm/pat.c 2011-05-19 00:06:34.000000000 -0400
20291 +++ linux-2.6.39.4/arch/x86/mm/pat.c 2011-08-05 19:44:35.000000000 -0400
20292 @@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end)
20293
20294 if (!entry) {
20295 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
20296 - current->comm, current->pid, start, end);
20297 + current->comm, task_pid_nr(current), start, end);
20298 return -EINVAL;
20299 }
20300
20301 @@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig
20302 while (cursor < to) {
20303 if (!devmem_is_allowed(pfn)) {
20304 printk(KERN_INFO
20305 - "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
20306 - current->comm, from, to);
20307 + "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
20308 + current->comm, from, to, cursor);
20309 return 0;
20310 }
20311 cursor += PAGE_SIZE;
20312 @@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un
20313 printk(KERN_INFO
20314 "%s:%d ioremap_change_attr failed %s "
20315 "for %Lx-%Lx\n",
20316 - current->comm, current->pid,
20317 + current->comm, task_pid_nr(current),
20318 cattr_name(flags),
20319 base, (unsigned long long)(base + size));
20320 return -EINVAL;
20321 @@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr,
20322 if (want_flags != flags) {
20323 printk(KERN_WARNING
20324 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
20325 - current->comm, current->pid,
20326 + current->comm, task_pid_nr(current),
20327 cattr_name(want_flags),
20328 (unsigned long long)paddr,
20329 (unsigned long long)(paddr + size),
20330 @@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr,
20331 free_memtype(paddr, paddr + size);
20332 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
20333 " for %Lx-%Lx, got %s\n",
20334 - current->comm, current->pid,
20335 + current->comm, task_pid_nr(current),
20336 cattr_name(want_flags),
20337 (unsigned long long)paddr,
20338 (unsigned long long)(paddr + size),
20339 diff -urNp linux-2.6.39.4/arch/x86/mm/pf_in.c linux-2.6.39.4/arch/x86/mm/pf_in.c
20340 --- linux-2.6.39.4/arch/x86/mm/pf_in.c 2011-05-19 00:06:34.000000000 -0400
20341 +++ linux-2.6.39.4/arch/x86/mm/pf_in.c 2011-08-05 19:44:35.000000000 -0400
20342 @@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
20343 int i;
20344 enum reason_type rv = OTHERS;
20345
20346 - p = (unsigned char *)ins_addr;
20347 + p = (unsigned char *)ktla_ktva(ins_addr);
20348 p += skip_prefix(p, &prf);
20349 p += get_opcode(p, &opcode);
20350
20351 @@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
20352 struct prefix_bits prf;
20353 int i;
20354
20355 - p = (unsigned char *)ins_addr;
20356 + p = (unsigned char *)ktla_ktva(ins_addr);
20357 p += skip_prefix(p, &prf);
20358 p += get_opcode(p, &opcode);
20359
20360 @@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
20361 struct prefix_bits prf;
20362 int i;
20363
20364 - p = (unsigned char *)ins_addr;
20365 + p = (unsigned char *)ktla_ktva(ins_addr);
20366 p += skip_prefix(p, &prf);
20367 p += get_opcode(p, &opcode);
20368
20369 @@ -416,7 +416,7 @@ unsigned long get_ins_reg_val(unsigned l
20370 int i;
20371 unsigned long rv;
20372
20373 - p = (unsigned char *)ins_addr;
20374 + p = (unsigned char *)ktla_ktva(ins_addr);
20375 p += skip_prefix(p, &prf);
20376 p += get_opcode(p, &opcode);
20377 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
20378 @@ -476,7 +476,7 @@ unsigned long get_ins_imm_val(unsigned l
20379 int i;
20380 unsigned long rv;
20381
20382 - p = (unsigned char *)ins_addr;
20383 + p = (unsigned char *)ktla_ktva(ins_addr);
20384 p += skip_prefix(p, &prf);
20385 p += get_opcode(p, &opcode);
20386 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
20387 diff -urNp linux-2.6.39.4/arch/x86/mm/pgtable_32.c linux-2.6.39.4/arch/x86/mm/pgtable_32.c
20388 --- linux-2.6.39.4/arch/x86/mm/pgtable_32.c 2011-05-19 00:06:34.000000000 -0400
20389 +++ linux-2.6.39.4/arch/x86/mm/pgtable_32.c 2011-08-05 19:44:35.000000000 -0400
20390 @@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr,
20391 return;
20392 }
20393 pte = pte_offset_kernel(pmd, vaddr);
20394 +
20395 + pax_open_kernel();
20396 if (pte_val(pteval))
20397 set_pte_at(&init_mm, vaddr, pte, pteval);
20398 else
20399 pte_clear(&init_mm, vaddr, pte);
20400 + pax_close_kernel();
20401
20402 /*
20403 * It's enough to flush this one mapping.
20404 diff -urNp linux-2.6.39.4/arch/x86/mm/pgtable.c linux-2.6.39.4/arch/x86/mm/pgtable.c
20405 --- linux-2.6.39.4/arch/x86/mm/pgtable.c 2011-05-19 00:06:34.000000000 -0400
20406 +++ linux-2.6.39.4/arch/x86/mm/pgtable.c 2011-08-05 19:44:35.000000000 -0400
20407 @@ -84,10 +84,52 @@ static inline void pgd_list_del(pgd_t *p
20408 list_del(&page->lru);
20409 }
20410
20411 -#define UNSHARED_PTRS_PER_PGD \
20412 - (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20413 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20414 +pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
20415
20416 +void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20417 +{
20418 + while (count--)
20419 + *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
20420 +}
20421 +#endif
20422 +
20423 +#ifdef CONFIG_PAX_PER_CPU_PGD
20424 +void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
20425 +{
20426 + while (count--)
20427 +
20428 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20429 + *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
20430 +#else
20431 + *dst++ = *src++;
20432 +#endif
20433
20434 +}
20435 +#endif
20436 +
20437 +#ifdef CONFIG_X86_64
20438 +#define pxd_t pud_t
20439 +#define pyd_t pgd_t
20440 +#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
20441 +#define pxd_free(mm, pud) pud_free((mm), (pud))
20442 +#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
20443 +#define pyd_offset(mm ,address) pgd_offset((mm), (address))
20444 +#define PYD_SIZE PGDIR_SIZE
20445 +#else
20446 +#define pxd_t pmd_t
20447 +#define pyd_t pud_t
20448 +#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
20449 +#define pxd_free(mm, pud) pmd_free((mm), (pud))
20450 +#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
20451 +#define pyd_offset(mm ,address) pud_offset((mm), (address))
20452 +#define PYD_SIZE PUD_SIZE
20453 +#endif
20454 +
20455 +#ifdef CONFIG_PAX_PER_CPU_PGD
20456 +static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
20457 +static inline void pgd_dtor(pgd_t *pgd) {}
20458 +#else
20459 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
20460 {
20461 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
20462 @@ -128,6 +170,7 @@ static void pgd_dtor(pgd_t *pgd)
20463 pgd_list_del(pgd);
20464 spin_unlock(&pgd_lock);
20465 }
20466 +#endif
20467
20468 /*
20469 * List of all pgd's needed for non-PAE so it can invalidate entries
20470 @@ -140,7 +183,7 @@ static void pgd_dtor(pgd_t *pgd)
20471 * -- wli
20472 */
20473
20474 -#ifdef CONFIG_X86_PAE
20475 +#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
20476 /*
20477 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
20478 * updating the top-level pagetable entries to guarantee the
20479 @@ -152,7 +195,7 @@ static void pgd_dtor(pgd_t *pgd)
20480 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
20481 * and initialize the kernel pmds here.
20482 */
20483 -#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
20484 +#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
20485
20486 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
20487 {
20488 @@ -170,36 +213,38 @@ void pud_populate(struct mm_struct *mm,
20489 */
20490 flush_tlb_mm(mm);
20491 }
20492 +#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
20493 +#define PREALLOCATED_PXDS USER_PGD_PTRS
20494 #else /* !CONFIG_X86_PAE */
20495
20496 /* No need to prepopulate any pagetable entries in non-PAE modes. */
20497 -#define PREALLOCATED_PMDS 0
20498 +#define PREALLOCATED_PXDS 0
20499
20500 #endif /* CONFIG_X86_PAE */
20501
20502 -static void free_pmds(pmd_t *pmds[])
20503 +static void free_pxds(pxd_t *pxds[])
20504 {
20505 int i;
20506
20507 - for(i = 0; i < PREALLOCATED_PMDS; i++)
20508 - if (pmds[i])
20509 - free_page((unsigned long)pmds[i]);
20510 + for(i = 0; i < PREALLOCATED_PXDS; i++)
20511 + if (pxds[i])
20512 + free_page((unsigned long)pxds[i]);
20513 }
20514
20515 -static int preallocate_pmds(pmd_t *pmds[])
20516 +static int preallocate_pxds(pxd_t *pxds[])
20517 {
20518 int i;
20519 bool failed = false;
20520
20521 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
20522 - pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
20523 - if (pmd == NULL)
20524 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
20525 + pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
20526 + if (pxd == NULL)
20527 failed = true;
20528 - pmds[i] = pmd;
20529 + pxds[i] = pxd;
20530 }
20531
20532 if (failed) {
20533 - free_pmds(pmds);
20534 + free_pxds(pxds);
20535 return -ENOMEM;
20536 }
20537
20538 @@ -212,51 +257,55 @@ static int preallocate_pmds(pmd_t *pmds[
20539 * preallocate which never got a corresponding vma will need to be
20540 * freed manually.
20541 */
20542 -static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
20543 +static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
20544 {
20545 int i;
20546
20547 - for(i = 0; i < PREALLOCATED_PMDS; i++) {
20548 + for(i = 0; i < PREALLOCATED_PXDS; i++) {
20549 pgd_t pgd = pgdp[i];
20550
20551 if (pgd_val(pgd) != 0) {
20552 - pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
20553 + pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
20554
20555 - pgdp[i] = native_make_pgd(0);
20556 + set_pgd(pgdp + i, native_make_pgd(0));
20557
20558 - paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
20559 - pmd_free(mm, pmd);
20560 + paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
20561 + pxd_free(mm, pxd);
20562 }
20563 }
20564 }
20565
20566 -static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
20567 +static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
20568 {
20569 - pud_t *pud;
20570 + pyd_t *pyd;
20571 unsigned long addr;
20572 int i;
20573
20574 - if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
20575 + if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
20576 return;
20577
20578 - pud = pud_offset(pgd, 0);
20579 +#ifdef CONFIG_X86_64
20580 + pyd = pyd_offset(mm, 0L);
20581 +#else
20582 + pyd = pyd_offset(pgd, 0L);
20583 +#endif
20584
20585 - for (addr = i = 0; i < PREALLOCATED_PMDS;
20586 - i++, pud++, addr += PUD_SIZE) {
20587 - pmd_t *pmd = pmds[i];
20588 + for (addr = i = 0; i < PREALLOCATED_PXDS;
20589 + i++, pyd++, addr += PYD_SIZE) {
20590 + pxd_t *pxd = pxds[i];
20591
20592 if (i >= KERNEL_PGD_BOUNDARY)
20593 - memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20594 - sizeof(pmd_t) * PTRS_PER_PMD);
20595 + memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
20596 + sizeof(pxd_t) * PTRS_PER_PMD);
20597
20598 - pud_populate(mm, pud, pmd);
20599 + pyd_populate(mm, pyd, pxd);
20600 }
20601 }
20602
20603 pgd_t *pgd_alloc(struct mm_struct *mm)
20604 {
20605 pgd_t *pgd;
20606 - pmd_t *pmds[PREALLOCATED_PMDS];
20607 + pxd_t *pxds[PREALLOCATED_PXDS];
20608
20609 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
20610
20611 @@ -265,11 +314,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20612
20613 mm->pgd = pgd;
20614
20615 - if (preallocate_pmds(pmds) != 0)
20616 + if (preallocate_pxds(pxds) != 0)
20617 goto out_free_pgd;
20618
20619 if (paravirt_pgd_alloc(mm) != 0)
20620 - goto out_free_pmds;
20621 + goto out_free_pxds;
20622
20623 /*
20624 * Make sure that pre-populating the pmds is atomic with
20625 @@ -279,14 +328,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
20626 spin_lock(&pgd_lock);
20627
20628 pgd_ctor(mm, pgd);
20629 - pgd_prepopulate_pmd(mm, pgd, pmds);
20630 + pgd_prepopulate_pxd(mm, pgd, pxds);
20631
20632 spin_unlock(&pgd_lock);
20633
20634 return pgd;
20635
20636 -out_free_pmds:
20637 - free_pmds(pmds);
20638 +out_free_pxds:
20639 + free_pxds(pxds);
20640 out_free_pgd:
20641 free_page((unsigned long)pgd);
20642 out:
20643 @@ -295,7 +344,7 @@ out:
20644
20645 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
20646 {
20647 - pgd_mop_up_pmds(mm, pgd);
20648 + pgd_mop_up_pxds(mm, pgd);
20649 pgd_dtor(pgd);
20650 paravirt_pgd_free(mm, pgd);
20651 free_page((unsigned long)pgd);
20652 diff -urNp linux-2.6.39.4/arch/x86/mm/setup_nx.c linux-2.6.39.4/arch/x86/mm/setup_nx.c
20653 --- linux-2.6.39.4/arch/x86/mm/setup_nx.c 2011-05-19 00:06:34.000000000 -0400
20654 +++ linux-2.6.39.4/arch/x86/mm/setup_nx.c 2011-08-05 19:44:35.000000000 -0400
20655 @@ -5,8 +5,10 @@
20656 #include <asm/pgtable.h>
20657 #include <asm/proto.h>
20658
20659 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20660 static int disable_nx __cpuinitdata;
20661
20662 +#ifndef CONFIG_PAX_PAGEEXEC
20663 /*
20664 * noexec = on|off
20665 *
20666 @@ -28,12 +30,17 @@ static int __init noexec_setup(char *str
20667 return 0;
20668 }
20669 early_param("noexec", noexec_setup);
20670 +#endif
20671 +
20672 +#endif
20673
20674 void __cpuinit x86_configure_nx(void)
20675 {
20676 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
20677 if (cpu_has_nx && !disable_nx)
20678 __supported_pte_mask |= _PAGE_NX;
20679 else
20680 +#endif
20681 __supported_pte_mask &= ~_PAGE_NX;
20682 }
20683
20684 diff -urNp linux-2.6.39.4/arch/x86/mm/tlb.c linux-2.6.39.4/arch/x86/mm/tlb.c
20685 --- linux-2.6.39.4/arch/x86/mm/tlb.c 2011-05-19 00:06:34.000000000 -0400
20686 +++ linux-2.6.39.4/arch/x86/mm/tlb.c 2011-08-05 19:44:35.000000000 -0400
20687 @@ -65,7 +65,11 @@ void leave_mm(int cpu)
20688 BUG();
20689 cpumask_clear_cpu(cpu,
20690 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
20691 +
20692 +#ifndef CONFIG_PAX_PER_CPU_PGD
20693 load_cr3(swapper_pg_dir);
20694 +#endif
20695 +
20696 }
20697 EXPORT_SYMBOL_GPL(leave_mm);
20698
20699 diff -urNp linux-2.6.39.4/arch/x86/oprofile/backtrace.c linux-2.6.39.4/arch/x86/oprofile/backtrace.c
20700 --- linux-2.6.39.4/arch/x86/oprofile/backtrace.c 2011-05-19 00:06:34.000000000 -0400
20701 +++ linux-2.6.39.4/arch/x86/oprofile/backtrace.c 2011-08-05 19:44:35.000000000 -0400
20702 @@ -57,7 +57,7 @@ dump_user_backtrace_32(struct stack_fram
20703 struct stack_frame_ia32 *fp;
20704
20705 /* Also check accessibility of one struct frame_head beyond */
20706 - if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
20707 + if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
20708 return NULL;
20709 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
20710 return NULL;
20711 @@ -123,7 +123,7 @@ x86_backtrace(struct pt_regs * const reg
20712 {
20713 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
20714
20715 - if (!user_mode_vm(regs)) {
20716 + if (!user_mode(regs)) {
20717 unsigned long stack = kernel_stack_pointer(regs);
20718 if (depth)
20719 dump_trace(NULL, regs, (unsigned long *)stack, 0,
20720 diff -urNp linux-2.6.39.4/arch/x86/pci/mrst.c linux-2.6.39.4/arch/x86/pci/mrst.c
20721 --- linux-2.6.39.4/arch/x86/pci/mrst.c 2011-05-19 00:06:34.000000000 -0400
20722 +++ linux-2.6.39.4/arch/x86/pci/mrst.c 2011-08-05 20:34:06.000000000 -0400
20723 @@ -234,7 +234,9 @@ int __init pci_mrst_init(void)
20724 printk(KERN_INFO "Moorestown platform detected, using MRST PCI ops\n");
20725 pci_mmcfg_late_init();
20726 pcibios_enable_irq = mrst_pci_irq_enable;
20727 - pci_root_ops = pci_mrst_ops;
20728 + pax_open_kernel();
20729 + memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
20730 + pax_close_kernel();
20731 /* Continue with standard init */
20732 return 1;
20733 }
20734 diff -urNp linux-2.6.39.4/arch/x86/pci/pcbios.c linux-2.6.39.4/arch/x86/pci/pcbios.c
20735 --- linux-2.6.39.4/arch/x86/pci/pcbios.c 2011-05-19 00:06:34.000000000 -0400
20736 +++ linux-2.6.39.4/arch/x86/pci/pcbios.c 2011-08-05 20:34:06.000000000 -0400
20737 @@ -79,50 +79,93 @@ union bios32 {
20738 static struct {
20739 unsigned long address;
20740 unsigned short segment;
20741 -} bios32_indirect = { 0, __KERNEL_CS };
20742 +} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
20743
20744 /*
20745 * Returns the entry point for the given service, NULL on error
20746 */
20747
20748 -static unsigned long bios32_service(unsigned long service)
20749 +static unsigned long __devinit bios32_service(unsigned long service)
20750 {
20751 unsigned char return_code; /* %al */
20752 unsigned long address; /* %ebx */
20753 unsigned long length; /* %ecx */
20754 unsigned long entry; /* %edx */
20755 unsigned long flags;
20756 + struct desc_struct d, *gdt;
20757
20758 local_irq_save(flags);
20759 - __asm__("lcall *(%%edi); cld"
20760 +
20761 + gdt = get_cpu_gdt_table(smp_processor_id());
20762 +
20763 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
20764 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20765 + pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
20766 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20767 +
20768 + __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
20769 : "=a" (return_code),
20770 "=b" (address),
20771 "=c" (length),
20772 "=d" (entry)
20773 : "0" (service),
20774 "1" (0),
20775 - "D" (&bios32_indirect));
20776 + "D" (&bios32_indirect),
20777 + "r"(__PCIBIOS_DS)
20778 + : "memory");
20779 +
20780 + pax_open_kernel();
20781 + gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
20782 + gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
20783 + gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
20784 + gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
20785 + pax_close_kernel();
20786 +
20787 local_irq_restore(flags);
20788
20789 switch (return_code) {
20790 - case 0:
20791 - return address + entry;
20792 - case 0x80: /* Not present */
20793 - printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20794 - return 0;
20795 - default: /* Shouldn't happen */
20796 - printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20797 - service, return_code);
20798 + case 0: {
20799 + int cpu;
20800 + unsigned char flags;
20801 +
20802 + printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
20803 + if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
20804 + printk(KERN_WARNING "bios32_service: not valid\n");
20805 return 0;
20806 + }
20807 + address = address + PAGE_OFFSET;
20808 + length += 16UL; /* some BIOSs underreport this... */
20809 + flags = 4;
20810 + if (length >= 64*1024*1024) {
20811 + length >>= PAGE_SHIFT;
20812 + flags |= 8;
20813 + }
20814 +
20815 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
20816 + gdt = get_cpu_gdt_table(cpu);
20817 + pack_descriptor(&d, address, length, 0x9b, flags);
20818 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
20819 + pack_descriptor(&d, address, length, 0x93, flags);
20820 + write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
20821 + }
20822 + return entry;
20823 + }
20824 + case 0x80: /* Not present */
20825 + printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
20826 + return 0;
20827 + default: /* Shouldn't happen */
20828 + printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
20829 + service, return_code);
20830 + return 0;
20831 }
20832 }
20833
20834 static struct {
20835 unsigned long address;
20836 unsigned short segment;
20837 -} pci_indirect = { 0, __KERNEL_CS };
20838 +} pci_indirect __read_only = { 0, __PCIBIOS_CS };
20839
20840 -static int pci_bios_present;
20841 +static int pci_bios_present __read_only;
20842
20843 static int __devinit check_pcibios(void)
20844 {
20845 @@ -131,11 +174,13 @@ static int __devinit check_pcibios(void)
20846 unsigned long flags, pcibios_entry;
20847
20848 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
20849 - pci_indirect.address = pcibios_entry + PAGE_OFFSET;
20850 + pci_indirect.address = pcibios_entry;
20851
20852 local_irq_save(flags);
20853 - __asm__(
20854 - "lcall *(%%edi); cld\n\t"
20855 + __asm__("movw %w6, %%ds\n\t"
20856 + "lcall *%%ss:(%%edi); cld\n\t"
20857 + "push %%ss\n\t"
20858 + "pop %%ds\n\t"
20859 "jc 1f\n\t"
20860 "xor %%ah, %%ah\n"
20861 "1:"
20862 @@ -144,7 +189,8 @@ static int __devinit check_pcibios(void)
20863 "=b" (ebx),
20864 "=c" (ecx)
20865 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
20866 - "D" (&pci_indirect)
20867 + "D" (&pci_indirect),
20868 + "r" (__PCIBIOS_DS)
20869 : "memory");
20870 local_irq_restore(flags);
20871
20872 @@ -188,7 +234,10 @@ static int pci_bios_read(unsigned int se
20873
20874 switch (len) {
20875 case 1:
20876 - __asm__("lcall *(%%esi); cld\n\t"
20877 + __asm__("movw %w6, %%ds\n\t"
20878 + "lcall *%%ss:(%%esi); cld\n\t"
20879 + "push %%ss\n\t"
20880 + "pop %%ds\n\t"
20881 "jc 1f\n\t"
20882 "xor %%ah, %%ah\n"
20883 "1:"
20884 @@ -197,7 +246,8 @@ static int pci_bios_read(unsigned int se
20885 : "1" (PCIBIOS_READ_CONFIG_BYTE),
20886 "b" (bx),
20887 "D" ((long)reg),
20888 - "S" (&pci_indirect));
20889 + "S" (&pci_indirect),
20890 + "r" (__PCIBIOS_DS));
20891 /*
20892 * Zero-extend the result beyond 8 bits, do not trust the
20893 * BIOS having done it:
20894 @@ -205,7 +255,10 @@ static int pci_bios_read(unsigned int se
20895 *value &= 0xff;
20896 break;
20897 case 2:
20898 - __asm__("lcall *(%%esi); cld\n\t"
20899 + __asm__("movw %w6, %%ds\n\t"
20900 + "lcall *%%ss:(%%esi); cld\n\t"
20901 + "push %%ss\n\t"
20902 + "pop %%ds\n\t"
20903 "jc 1f\n\t"
20904 "xor %%ah, %%ah\n"
20905 "1:"
20906 @@ -214,7 +267,8 @@ static int pci_bios_read(unsigned int se
20907 : "1" (PCIBIOS_READ_CONFIG_WORD),
20908 "b" (bx),
20909 "D" ((long)reg),
20910 - "S" (&pci_indirect));
20911 + "S" (&pci_indirect),
20912 + "r" (__PCIBIOS_DS));
20913 /*
20914 * Zero-extend the result beyond 16 bits, do not trust the
20915 * BIOS having done it:
20916 @@ -222,7 +276,10 @@ static int pci_bios_read(unsigned int se
20917 *value &= 0xffff;
20918 break;
20919 case 4:
20920 - __asm__("lcall *(%%esi); cld\n\t"
20921 + __asm__("movw %w6, %%ds\n\t"
20922 + "lcall *%%ss:(%%esi); cld\n\t"
20923 + "push %%ss\n\t"
20924 + "pop %%ds\n\t"
20925 "jc 1f\n\t"
20926 "xor %%ah, %%ah\n"
20927 "1:"
20928 @@ -231,7 +288,8 @@ static int pci_bios_read(unsigned int se
20929 : "1" (PCIBIOS_READ_CONFIG_DWORD),
20930 "b" (bx),
20931 "D" ((long)reg),
20932 - "S" (&pci_indirect));
20933 + "S" (&pci_indirect),
20934 + "r" (__PCIBIOS_DS));
20935 break;
20936 }
20937
20938 @@ -254,7 +312,10 @@ static int pci_bios_write(unsigned int s
20939
20940 switch (len) {
20941 case 1:
20942 - __asm__("lcall *(%%esi); cld\n\t"
20943 + __asm__("movw %w6, %%ds\n\t"
20944 + "lcall *%%ss:(%%esi); cld\n\t"
20945 + "push %%ss\n\t"
20946 + "pop %%ds\n\t"
20947 "jc 1f\n\t"
20948 "xor %%ah, %%ah\n"
20949 "1:"
20950 @@ -263,10 +324,14 @@ static int pci_bios_write(unsigned int s
20951 "c" (value),
20952 "b" (bx),
20953 "D" ((long)reg),
20954 - "S" (&pci_indirect));
20955 + "S" (&pci_indirect),
20956 + "r" (__PCIBIOS_DS));
20957 break;
20958 case 2:
20959 - __asm__("lcall *(%%esi); cld\n\t"
20960 + __asm__("movw %w6, %%ds\n\t"
20961 + "lcall *%%ss:(%%esi); cld\n\t"
20962 + "push %%ss\n\t"
20963 + "pop %%ds\n\t"
20964 "jc 1f\n\t"
20965 "xor %%ah, %%ah\n"
20966 "1:"
20967 @@ -275,10 +340,14 @@ static int pci_bios_write(unsigned int s
20968 "c" (value),
20969 "b" (bx),
20970 "D" ((long)reg),
20971 - "S" (&pci_indirect));
20972 + "S" (&pci_indirect),
20973 + "r" (__PCIBIOS_DS));
20974 break;
20975 case 4:
20976 - __asm__("lcall *(%%esi); cld\n\t"
20977 + __asm__("movw %w6, %%ds\n\t"
20978 + "lcall *%%ss:(%%esi); cld\n\t"
20979 + "push %%ss\n\t"
20980 + "pop %%ds\n\t"
20981 "jc 1f\n\t"
20982 "xor %%ah, %%ah\n"
20983 "1:"
20984 @@ -287,7 +356,8 @@ static int pci_bios_write(unsigned int s
20985 "c" (value),
20986 "b" (bx),
20987 "D" ((long)reg),
20988 - "S" (&pci_indirect));
20989 + "S" (&pci_indirect),
20990 + "r" (__PCIBIOS_DS));
20991 break;
20992 }
20993
20994 @@ -392,10 +462,13 @@ struct irq_routing_table * pcibios_get_i
20995
20996 DBG("PCI: Fetching IRQ routing table... ");
20997 __asm__("push %%es\n\t"
20998 + "movw %w8, %%ds\n\t"
20999 "push %%ds\n\t"
21000 "pop %%es\n\t"
21001 - "lcall *(%%esi); cld\n\t"
21002 + "lcall *%%ss:(%%esi); cld\n\t"
21003 "pop %%es\n\t"
21004 + "push %%ss\n\t"
21005 + "pop %%ds\n"
21006 "jc 1f\n\t"
21007 "xor %%ah, %%ah\n"
21008 "1:"
21009 @@ -406,7 +479,8 @@ struct irq_routing_table * pcibios_get_i
21010 "1" (0),
21011 "D" ((long) &opt),
21012 "S" (&pci_indirect),
21013 - "m" (opt)
21014 + "m" (opt),
21015 + "r" (__PCIBIOS_DS)
21016 : "memory");
21017 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
21018 if (ret & 0xff00)
21019 @@ -430,7 +504,10 @@ int pcibios_set_irq_routing(struct pci_d
21020 {
21021 int ret;
21022
21023 - __asm__("lcall *(%%esi); cld\n\t"
21024 + __asm__("movw %w5, %%ds\n\t"
21025 + "lcall *%%ss:(%%esi); cld\n\t"
21026 + "push %%ss\n\t"
21027 + "pop %%ds\n"
21028 "jc 1f\n\t"
21029 "xor %%ah, %%ah\n"
21030 "1:"
21031 @@ -438,7 +515,8 @@ int pcibios_set_irq_routing(struct pci_d
21032 : "0" (PCIBIOS_SET_PCI_HW_INT),
21033 "b" ((dev->bus->number << 8) | dev->devfn),
21034 "c" ((irq << 8) | (pin + 10)),
21035 - "S" (&pci_indirect));
21036 + "S" (&pci_indirect),
21037 + "r" (__PCIBIOS_DS));
21038 return !(ret & 0xff00);
21039 }
21040 EXPORT_SYMBOL(pcibios_set_irq_routing);
21041 diff -urNp linux-2.6.39.4/arch/x86/platform/efi/efi_32.c linux-2.6.39.4/arch/x86/platform/efi/efi_32.c
21042 --- linux-2.6.39.4/arch/x86/platform/efi/efi_32.c 2011-05-19 00:06:34.000000000 -0400
21043 +++ linux-2.6.39.4/arch/x86/platform/efi/efi_32.c 2011-08-05 19:44:35.000000000 -0400
21044 @@ -38,70 +38,37 @@
21045 */
21046
21047 static unsigned long efi_rt_eflags;
21048 -static pgd_t efi_bak_pg_dir_pointer[2];
21049 +static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
21050
21051 -void efi_call_phys_prelog(void)
21052 +void __init efi_call_phys_prelog(void)
21053 {
21054 - unsigned long cr4;
21055 - unsigned long temp;
21056 struct desc_ptr gdt_descr;
21057
21058 local_irq_save(efi_rt_eflags);
21059
21060 - /*
21061 - * If I don't have PAE, I should just duplicate two entries in page
21062 - * directory. If I have PAE, I just need to duplicate one entry in
21063 - * page directory.
21064 - */
21065 - cr4 = read_cr4_safe();
21066 -
21067 - if (cr4 & X86_CR4_PAE) {
21068 - efi_bak_pg_dir_pointer[0].pgd =
21069 - swapper_pg_dir[pgd_index(0)].pgd;
21070 - swapper_pg_dir[0].pgd =
21071 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21072 - } else {
21073 - efi_bak_pg_dir_pointer[0].pgd =
21074 - swapper_pg_dir[pgd_index(0)].pgd;
21075 - efi_bak_pg_dir_pointer[1].pgd =
21076 - swapper_pg_dir[pgd_index(0x400000)].pgd;
21077 - swapper_pg_dir[pgd_index(0)].pgd =
21078 - swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
21079 - temp = PAGE_OFFSET + 0x400000;
21080 - swapper_pg_dir[pgd_index(0x400000)].pgd =
21081 - swapper_pg_dir[pgd_index(temp)].pgd;
21082 - }
21083 + clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
21084 + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21085 + min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
21086
21087 /*
21088 * After the lock is released, the original page table is restored.
21089 */
21090 __flush_tlb_all();
21091
21092 - gdt_descr.address = __pa(get_cpu_gdt_table(0));
21093 + gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
21094 gdt_descr.size = GDT_SIZE - 1;
21095 load_gdt(&gdt_descr);
21096 }
21097
21098 -void efi_call_phys_epilog(void)
21099 +void __init efi_call_phys_epilog(void)
21100 {
21101 - unsigned long cr4;
21102 struct desc_ptr gdt_descr;
21103
21104 - gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
21105 + gdt_descr.address = get_cpu_gdt_table(0);
21106 gdt_descr.size = GDT_SIZE - 1;
21107 load_gdt(&gdt_descr);
21108
21109 - cr4 = read_cr4_safe();
21110 -
21111 - if (cr4 & X86_CR4_PAE) {
21112 - swapper_pg_dir[pgd_index(0)].pgd =
21113 - efi_bak_pg_dir_pointer[0].pgd;
21114 - } else {
21115 - swapper_pg_dir[pgd_index(0)].pgd =
21116 - efi_bak_pg_dir_pointer[0].pgd;
21117 - swapper_pg_dir[pgd_index(0x400000)].pgd =
21118 - efi_bak_pg_dir_pointer[1].pgd;
21119 - }
21120 + clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
21121
21122 /*
21123 * After the lock is released, the original page table is restored.
21124 diff -urNp linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S
21125 --- linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S 2011-05-19 00:06:34.000000000 -0400
21126 +++ linux-2.6.39.4/arch/x86/platform/efi/efi_stub_32.S 2011-08-05 19:44:35.000000000 -0400
21127 @@ -6,6 +6,7 @@
21128 */
21129
21130 #include <linux/linkage.h>
21131 +#include <linux/init.h>
21132 #include <asm/page_types.h>
21133
21134 /*
21135 @@ -20,7 +21,7 @@
21136 * service functions will comply with gcc calling convention, too.
21137 */
21138
21139 -.text
21140 +__INIT
21141 ENTRY(efi_call_phys)
21142 /*
21143 * 0. The function can only be called in Linux kernel. So CS has been
21144 @@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
21145 * The mapping of lower virtual memory has been created in prelog and
21146 * epilog.
21147 */
21148 - movl $1f, %edx
21149 - subl $__PAGE_OFFSET, %edx
21150 - jmp *%edx
21151 + jmp 1f-__PAGE_OFFSET
21152 1:
21153
21154 /*
21155 @@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
21156 * parameter 2, ..., param n. To make things easy, we save the return
21157 * address of efi_call_phys in a global variable.
21158 */
21159 - popl %edx
21160 - movl %edx, saved_return_addr
21161 - /* get the function pointer into ECX*/
21162 - popl %ecx
21163 - movl %ecx, efi_rt_function_ptr
21164 - movl $2f, %edx
21165 - subl $__PAGE_OFFSET, %edx
21166 - pushl %edx
21167 + popl (saved_return_addr)
21168 + popl (efi_rt_function_ptr)
21169
21170 /*
21171 * 3. Clear PG bit in %CR0.
21172 @@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
21173 /*
21174 * 5. Call the physical function.
21175 */
21176 - jmp *%ecx
21177 + call *(efi_rt_function_ptr-__PAGE_OFFSET)
21178
21179 -2:
21180 /*
21181 * 6. After EFI runtime service returns, control will return to
21182 * following instruction. We'd better readjust stack pointer first.
21183 @@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
21184 movl %cr0, %edx
21185 orl $0x80000000, %edx
21186 movl %edx, %cr0
21187 - jmp 1f
21188 -1:
21189 +
21190 /*
21191 * 8. Now restore the virtual mode from flat mode by
21192 * adding EIP with PAGE_OFFSET.
21193 */
21194 - movl $1f, %edx
21195 - jmp *%edx
21196 + jmp 1f+__PAGE_OFFSET
21197 1:
21198
21199 /*
21200 * 9. Balance the stack. And because EAX contain the return value,
21201 * we'd better not clobber it.
21202 */
21203 - leal efi_rt_function_ptr, %edx
21204 - movl (%edx), %ecx
21205 - pushl %ecx
21206 + pushl (efi_rt_function_ptr)
21207
21208 /*
21209 - * 10. Push the saved return address onto the stack and return.
21210 + * 10. Return to the saved return address.
21211 */
21212 - leal saved_return_addr, %edx
21213 - movl (%edx), %ecx
21214 - pushl %ecx
21215 - ret
21216 + jmpl *(saved_return_addr)
21217 ENDPROC(efi_call_phys)
21218 .previous
21219
21220 -.data
21221 +__INITDATA
21222 saved_return_addr:
21223 .long 0
21224 efi_rt_function_ptr:
21225 diff -urNp linux-2.6.39.4/arch/x86/platform/mrst/mrst.c linux-2.6.39.4/arch/x86/platform/mrst/mrst.c
21226 --- linux-2.6.39.4/arch/x86/platform/mrst/mrst.c 2011-05-19 00:06:34.000000000 -0400
21227 +++ linux-2.6.39.4/arch/x86/platform/mrst/mrst.c 2011-08-05 20:34:06.000000000 -0400
21228 @@ -239,14 +239,16 @@ static int mrst_i8042_detect(void)
21229 }
21230
21231 /* Reboot and power off are handled by the SCU on a MID device */
21232 -static void mrst_power_off(void)
21233 +static __noreturn void mrst_power_off(void)
21234 {
21235 intel_scu_ipc_simple_command(0xf1, 1);
21236 + BUG();
21237 }
21238
21239 -static void mrst_reboot(void)
21240 +static __noreturn void mrst_reboot(void)
21241 {
21242 intel_scu_ipc_simple_command(0xf1, 0);
21243 + BUG();
21244 }
21245
21246 /*
21247 diff -urNp linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c
21248 --- linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c 2011-05-19 00:06:34.000000000 -0400
21249 +++ linux-2.6.39.4/arch/x86/platform/uv/tlb_uv.c 2011-08-05 19:44:35.000000000 -0400
21250 @@ -342,6 +342,8 @@ static void uv_reset_with_ipi(struct bau
21251 cpumask_t mask;
21252 struct reset_args reset_args;
21253
21254 + pax_track_stack();
21255 +
21256 reset_args.sender = sender;
21257
21258 cpus_clear(mask);
21259 diff -urNp linux-2.6.39.4/arch/x86/power/cpu.c linux-2.6.39.4/arch/x86/power/cpu.c
21260 --- linux-2.6.39.4/arch/x86/power/cpu.c 2011-05-19 00:06:34.000000000 -0400
21261 +++ linux-2.6.39.4/arch/x86/power/cpu.c 2011-08-05 19:44:35.000000000 -0400
21262 @@ -130,7 +130,7 @@ static void do_fpu_end(void)
21263 static void fix_processor_context(void)
21264 {
21265 int cpu = smp_processor_id();
21266 - struct tss_struct *t = &per_cpu(init_tss, cpu);
21267 + struct tss_struct *t = init_tss + cpu;
21268
21269 set_tss_desc(cpu, t); /*
21270 * This just modifies memory; should not be
21271 @@ -140,7 +140,9 @@ static void fix_processor_context(void)
21272 */
21273
21274 #ifdef CONFIG_X86_64
21275 + pax_open_kernel();
21276 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
21277 + pax_close_kernel();
21278
21279 syscall_init(); /* This sets MSR_*STAR and related */
21280 #endif
21281 Binary files linux-2.6.39.4/arch/x86/tools/test_get_len and linux-2.6.39.4/arch/x86/tools/test_get_len differ
21282 diff -urNp linux-2.6.39.4/arch/x86/vdso/Makefile linux-2.6.39.4/arch/x86/vdso/Makefile
21283 --- linux-2.6.39.4/arch/x86/vdso/Makefile 2011-05-19 00:06:34.000000000 -0400
21284 +++ linux-2.6.39.4/arch/x86/vdso/Makefile 2011-08-05 19:44:35.000000000 -0400
21285 @@ -123,7 +123,7 @@ quiet_cmd_vdso = VDSO $@
21286 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
21287 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
21288
21289 -VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21290 +VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
21291 GCOV_PROFILE := n
21292
21293 #
21294 diff -urNp linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c
21295 --- linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c 2011-05-19 00:06:34.000000000 -0400
21296 +++ linux-2.6.39.4/arch/x86/vdso/vclock_gettime.c 2011-08-05 19:44:35.000000000 -0400
21297 @@ -22,24 +22,48 @@
21298 #include <asm/hpet.h>
21299 #include <asm/unistd.h>
21300 #include <asm/io.h>
21301 +#include <asm/fixmap.h>
21302 #include "vextern.h"
21303
21304 #define gtod vdso_vsyscall_gtod_data
21305
21306 +notrace noinline long __vdso_fallback_time(long *t)
21307 +{
21308 + long secs;
21309 + asm volatile("syscall"
21310 + : "=a" (secs)
21311 + : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
21312 + return secs;
21313 +}
21314 +
21315 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
21316 {
21317 long ret;
21318 asm("syscall" : "=a" (ret) :
21319 - "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
21320 + "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
21321 return ret;
21322 }
21323
21324 +notrace static inline cycle_t __vdso_vread_hpet(void)
21325 +{
21326 + return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
21327 +}
21328 +
21329 +notrace static inline cycle_t __vdso_vread_tsc(void)
21330 +{
21331 + cycle_t ret = (cycle_t)vget_cycles();
21332 +
21333 + return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
21334 +}
21335 +
21336 notrace static inline long vgetns(void)
21337 {
21338 long v;
21339 - cycles_t (*vread)(void);
21340 - vread = gtod->clock.vread;
21341 - v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
21342 + if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
21343 + v = __vdso_vread_tsc();
21344 + else
21345 + v = __vdso_vread_hpet();
21346 + v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
21347 return (v * gtod->clock.mult) >> gtod->clock.shift;
21348 }
21349
21350 @@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
21351
21352 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
21353 {
21354 - if (likely(gtod->sysctl_enabled))
21355 + if (likely(gtod->sysctl_enabled &&
21356 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
21357 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
21358 switch (clock) {
21359 case CLOCK_REALTIME:
21360 if (likely(gtod->clock.vread))
21361 @@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
21362 int clock_gettime(clockid_t, struct timespec *)
21363 __attribute__((weak, alias("__vdso_clock_gettime")));
21364
21365 -notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
21366 +notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
21367 {
21368 long ret;
21369 - if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
21370 + asm("syscall" : "=a" (ret) :
21371 + "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
21372 + return ret;
21373 +}
21374 +
21375 +notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
21376 +{
21377 + if (likely(gtod->sysctl_enabled &&
21378 + ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
21379 + (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
21380 + {
21381 if (likely(tv != NULL)) {
21382 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
21383 offsetof(struct timespec, tv_nsec) ||
21384 @@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
21385 }
21386 return 0;
21387 }
21388 - asm("syscall" : "=a" (ret) :
21389 - "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
21390 - return ret;
21391 + return __vdso_fallback_gettimeofday(tv, tz);
21392 }
21393 int gettimeofday(struct timeval *, struct timezone *)
21394 __attribute__((weak, alias("__vdso_gettimeofday")));
21395 diff -urNp linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c
21396 --- linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c 2011-05-19 00:06:34.000000000 -0400
21397 +++ linux-2.6.39.4/arch/x86/vdso/vdso32-setup.c 2011-08-05 19:44:35.000000000 -0400
21398 @@ -25,6 +25,7 @@
21399 #include <asm/tlbflush.h>
21400 #include <asm/vdso.h>
21401 #include <asm/proto.h>
21402 +#include <asm/mman.h>
21403
21404 enum {
21405 VDSO_DISABLED = 0,
21406 @@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
21407 void enable_sep_cpu(void)
21408 {
21409 int cpu = get_cpu();
21410 - struct tss_struct *tss = &per_cpu(init_tss, cpu);
21411 + struct tss_struct *tss = init_tss + cpu;
21412
21413 if (!boot_cpu_has(X86_FEATURE_SEP)) {
21414 put_cpu();
21415 @@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
21416 gate_vma.vm_start = FIXADDR_USER_START;
21417 gate_vma.vm_end = FIXADDR_USER_END;
21418 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
21419 - gate_vma.vm_page_prot = __P101;
21420 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
21421 /*
21422 * Make sure the vDSO gets into every core dump.
21423 * Dumping its contents makes post-mortem fully interpretable later
21424 @@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
21425 if (compat)
21426 addr = VDSO_HIGH_BASE;
21427 else {
21428 - addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
21429 + addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
21430 if (IS_ERR_VALUE(addr)) {
21431 ret = addr;
21432 goto up_fail;
21433 }
21434 }
21435
21436 - current->mm->context.vdso = (void *)addr;
21437 + current->mm->context.vdso = addr;
21438
21439 if (compat_uses_vma || !compat) {
21440 /*
21441 @@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
21442 }
21443
21444 current_thread_info()->sysenter_return =
21445 - VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21446 + (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
21447
21448 up_fail:
21449 if (ret)
21450 - current->mm->context.vdso = NULL;
21451 + current->mm->context.vdso = 0;
21452
21453 up_write(&mm->mmap_sem);
21454
21455 @@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init);
21456
21457 const char *arch_vma_name(struct vm_area_struct *vma)
21458 {
21459 - if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21460 + if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21461 return "[vdso]";
21462 +
21463 +#ifdef CONFIG_PAX_SEGMEXEC
21464 + if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
21465 + return "[vdso]";
21466 +#endif
21467 +
21468 return NULL;
21469 }
21470
21471 @@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
21472 * Check to see if the corresponding task was created in compat vdso
21473 * mode.
21474 */
21475 - if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
21476 + if (mm && mm->context.vdso == VDSO_HIGH_BASE)
21477 return &gate_vma;
21478 return NULL;
21479 }
21480 diff -urNp linux-2.6.39.4/arch/x86/vdso/vdso.lds.S linux-2.6.39.4/arch/x86/vdso/vdso.lds.S
21481 --- linux-2.6.39.4/arch/x86/vdso/vdso.lds.S 2011-05-19 00:06:34.000000000 -0400
21482 +++ linux-2.6.39.4/arch/x86/vdso/vdso.lds.S 2011-08-05 19:44:35.000000000 -0400
21483 @@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
21484 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
21485 #include "vextern.h"
21486 #undef VEXTERN
21487 +
21488 +#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
21489 +VEXTERN(fallback_gettimeofday)
21490 +VEXTERN(fallback_time)
21491 +VEXTERN(getcpu)
21492 +#undef VEXTERN
21493 diff -urNp linux-2.6.39.4/arch/x86/vdso/vextern.h linux-2.6.39.4/arch/x86/vdso/vextern.h
21494 --- linux-2.6.39.4/arch/x86/vdso/vextern.h 2011-05-19 00:06:34.000000000 -0400
21495 +++ linux-2.6.39.4/arch/x86/vdso/vextern.h 2011-08-05 19:44:35.000000000 -0400
21496 @@ -11,6 +11,5 @@
21497 put into vextern.h and be referenced as a pointer with vdso prefix.
21498 The main kernel later fills in the values. */
21499
21500 -VEXTERN(jiffies)
21501 VEXTERN(vgetcpu_mode)
21502 VEXTERN(vsyscall_gtod_data)
21503 diff -urNp linux-2.6.39.4/arch/x86/vdso/vma.c linux-2.6.39.4/arch/x86/vdso/vma.c
21504 --- linux-2.6.39.4/arch/x86/vdso/vma.c 2011-05-19 00:06:34.000000000 -0400
21505 +++ linux-2.6.39.4/arch/x86/vdso/vma.c 2011-08-05 19:44:35.000000000 -0400
21506 @@ -58,7 +58,7 @@ static int __init init_vdso_vars(void)
21507 if (!vbase)
21508 goto oom;
21509
21510 - if (memcmp(vbase, "\177ELF", 4)) {
21511 + if (memcmp(vbase, ELFMAG, SELFMAG)) {
21512 printk("VDSO: I'm broken; not ELF\n");
21513 vdso_enabled = 0;
21514 }
21515 @@ -118,7 +118,7 @@ int arch_setup_additional_pages(struct l
21516 goto up_fail;
21517 }
21518
21519 - current->mm->context.vdso = (void *)addr;
21520 + current->mm->context.vdso = addr;
21521
21522 ret = install_special_mapping(mm, addr, vdso_size,
21523 VM_READ|VM_EXEC|
21524 @@ -126,7 +126,7 @@ int arch_setup_additional_pages(struct l
21525 VM_ALWAYSDUMP,
21526 vdso_pages);
21527 if (ret) {
21528 - current->mm->context.vdso = NULL;
21529 + current->mm->context.vdso = 0;
21530 goto up_fail;
21531 }
21532
21533 @@ -134,10 +134,3 @@ up_fail:
21534 up_write(&mm->mmap_sem);
21535 return ret;
21536 }
21537 -
21538 -static __init int vdso_setup(char *s)
21539 -{
21540 - vdso_enabled = simple_strtoul(s, NULL, 0);
21541 - return 0;
21542 -}
21543 -__setup("vdso=", vdso_setup);
21544 diff -urNp linux-2.6.39.4/arch/x86/xen/enlighten.c linux-2.6.39.4/arch/x86/xen/enlighten.c
21545 --- linux-2.6.39.4/arch/x86/xen/enlighten.c 2011-05-19 00:06:34.000000000 -0400
21546 +++ linux-2.6.39.4/arch/x86/xen/enlighten.c 2011-08-05 19:44:35.000000000 -0400
21547 @@ -85,8 +85,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
21548
21549 struct shared_info xen_dummy_shared_info;
21550
21551 -void *xen_initial_gdt;
21552 -
21553 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
21554 __read_mostly int xen_have_vector_callback;
21555 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
21556 @@ -1010,7 +1008,7 @@ static const struct pv_apic_ops xen_apic
21557 #endif
21558 };
21559
21560 -static void xen_reboot(int reason)
21561 +static __noreturn void xen_reboot(int reason)
21562 {
21563 struct sched_shutdown r = { .reason = reason };
21564
21565 @@ -1018,17 +1016,17 @@ static void xen_reboot(int reason)
21566 BUG();
21567 }
21568
21569 -static void xen_restart(char *msg)
21570 +static __noreturn void xen_restart(char *msg)
21571 {
21572 xen_reboot(SHUTDOWN_reboot);
21573 }
21574
21575 -static void xen_emergency_restart(void)
21576 +static __noreturn void xen_emergency_restart(void)
21577 {
21578 xen_reboot(SHUTDOWN_reboot);
21579 }
21580
21581 -static void xen_machine_halt(void)
21582 +static __noreturn void xen_machine_halt(void)
21583 {
21584 xen_reboot(SHUTDOWN_poweroff);
21585 }
21586 @@ -1127,7 +1125,17 @@ asmlinkage void __init xen_start_kernel(
21587 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
21588
21589 /* Work out if we support NX */
21590 - x86_configure_nx();
21591 +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
21592 + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
21593 + (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
21594 + unsigned l, h;
21595 +
21596 + __supported_pte_mask |= _PAGE_NX;
21597 + rdmsr(MSR_EFER, l, h);
21598 + l |= EFER_NX;
21599 + wrmsr(MSR_EFER, l, h);
21600 + }
21601 +#endif
21602
21603 xen_setup_features();
21604
21605 @@ -1158,13 +1166,6 @@ asmlinkage void __init xen_start_kernel(
21606
21607 machine_ops = xen_machine_ops;
21608
21609 - /*
21610 - * The only reliable way to retain the initial address of the
21611 - * percpu gdt_page is to remember it here, so we can go and
21612 - * mark it RW later, when the initial percpu area is freed.
21613 - */
21614 - xen_initial_gdt = &per_cpu(gdt_page, 0);
21615 -
21616 xen_smp_init();
21617
21618 #ifdef CONFIG_ACPI_NUMA
21619 diff -urNp linux-2.6.39.4/arch/x86/xen/mmu.c linux-2.6.39.4/arch/x86/xen/mmu.c
21620 --- linux-2.6.39.4/arch/x86/xen/mmu.c 2011-07-09 09:18:51.000000000 -0400
21621 +++ linux-2.6.39.4/arch/x86/xen/mmu.c 2011-08-05 19:44:35.000000000 -0400
21622 @@ -1801,6 +1801,8 @@ __init pgd_t *xen_setup_kernel_pagetable
21623 convert_pfn_mfn(init_level4_pgt);
21624 convert_pfn_mfn(level3_ident_pgt);
21625 convert_pfn_mfn(level3_kernel_pgt);
21626 + convert_pfn_mfn(level3_vmalloc_pgt);
21627 + convert_pfn_mfn(level3_vmemmap_pgt);
21628
21629 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
21630 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
21631 @@ -1819,7 +1821,10 @@ __init pgd_t *xen_setup_kernel_pagetable
21632 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
21633 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
21634 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
21635 + set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
21636 + set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
21637 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
21638 + set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
21639 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
21640 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
21641
21642 diff -urNp linux-2.6.39.4/arch/x86/xen/smp.c linux-2.6.39.4/arch/x86/xen/smp.c
21643 --- linux-2.6.39.4/arch/x86/xen/smp.c 2011-07-09 09:18:51.000000000 -0400
21644 +++ linux-2.6.39.4/arch/x86/xen/smp.c 2011-08-05 19:44:35.000000000 -0400
21645 @@ -194,11 +194,6 @@ static void __init xen_smp_prepare_boot_
21646 {
21647 BUG_ON(smp_processor_id() != 0);
21648 native_smp_prepare_boot_cpu();
21649 -
21650 - /* We've switched to the "real" per-cpu gdt, so make sure the
21651 - old memory can be recycled */
21652 - make_lowmem_page_readwrite(xen_initial_gdt);
21653 -
21654 xen_filter_cpu_maps();
21655 xen_setup_vcpu_info_placement();
21656 }
21657 @@ -266,12 +261,12 @@ cpu_initialize_context(unsigned int cpu,
21658 gdt = get_cpu_gdt_table(cpu);
21659
21660 ctxt->flags = VGCF_IN_KERNEL;
21661 - ctxt->user_regs.ds = __USER_DS;
21662 - ctxt->user_regs.es = __USER_DS;
21663 + ctxt->user_regs.ds = __KERNEL_DS;
21664 + ctxt->user_regs.es = __KERNEL_DS;
21665 ctxt->user_regs.ss = __KERNEL_DS;
21666 #ifdef CONFIG_X86_32
21667 ctxt->user_regs.fs = __KERNEL_PERCPU;
21668 - ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
21669 + savesegment(gs, ctxt->user_regs.gs);
21670 #else
21671 ctxt->gs_base_kernel = per_cpu_offset(cpu);
21672 #endif
21673 @@ -322,13 +317,12 @@ static int __cpuinit xen_cpu_up(unsigned
21674 int rc;
21675
21676 per_cpu(current_task, cpu) = idle;
21677 + per_cpu(current_tinfo, cpu) = &idle->tinfo;
21678 #ifdef CONFIG_X86_32
21679 irq_ctx_init(cpu);
21680 #else
21681 clear_tsk_thread_flag(idle, TIF_FORK);
21682 - per_cpu(kernel_stack, cpu) =
21683 - (unsigned long)task_stack_page(idle) -
21684 - KERNEL_STACK_OFFSET + THREAD_SIZE;
21685 + per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
21686 #endif
21687 xen_setup_runstate_info(cpu);
21688 xen_setup_timer(cpu);
21689 diff -urNp linux-2.6.39.4/arch/x86/xen/xen-asm_32.S linux-2.6.39.4/arch/x86/xen/xen-asm_32.S
21690 --- linux-2.6.39.4/arch/x86/xen/xen-asm_32.S 2011-05-19 00:06:34.000000000 -0400
21691 +++ linux-2.6.39.4/arch/x86/xen/xen-asm_32.S 2011-08-05 19:44:35.000000000 -0400
21692 @@ -83,14 +83,14 @@ ENTRY(xen_iret)
21693 ESP_OFFSET=4 # bytes pushed onto stack
21694
21695 /*
21696 - * Store vcpu_info pointer for easy access. Do it this way to
21697 - * avoid having to reload %fs
21698 + * Store vcpu_info pointer for easy access.
21699 */
21700 #ifdef CONFIG_SMP
21701 - GET_THREAD_INFO(%eax)
21702 - movl TI_cpu(%eax), %eax
21703 - movl __per_cpu_offset(,%eax,4), %eax
21704 - mov xen_vcpu(%eax), %eax
21705 + push %fs
21706 + mov $(__KERNEL_PERCPU), %eax
21707 + mov %eax, %fs
21708 + mov PER_CPU_VAR(xen_vcpu), %eax
21709 + pop %fs
21710 #else
21711 movl xen_vcpu, %eax
21712 #endif
21713 diff -urNp linux-2.6.39.4/arch/x86/xen/xen-head.S linux-2.6.39.4/arch/x86/xen/xen-head.S
21714 --- linux-2.6.39.4/arch/x86/xen/xen-head.S 2011-05-19 00:06:34.000000000 -0400
21715 +++ linux-2.6.39.4/arch/x86/xen/xen-head.S 2011-08-05 19:44:35.000000000 -0400
21716 @@ -19,6 +19,17 @@ ENTRY(startup_xen)
21717 #ifdef CONFIG_X86_32
21718 mov %esi,xen_start_info
21719 mov $init_thread_union+THREAD_SIZE,%esp
21720 +#ifdef CONFIG_SMP
21721 + movl $cpu_gdt_table,%edi
21722 + movl $__per_cpu_load,%eax
21723 + movw %ax,__KERNEL_PERCPU + 2(%edi)
21724 + rorl $16,%eax
21725 + movb %al,__KERNEL_PERCPU + 4(%edi)
21726 + movb %ah,__KERNEL_PERCPU + 7(%edi)
21727 + movl $__per_cpu_end - 1,%eax
21728 + subl $__per_cpu_start,%eax
21729 + movw %ax,__KERNEL_PERCPU + 0(%edi)
21730 +#endif
21731 #else
21732 mov %rsi,xen_start_info
21733 mov $init_thread_union+THREAD_SIZE,%rsp
21734 diff -urNp linux-2.6.39.4/arch/x86/xen/xen-ops.h linux-2.6.39.4/arch/x86/xen/xen-ops.h
21735 --- linux-2.6.39.4/arch/x86/xen/xen-ops.h 2011-05-19 00:06:34.000000000 -0400
21736 +++ linux-2.6.39.4/arch/x86/xen/xen-ops.h 2011-08-05 19:44:35.000000000 -0400
21737 @@ -10,8 +10,6 @@
21738 extern const char xen_hypervisor_callback[];
21739 extern const char xen_failsafe_callback[];
21740
21741 -extern void *xen_initial_gdt;
21742 -
21743 struct trap_info;
21744 void xen_copy_trap_info(struct trap_info *traps);
21745
21746 diff -urNp linux-2.6.39.4/block/blk-iopoll.c linux-2.6.39.4/block/blk-iopoll.c
21747 --- linux-2.6.39.4/block/blk-iopoll.c 2011-05-19 00:06:34.000000000 -0400
21748 +++ linux-2.6.39.4/block/blk-iopoll.c 2011-08-05 19:44:35.000000000 -0400
21749 @@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
21750 }
21751 EXPORT_SYMBOL(blk_iopoll_complete);
21752
21753 -static void blk_iopoll_softirq(struct softirq_action *h)
21754 +static void blk_iopoll_softirq(void)
21755 {
21756 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
21757 int rearm = 0, budget = blk_iopoll_budget;
21758 diff -urNp linux-2.6.39.4/block/blk-map.c linux-2.6.39.4/block/blk-map.c
21759 --- linux-2.6.39.4/block/blk-map.c 2011-05-19 00:06:34.000000000 -0400
21760 +++ linux-2.6.39.4/block/blk-map.c 2011-08-05 19:44:35.000000000 -0400
21761 @@ -301,7 +301,7 @@ int blk_rq_map_kern(struct request_queue
21762 if (!len || !kbuf)
21763 return -EINVAL;
21764
21765 - do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
21766 + do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
21767 if (do_copy)
21768 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
21769 else
21770 diff -urNp linux-2.6.39.4/block/blk-softirq.c linux-2.6.39.4/block/blk-softirq.c
21771 --- linux-2.6.39.4/block/blk-softirq.c 2011-05-19 00:06:34.000000000 -0400
21772 +++ linux-2.6.39.4/block/blk-softirq.c 2011-08-05 19:44:35.000000000 -0400
21773 @@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
21774 * Softirq action handler - move entries to local list and loop over them
21775 * while passing them to the queue registered handler.
21776 */
21777 -static void blk_done_softirq(struct softirq_action *h)
21778 +static void blk_done_softirq(void)
21779 {
21780 struct list_head *cpu_list, local_list;
21781
21782 diff -urNp linux-2.6.39.4/block/bsg.c linux-2.6.39.4/block/bsg.c
21783 --- linux-2.6.39.4/block/bsg.c 2011-05-19 00:06:34.000000000 -0400
21784 +++ linux-2.6.39.4/block/bsg.c 2011-08-05 19:44:35.000000000 -0400
21785 @@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
21786 struct sg_io_v4 *hdr, struct bsg_device *bd,
21787 fmode_t has_write_perm)
21788 {
21789 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21790 + unsigned char *cmdptr;
21791 +
21792 if (hdr->request_len > BLK_MAX_CDB) {
21793 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
21794 if (!rq->cmd)
21795 return -ENOMEM;
21796 - }
21797 + cmdptr = rq->cmd;
21798 + } else
21799 + cmdptr = tmpcmd;
21800
21801 - if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
21802 + if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
21803 hdr->request_len))
21804 return -EFAULT;
21805
21806 + if (cmdptr != rq->cmd)
21807 + memcpy(rq->cmd, cmdptr, hdr->request_len);
21808 +
21809 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
21810 if (blk_verify_command(rq->cmd, has_write_perm))
21811 return -EPERM;
21812 diff -urNp linux-2.6.39.4/block/scsi_ioctl.c linux-2.6.39.4/block/scsi_ioctl.c
21813 --- linux-2.6.39.4/block/scsi_ioctl.c 2011-05-19 00:06:34.000000000 -0400
21814 +++ linux-2.6.39.4/block/scsi_ioctl.c 2011-08-05 19:44:35.000000000 -0400
21815 @@ -222,8 +222,20 @@ EXPORT_SYMBOL(blk_verify_command);
21816 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
21817 struct sg_io_hdr *hdr, fmode_t mode)
21818 {
21819 - if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
21820 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21821 + unsigned char *cmdptr;
21822 +
21823 + if (rq->cmd != rq->__cmd)
21824 + cmdptr = rq->cmd;
21825 + else
21826 + cmdptr = tmpcmd;
21827 +
21828 + if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
21829 return -EFAULT;
21830 +
21831 + if (cmdptr != rq->cmd)
21832 + memcpy(rq->cmd, cmdptr, hdr->cmd_len);
21833 +
21834 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
21835 return -EPERM;
21836
21837 @@ -432,6 +444,8 @@ int sg_scsi_ioctl(struct request_queue *
21838 int err;
21839 unsigned int in_len, out_len, bytes, opcode, cmdlen;
21840 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
21841 + unsigned char tmpcmd[sizeof(rq->__cmd)];
21842 + unsigned char *cmdptr;
21843
21844 if (!sic)
21845 return -EINVAL;
21846 @@ -465,9 +479,18 @@ int sg_scsi_ioctl(struct request_queue *
21847 */
21848 err = -EFAULT;
21849 rq->cmd_len = cmdlen;
21850 - if (copy_from_user(rq->cmd, sic->data, cmdlen))
21851 +
21852 + if (rq->cmd != rq->__cmd)
21853 + cmdptr = rq->cmd;
21854 + else
21855 + cmdptr = tmpcmd;
21856 +
21857 + if (copy_from_user(cmdptr, sic->data, cmdlen))
21858 goto error;
21859
21860 + if (rq->cmd != cmdptr)
21861 + memcpy(rq->cmd, cmdptr, cmdlen);
21862 +
21863 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
21864 goto error;
21865
21866 diff -urNp linux-2.6.39.4/crypto/cryptd.c linux-2.6.39.4/crypto/cryptd.c
21867 --- linux-2.6.39.4/crypto/cryptd.c 2011-05-19 00:06:34.000000000 -0400
21868 +++ linux-2.6.39.4/crypto/cryptd.c 2011-08-05 20:34:06.000000000 -0400
21869 @@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
21870
21871 struct cryptd_blkcipher_request_ctx {
21872 crypto_completion_t complete;
21873 -};
21874 +} __no_const;
21875
21876 struct cryptd_hash_ctx {
21877 struct crypto_shash *child;
21878 @@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
21879
21880 struct cryptd_aead_request_ctx {
21881 crypto_completion_t complete;
21882 -};
21883 +} __no_const;
21884
21885 static void cryptd_queue_worker(struct work_struct *work);
21886
21887 diff -urNp linux-2.6.39.4/crypto/gf128mul.c linux-2.6.39.4/crypto/gf128mul.c
21888 --- linux-2.6.39.4/crypto/gf128mul.c 2011-05-19 00:06:34.000000000 -0400
21889 +++ linux-2.6.39.4/crypto/gf128mul.c 2011-08-05 19:44:35.000000000 -0400
21890 @@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
21891 for (i = 0; i < 7; ++i)
21892 gf128mul_x_lle(&p[i + 1], &p[i]);
21893
21894 - memset(r, 0, sizeof(r));
21895 + memset(r, 0, sizeof(*r));
21896 for (i = 0;;) {
21897 u8 ch = ((u8 *)b)[15 - i];
21898
21899 @@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
21900 for (i = 0; i < 7; ++i)
21901 gf128mul_x_bbe(&p[i + 1], &p[i]);
21902
21903 - memset(r, 0, sizeof(r));
21904 + memset(r, 0, sizeof(*r));
21905 for (i = 0;;) {
21906 u8 ch = ((u8 *)b)[i];
21907
21908 diff -urNp linux-2.6.39.4/crypto/serpent.c linux-2.6.39.4/crypto/serpent.c
21909 --- linux-2.6.39.4/crypto/serpent.c 2011-05-19 00:06:34.000000000 -0400
21910 +++ linux-2.6.39.4/crypto/serpent.c 2011-08-05 19:44:35.000000000 -0400
21911 @@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
21912 u32 r0,r1,r2,r3,r4;
21913 int i;
21914
21915 + pax_track_stack();
21916 +
21917 /* Copy key, add padding */
21918
21919 for (i = 0; i < keylen; ++i)
21920 diff -urNp linux-2.6.39.4/Documentation/dontdiff linux-2.6.39.4/Documentation/dontdiff
21921 --- linux-2.6.39.4/Documentation/dontdiff 2011-05-19 00:06:34.000000000 -0400
21922 +++ linux-2.6.39.4/Documentation/dontdiff 2011-08-05 19:44:35.000000000 -0400
21923 @@ -1,13 +1,16 @@
21924 *.a
21925 *.aux
21926 *.bin
21927 +*.cis
21928 *.cpio
21929 *.csp
21930 +*.dbg
21931 *.dsp
21932 *.dvi
21933 *.elf
21934 *.eps
21935 *.fw
21936 +*.gcno
21937 *.gen.S
21938 *.gif
21939 *.grep
21940 @@ -38,8 +41,10 @@
21941 *.tab.h
21942 *.tex
21943 *.ver
21944 +*.vim
21945 *.xml
21946 *_MODULES
21947 +*_reg_safe.h
21948 *_vga16.c
21949 *~
21950 *.9
21951 @@ -49,11 +54,16 @@
21952 53c700_d.h
21953 CVS
21954 ChangeSet
21955 +GPATH
21956 +GRTAGS
21957 +GSYMS
21958 +GTAGS
21959 Image
21960 Kerntypes
21961 Module.markers
21962 Module.symvers
21963 PENDING
21964 +PERF*
21965 SCCS
21966 System.map*
21967 TAGS
21968 @@ -80,8 +90,11 @@ btfixupprep
21969 build
21970 bvmlinux
21971 bzImage*
21972 +capability_names.h
21973 capflags.c
21974 classlist.h*
21975 +clut_vga16.c
21976 +common-cmds.h
21977 comp*.log
21978 compile.h*
21979 conf
21980 @@ -106,16 +119,19 @@ fore200e_mkfirm
21981 fore200e_pca_fw.c*
21982 gconf
21983 gen-devlist
21984 +gen-kdb_cmds.c
21985 gen_crc32table
21986 gen_init_cpio
21987 generated
21988 genheaders
21989 genksyms
21990 *_gray256.c
21991 +hash
21992 ihex2fw
21993 ikconfig.h*
21994 inat-tables.c
21995 initramfs_data.cpio
21996 +initramfs_data.cpio.bz2
21997 initramfs_data.cpio.gz
21998 initramfs_list
21999 int16.c
22000 @@ -125,7 +141,6 @@ int32.c
22001 int4.c
22002 int8.c
22003 kallsyms
22004 -kconfig
22005 keywords.c
22006 ksym.c*
22007 ksym.h*
22008 @@ -149,7 +164,9 @@ mkboot
22009 mkbugboot
22010 mkcpustr
22011 mkdep
22012 +mkpiggy
22013 mkprep
22014 +mkregtable
22015 mktables
22016 mktree
22017 modpost
22018 @@ -165,6 +182,7 @@ parse.h
22019 patches*
22020 pca200e.bin
22021 pca200e_ecd.bin2
22022 +perf-archive
22023 piggy.gz
22024 piggyback
22025 piggy.S
22026 @@ -180,7 +198,9 @@ r600_reg_safe.h
22027 raid6altivec*.c
22028 raid6int*.c
22029 raid6tables.c
22030 +regdb.c
22031 relocs
22032 +rlim_names.h
22033 rn50_reg_safe.h
22034 rs600_reg_safe.h
22035 rv515_reg_safe.h
22036 @@ -189,6 +209,7 @@ setup
22037 setup.bin
22038 setup.elf
22039 sImage
22040 +slabinfo
22041 sm_tbl*
22042 split-include
22043 syscalltab.h
22044 @@ -213,13 +234,17 @@ version.h*
22045 vmlinux
22046 vmlinux-*
22047 vmlinux.aout
22048 +vmlinux.bin.all
22049 +vmlinux.bin.bz2
22050 vmlinux.lds
22051 +vmlinux.relocs
22052 voffset.h
22053 vsyscall.lds
22054 vsyscall_32.lds
22055 wanxlfw.inc
22056 uImage
22057 unifdef
22058 +utsrelease.h
22059 wakeup.bin
22060 wakeup.elf
22061 wakeup.lds
22062 diff -urNp linux-2.6.39.4/Documentation/kernel-parameters.txt linux-2.6.39.4/Documentation/kernel-parameters.txt
22063 --- linux-2.6.39.4/Documentation/kernel-parameters.txt 2011-06-25 12:55:22.000000000 -0400
22064 +++ linux-2.6.39.4/Documentation/kernel-parameters.txt 2011-08-05 19:44:35.000000000 -0400
22065 @@ -1879,6 +1879,13 @@ bytes respectively. Such letter suffixes
22066 the specified number of seconds. This is to be used if
22067 your oopses keep scrolling off the screen.
22068
22069 + pax_nouderef [X86] disables UDEREF. Most likely needed under certain
22070 + virtualization environments that don't cope well with the
22071 + expand down segment used by UDEREF on X86-32 or the frequent
22072 + page table updates on X86-64.
22073 +
22074 + pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
22075 +
22076 pcbit= [HW,ISDN]
22077
22078 pcd. [PARIDE]
22079 diff -urNp linux-2.6.39.4/drivers/acpi/apei/cper.c linux-2.6.39.4/drivers/acpi/apei/cper.c
22080 --- linux-2.6.39.4/drivers/acpi/apei/cper.c 2011-05-19 00:06:34.000000000 -0400
22081 +++ linux-2.6.39.4/drivers/acpi/apei/cper.c 2011-08-05 19:44:35.000000000 -0400
22082 @@ -38,12 +38,12 @@
22083 */
22084 u64 cper_next_record_id(void)
22085 {
22086 - static atomic64_t seq;
22087 + static atomic64_unchecked_t seq;
22088
22089 - if (!atomic64_read(&seq))
22090 - atomic64_set(&seq, ((u64)get_seconds()) << 32);
22091 + if (!atomic64_read_unchecked(&seq))
22092 + atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
22093
22094 - return atomic64_inc_return(&seq);
22095 + return atomic64_inc_return_unchecked(&seq);
22096 }
22097 EXPORT_SYMBOL_GPL(cper_next_record_id);
22098
22099 diff -urNp linux-2.6.39.4/drivers/acpi/power_meter.c linux-2.6.39.4/drivers/acpi/power_meter.c
22100 --- linux-2.6.39.4/drivers/acpi/power_meter.c 2011-05-19 00:06:34.000000000 -0400
22101 +++ linux-2.6.39.4/drivers/acpi/power_meter.c 2011-08-05 19:44:35.000000000 -0400
22102 @@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d
22103 return res;
22104
22105 temp /= 1000;
22106 - if (temp < 0)
22107 - return -EINVAL;
22108
22109 mutex_lock(&resource->lock);
22110 resource->trip[attr->index - 7] = temp;
22111 diff -urNp linux-2.6.39.4/drivers/acpi/proc.c linux-2.6.39.4/drivers/acpi/proc.c
22112 --- linux-2.6.39.4/drivers/acpi/proc.c 2011-05-19 00:06:34.000000000 -0400
22113 +++ linux-2.6.39.4/drivers/acpi/proc.c 2011-08-05 19:44:35.000000000 -0400
22114 @@ -342,19 +342,13 @@ acpi_system_write_wakeup_device(struct f
22115 size_t count, loff_t * ppos)
22116 {
22117 struct list_head *node, *next;
22118 - char strbuf[5];
22119 - char str[5] = "";
22120 - unsigned int len = count;
22121 -
22122 - if (len > 4)
22123 - len = 4;
22124 - if (len < 0)
22125 - return -EFAULT;
22126 + char strbuf[5] = {0};
22127
22128 - if (copy_from_user(strbuf, buffer, len))
22129 + if (count > 4)
22130 + count = 4;
22131 + if (copy_from_user(strbuf, buffer, count))
22132 return -EFAULT;
22133 - strbuf[len] = '\0';
22134 - sscanf(strbuf, "%s", str);
22135 + strbuf[count] = '\0';
22136
22137 mutex_lock(&acpi_device_lock);
22138 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
22139 @@ -363,7 +357,7 @@ acpi_system_write_wakeup_device(struct f
22140 if (!dev->wakeup.flags.valid)
22141 continue;
22142
22143 - if (!strncmp(dev->pnp.bus_id, str, 4)) {
22144 + if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
22145 if (device_can_wakeup(&dev->dev)) {
22146 bool enable = !device_may_wakeup(&dev->dev);
22147 device_set_wakeup_enable(&dev->dev, enable);
22148 diff -urNp linux-2.6.39.4/drivers/acpi/processor_driver.c linux-2.6.39.4/drivers/acpi/processor_driver.c
22149 --- linux-2.6.39.4/drivers/acpi/processor_driver.c 2011-05-19 00:06:34.000000000 -0400
22150 +++ linux-2.6.39.4/drivers/acpi/processor_driver.c 2011-08-05 19:44:35.000000000 -0400
22151 @@ -473,7 +473,7 @@ static int __cpuinit acpi_processor_add(
22152 return 0;
22153 #endif
22154
22155 - BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
22156 + BUG_ON(pr->id >= nr_cpu_ids);
22157
22158 /*
22159 * Buggy BIOS check
22160 diff -urNp linux-2.6.39.4/drivers/ata/libata-core.c linux-2.6.39.4/drivers/ata/libata-core.c
22161 --- linux-2.6.39.4/drivers/ata/libata-core.c 2011-05-19 00:06:34.000000000 -0400
22162 +++ linux-2.6.39.4/drivers/ata/libata-core.c 2011-08-05 20:34:06.000000000 -0400
22163 @@ -4747,7 +4747,7 @@ void ata_qc_free(struct ata_queued_cmd *
22164 struct ata_port *ap;
22165 unsigned int tag;
22166
22167 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22168 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22169 ap = qc->ap;
22170
22171 qc->flags = 0;
22172 @@ -4763,7 +4763,7 @@ void __ata_qc_complete(struct ata_queued
22173 struct ata_port *ap;
22174 struct ata_link *link;
22175
22176 - WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22177 + BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
22178 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
22179 ap = qc->ap;
22180 link = qc->dev->link;
22181 @@ -5768,6 +5768,7 @@ static void ata_finalize_port_ops(struct
22182 return;
22183
22184 spin_lock(&lock);
22185 + pax_open_kernel();
22186
22187 for (cur = ops->inherits; cur; cur = cur->inherits) {
22188 void **inherit = (void **)cur;
22189 @@ -5781,8 +5782,9 @@ static void ata_finalize_port_ops(struct
22190 if (IS_ERR(*pp))
22191 *pp = NULL;
22192
22193 - ops->inherits = NULL;
22194 + *(struct ata_port_operations **)&ops->inherits = NULL;
22195
22196 + pax_close_kernel();
22197 spin_unlock(&lock);
22198 }
22199
22200 diff -urNp linux-2.6.39.4/drivers/ata/libata-eh.c linux-2.6.39.4/drivers/ata/libata-eh.c
22201 --- linux-2.6.39.4/drivers/ata/libata-eh.c 2011-08-05 21:11:51.000000000 -0400
22202 +++ linux-2.6.39.4/drivers/ata/libata-eh.c 2011-08-05 21:12:20.000000000 -0400
22203 @@ -2518,6 +2518,8 @@ void ata_eh_report(struct ata_port *ap)
22204 {
22205 struct ata_link *link;
22206
22207 + pax_track_stack();
22208 +
22209 ata_for_each_link(link, ap, HOST_FIRST)
22210 ata_eh_link_report(link);
22211 }
22212 diff -urNp linux-2.6.39.4/drivers/ata/pata_arasan_cf.c linux-2.6.39.4/drivers/ata/pata_arasan_cf.c
22213 --- linux-2.6.39.4/drivers/ata/pata_arasan_cf.c 2011-05-19 00:06:34.000000000 -0400
22214 +++ linux-2.6.39.4/drivers/ata/pata_arasan_cf.c 2011-08-05 20:34:06.000000000 -0400
22215 @@ -862,7 +862,9 @@ static int __devinit arasan_cf_probe(str
22216 /* Handle platform specific quirks */
22217 if (pdata->quirk) {
22218 if (pdata->quirk & CF_BROKEN_PIO) {
22219 - ap->ops->set_piomode = NULL;
22220 + pax_open_kernel();
22221 + *(void **)&ap->ops->set_piomode = NULL;
22222 + pax_close_kernel();
22223 ap->pio_mask = 0;
22224 }
22225 if (pdata->quirk & CF_BROKEN_MWDMA)
22226 diff -urNp linux-2.6.39.4/drivers/atm/adummy.c linux-2.6.39.4/drivers/atm/adummy.c
22227 --- linux-2.6.39.4/drivers/atm/adummy.c 2011-05-19 00:06:34.000000000 -0400
22228 +++ linux-2.6.39.4/drivers/atm/adummy.c 2011-08-05 19:44:36.000000000 -0400
22229 @@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct
22230 vcc->pop(vcc, skb);
22231 else
22232 dev_kfree_skb_any(skb);
22233 - atomic_inc(&vcc->stats->tx);
22234 + atomic_inc_unchecked(&vcc->stats->tx);
22235
22236 return 0;
22237 }
22238 diff -urNp linux-2.6.39.4/drivers/atm/ambassador.c linux-2.6.39.4/drivers/atm/ambassador.c
22239 --- linux-2.6.39.4/drivers/atm/ambassador.c 2011-05-19 00:06:34.000000000 -0400
22240 +++ linux-2.6.39.4/drivers/atm/ambassador.c 2011-08-05 19:44:36.000000000 -0400
22241 @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev,
22242 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
22243
22244 // VC layer stats
22245 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22246 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22247
22248 // free the descriptor
22249 kfree (tx_descr);
22250 @@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev,
22251 dump_skb ("<<<", vc, skb);
22252
22253 // VC layer stats
22254 - atomic_inc(&atm_vcc->stats->rx);
22255 + atomic_inc_unchecked(&atm_vcc->stats->rx);
22256 __net_timestamp(skb);
22257 // end of our responsibility
22258 atm_vcc->push (atm_vcc, skb);
22259 @@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev,
22260 } else {
22261 PRINTK (KERN_INFO, "dropped over-size frame");
22262 // should we count this?
22263 - atomic_inc(&atm_vcc->stats->rx_drop);
22264 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22265 }
22266
22267 } else {
22268 @@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at
22269 }
22270
22271 if (check_area (skb->data, skb->len)) {
22272 - atomic_inc(&atm_vcc->stats->tx_err);
22273 + atomic_inc_unchecked(&atm_vcc->stats->tx_err);
22274 return -ENOMEM; // ?
22275 }
22276
22277 diff -urNp linux-2.6.39.4/drivers/atm/atmtcp.c linux-2.6.39.4/drivers/atm/atmtcp.c
22278 --- linux-2.6.39.4/drivers/atm/atmtcp.c 2011-05-19 00:06:34.000000000 -0400
22279 +++ linux-2.6.39.4/drivers/atm/atmtcp.c 2011-08-05 19:44:36.000000000 -0400
22280 @@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc
22281 if (vcc->pop) vcc->pop(vcc,skb);
22282 else dev_kfree_skb(skb);
22283 if (dev_data) return 0;
22284 - atomic_inc(&vcc->stats->tx_err);
22285 + atomic_inc_unchecked(&vcc->stats->tx_err);
22286 return -ENOLINK;
22287 }
22288 size = skb->len+sizeof(struct atmtcp_hdr);
22289 @@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc
22290 if (!new_skb) {
22291 if (vcc->pop) vcc->pop(vcc,skb);
22292 else dev_kfree_skb(skb);
22293 - atomic_inc(&vcc->stats->tx_err);
22294 + atomic_inc_unchecked(&vcc->stats->tx_err);
22295 return -ENOBUFS;
22296 }
22297 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
22298 @@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc
22299 if (vcc->pop) vcc->pop(vcc,skb);
22300 else dev_kfree_skb(skb);
22301 out_vcc->push(out_vcc,new_skb);
22302 - atomic_inc(&vcc->stats->tx);
22303 - atomic_inc(&out_vcc->stats->rx);
22304 + atomic_inc_unchecked(&vcc->stats->tx);
22305 + atomic_inc_unchecked(&out_vcc->stats->rx);
22306 return 0;
22307 }
22308
22309 @@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc
22310 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
22311 read_unlock(&vcc_sklist_lock);
22312 if (!out_vcc) {
22313 - atomic_inc(&vcc->stats->tx_err);
22314 + atomic_inc_unchecked(&vcc->stats->tx_err);
22315 goto done;
22316 }
22317 skb_pull(skb,sizeof(struct atmtcp_hdr));
22318 @@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc
22319 __net_timestamp(new_skb);
22320 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
22321 out_vcc->push(out_vcc,new_skb);
22322 - atomic_inc(&vcc->stats->tx);
22323 - atomic_inc(&out_vcc->stats->rx);
22324 + atomic_inc_unchecked(&vcc->stats->tx);
22325 + atomic_inc_unchecked(&out_vcc->stats->rx);
22326 done:
22327 if (vcc->pop) vcc->pop(vcc,skb);
22328 else dev_kfree_skb(skb);
22329 diff -urNp linux-2.6.39.4/drivers/atm/eni.c linux-2.6.39.4/drivers/atm/eni.c
22330 --- linux-2.6.39.4/drivers/atm/eni.c 2011-05-19 00:06:34.000000000 -0400
22331 +++ linux-2.6.39.4/drivers/atm/eni.c 2011-08-05 19:44:36.000000000 -0400
22332 @@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc)
22333 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
22334 vcc->dev->number);
22335 length = 0;
22336 - atomic_inc(&vcc->stats->rx_err);
22337 + atomic_inc_unchecked(&vcc->stats->rx_err);
22338 }
22339 else {
22340 length = ATM_CELL_SIZE-1; /* no HEC */
22341 @@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22342 size);
22343 }
22344 eff = length = 0;
22345 - atomic_inc(&vcc->stats->rx_err);
22346 + atomic_inc_unchecked(&vcc->stats->rx_err);
22347 }
22348 else {
22349 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
22350 @@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc)
22351 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
22352 vcc->dev->number,vcc->vci,length,size << 2,descr);
22353 length = eff = 0;
22354 - atomic_inc(&vcc->stats->rx_err);
22355 + atomic_inc_unchecked(&vcc->stats->rx_err);
22356 }
22357 }
22358 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
22359 @@ -771,7 +771,7 @@ rx_dequeued++;
22360 vcc->push(vcc,skb);
22361 pushed++;
22362 }
22363 - atomic_inc(&vcc->stats->rx);
22364 + atomic_inc_unchecked(&vcc->stats->rx);
22365 }
22366 wake_up(&eni_dev->rx_wait);
22367 }
22368 @@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d
22369 PCI_DMA_TODEVICE);
22370 if (vcc->pop) vcc->pop(vcc,skb);
22371 else dev_kfree_skb_irq(skb);
22372 - atomic_inc(&vcc->stats->tx);
22373 + atomic_inc_unchecked(&vcc->stats->tx);
22374 wake_up(&eni_dev->tx_wait);
22375 dma_complete++;
22376 }
22377 diff -urNp linux-2.6.39.4/drivers/atm/firestream.c linux-2.6.39.4/drivers/atm/firestream.c
22378 --- linux-2.6.39.4/drivers/atm/firestream.c 2011-05-19 00:06:34.000000000 -0400
22379 +++ linux-2.6.39.4/drivers/atm/firestream.c 2011-08-05 19:44:36.000000000 -0400
22380 @@ -749,7 +749,7 @@ static void process_txdone_queue (struct
22381 }
22382 }
22383
22384 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22385 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22386
22387 fs_dprintk (FS_DEBUG_TXMEM, "i");
22388 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
22389 @@ -816,7 +816,7 @@ static void process_incoming (struct fs_
22390 #endif
22391 skb_put (skb, qe->p1 & 0xffff);
22392 ATM_SKB(skb)->vcc = atm_vcc;
22393 - atomic_inc(&atm_vcc->stats->rx);
22394 + atomic_inc_unchecked(&atm_vcc->stats->rx);
22395 __net_timestamp(skb);
22396 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
22397 atm_vcc->push (atm_vcc, skb);
22398 @@ -837,12 +837,12 @@ static void process_incoming (struct fs_
22399 kfree (pe);
22400 }
22401 if (atm_vcc)
22402 - atomic_inc(&atm_vcc->stats->rx_drop);
22403 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22404 break;
22405 case 0x1f: /* Reassembly abort: no buffers. */
22406 /* Silently increment error counter. */
22407 if (atm_vcc)
22408 - atomic_inc(&atm_vcc->stats->rx_drop);
22409 + atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
22410 break;
22411 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
22412 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
22413 diff -urNp linux-2.6.39.4/drivers/atm/fore200e.c linux-2.6.39.4/drivers/atm/fore200e.c
22414 --- linux-2.6.39.4/drivers/atm/fore200e.c 2011-05-19 00:06:34.000000000 -0400
22415 +++ linux-2.6.39.4/drivers/atm/fore200e.c 2011-08-05 19:44:36.000000000 -0400
22416 @@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200
22417 #endif
22418 /* check error condition */
22419 if (*entry->status & STATUS_ERROR)
22420 - atomic_inc(&vcc->stats->tx_err);
22421 + atomic_inc_unchecked(&vcc->stats->tx_err);
22422 else
22423 - atomic_inc(&vcc->stats->tx);
22424 + atomic_inc_unchecked(&vcc->stats->tx);
22425 }
22426 }
22427
22428 @@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2
22429 if (skb == NULL) {
22430 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
22431
22432 - atomic_inc(&vcc->stats->rx_drop);
22433 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22434 return -ENOMEM;
22435 }
22436
22437 @@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2
22438
22439 dev_kfree_skb_any(skb);
22440
22441 - atomic_inc(&vcc->stats->rx_drop);
22442 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22443 return -ENOMEM;
22444 }
22445
22446 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22447
22448 vcc->push(vcc, skb);
22449 - atomic_inc(&vcc->stats->rx);
22450 + atomic_inc_unchecked(&vcc->stats->rx);
22451
22452 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
22453
22454 @@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200
22455 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
22456 fore200e->atm_dev->number,
22457 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
22458 - atomic_inc(&vcc->stats->rx_err);
22459 + atomic_inc_unchecked(&vcc->stats->rx_err);
22460 }
22461 }
22462
22463 @@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc
22464 goto retry_here;
22465 }
22466
22467 - atomic_inc(&vcc->stats->tx_err);
22468 + atomic_inc_unchecked(&vcc->stats->tx_err);
22469
22470 fore200e->tx_sat++;
22471 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
22472 diff -urNp linux-2.6.39.4/drivers/atm/he.c linux-2.6.39.4/drivers/atm/he.c
22473 --- linux-2.6.39.4/drivers/atm/he.c 2011-05-19 00:06:34.000000000 -0400
22474 +++ linux-2.6.39.4/drivers/atm/he.c 2011-08-05 19:44:36.000000000 -0400
22475 @@ -1709,7 +1709,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22476
22477 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
22478 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
22479 - atomic_inc(&vcc->stats->rx_drop);
22480 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22481 goto return_host_buffers;
22482 }
22483
22484 @@ -1736,7 +1736,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22485 RBRQ_LEN_ERR(he_dev->rbrq_head)
22486 ? "LEN_ERR" : "",
22487 vcc->vpi, vcc->vci);
22488 - atomic_inc(&vcc->stats->rx_err);
22489 + atomic_inc_unchecked(&vcc->stats->rx_err);
22490 goto return_host_buffers;
22491 }
22492
22493 @@ -1788,7 +1788,7 @@ he_service_rbrq(struct he_dev *he_dev, i
22494 vcc->push(vcc, skb);
22495 spin_lock(&he_dev->global_lock);
22496
22497 - atomic_inc(&vcc->stats->rx);
22498 + atomic_inc_unchecked(&vcc->stats->rx);
22499
22500 return_host_buffers:
22501 ++pdus_assembled;
22502 @@ -2114,7 +2114,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
22503 tpd->vcc->pop(tpd->vcc, tpd->skb);
22504 else
22505 dev_kfree_skb_any(tpd->skb);
22506 - atomic_inc(&tpd->vcc->stats->tx_err);
22507 + atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
22508 }
22509 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
22510 return;
22511 @@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22512 vcc->pop(vcc, skb);
22513 else
22514 dev_kfree_skb_any(skb);
22515 - atomic_inc(&vcc->stats->tx_err);
22516 + atomic_inc_unchecked(&vcc->stats->tx_err);
22517 return -EINVAL;
22518 }
22519
22520 @@ -2537,7 +2537,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22521 vcc->pop(vcc, skb);
22522 else
22523 dev_kfree_skb_any(skb);
22524 - atomic_inc(&vcc->stats->tx_err);
22525 + atomic_inc_unchecked(&vcc->stats->tx_err);
22526 return -EINVAL;
22527 }
22528 #endif
22529 @@ -2549,7 +2549,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22530 vcc->pop(vcc, skb);
22531 else
22532 dev_kfree_skb_any(skb);
22533 - atomic_inc(&vcc->stats->tx_err);
22534 + atomic_inc_unchecked(&vcc->stats->tx_err);
22535 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22536 return -ENOMEM;
22537 }
22538 @@ -2591,7 +2591,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22539 vcc->pop(vcc, skb);
22540 else
22541 dev_kfree_skb_any(skb);
22542 - atomic_inc(&vcc->stats->tx_err);
22543 + atomic_inc_unchecked(&vcc->stats->tx_err);
22544 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22545 return -ENOMEM;
22546 }
22547 @@ -2622,7 +2622,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
22548 __enqueue_tpd(he_dev, tpd, cid);
22549 spin_unlock_irqrestore(&he_dev->global_lock, flags);
22550
22551 - atomic_inc(&vcc->stats->tx);
22552 + atomic_inc_unchecked(&vcc->stats->tx);
22553
22554 return 0;
22555 }
22556 diff -urNp linux-2.6.39.4/drivers/atm/horizon.c linux-2.6.39.4/drivers/atm/horizon.c
22557 --- linux-2.6.39.4/drivers/atm/horizon.c 2011-05-19 00:06:34.000000000 -0400
22558 +++ linux-2.6.39.4/drivers/atm/horizon.c 2011-08-05 19:44:36.000000000 -0400
22559 @@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev,
22560 {
22561 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
22562 // VC layer stats
22563 - atomic_inc(&vcc->stats->rx);
22564 + atomic_inc_unchecked(&vcc->stats->rx);
22565 __net_timestamp(skb);
22566 // end of our responsibility
22567 vcc->push (vcc, skb);
22568 @@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const
22569 dev->tx_iovec = NULL;
22570
22571 // VC layer stats
22572 - atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
22573 + atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
22574
22575 // free the skb
22576 hrz_kfree_skb (skb);
22577 diff -urNp linux-2.6.39.4/drivers/atm/idt77252.c linux-2.6.39.4/drivers/atm/idt77252.c
22578 --- linux-2.6.39.4/drivers/atm/idt77252.c 2011-05-19 00:06:34.000000000 -0400
22579 +++ linux-2.6.39.4/drivers/atm/idt77252.c 2011-08-05 19:44:36.000000000 -0400
22580 @@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str
22581 else
22582 dev_kfree_skb(skb);
22583
22584 - atomic_inc(&vcc->stats->tx);
22585 + atomic_inc_unchecked(&vcc->stats->tx);
22586 }
22587
22588 atomic_dec(&scq->used);
22589 @@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st
22590 if ((sb = dev_alloc_skb(64)) == NULL) {
22591 printk("%s: Can't allocate buffers for aal0.\n",
22592 card->name);
22593 - atomic_add(i, &vcc->stats->rx_drop);
22594 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
22595 break;
22596 }
22597 if (!atm_charge(vcc, sb->truesize)) {
22598 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
22599 card->name);
22600 - atomic_add(i - 1, &vcc->stats->rx_drop);
22601 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
22602 dev_kfree_skb(sb);
22603 break;
22604 }
22605 @@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st
22606 ATM_SKB(sb)->vcc = vcc;
22607 __net_timestamp(sb);
22608 vcc->push(vcc, sb);
22609 - atomic_inc(&vcc->stats->rx);
22610 + atomic_inc_unchecked(&vcc->stats->rx);
22611
22612 cell += ATM_CELL_PAYLOAD;
22613 }
22614 @@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st
22615 "(CDC: %08x)\n",
22616 card->name, len, rpp->len, readl(SAR_REG_CDC));
22617 recycle_rx_pool_skb(card, rpp);
22618 - atomic_inc(&vcc->stats->rx_err);
22619 + atomic_inc_unchecked(&vcc->stats->rx_err);
22620 return;
22621 }
22622 if (stat & SAR_RSQE_CRC) {
22623 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
22624 recycle_rx_pool_skb(card, rpp);
22625 - atomic_inc(&vcc->stats->rx_err);
22626 + atomic_inc_unchecked(&vcc->stats->rx_err);
22627 return;
22628 }
22629 if (skb_queue_len(&rpp->queue) > 1) {
22630 @@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st
22631 RXPRINTK("%s: Can't alloc RX skb.\n",
22632 card->name);
22633 recycle_rx_pool_skb(card, rpp);
22634 - atomic_inc(&vcc->stats->rx_err);
22635 + atomic_inc_unchecked(&vcc->stats->rx_err);
22636 return;
22637 }
22638 if (!atm_charge(vcc, skb->truesize)) {
22639 @@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st
22640 __net_timestamp(skb);
22641
22642 vcc->push(vcc, skb);
22643 - atomic_inc(&vcc->stats->rx);
22644 + atomic_inc_unchecked(&vcc->stats->rx);
22645
22646 return;
22647 }
22648 @@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st
22649 __net_timestamp(skb);
22650
22651 vcc->push(vcc, skb);
22652 - atomic_inc(&vcc->stats->rx);
22653 + atomic_inc_unchecked(&vcc->stats->rx);
22654
22655 if (skb->truesize > SAR_FB_SIZE_3)
22656 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
22657 @@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *car
22658 if (vcc->qos.aal != ATM_AAL0) {
22659 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
22660 card->name, vpi, vci);
22661 - atomic_inc(&vcc->stats->rx_drop);
22662 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22663 goto drop;
22664 }
22665
22666 if ((sb = dev_alloc_skb(64)) == NULL) {
22667 printk("%s: Can't allocate buffers for AAL0.\n",
22668 card->name);
22669 - atomic_inc(&vcc->stats->rx_err);
22670 + atomic_inc_unchecked(&vcc->stats->rx_err);
22671 goto drop;
22672 }
22673
22674 @@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *car
22675 ATM_SKB(sb)->vcc = vcc;
22676 __net_timestamp(sb);
22677 vcc->push(vcc, sb);
22678 - atomic_inc(&vcc->stats->rx);
22679 + atomic_inc_unchecked(&vcc->stats->rx);
22680
22681 drop:
22682 skb_pull(queue, 64);
22683 @@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22684
22685 if (vc == NULL) {
22686 printk("%s: NULL connection in send().\n", card->name);
22687 - atomic_inc(&vcc->stats->tx_err);
22688 + atomic_inc_unchecked(&vcc->stats->tx_err);
22689 dev_kfree_skb(skb);
22690 return -EINVAL;
22691 }
22692 if (!test_bit(VCF_TX, &vc->flags)) {
22693 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
22694 - atomic_inc(&vcc->stats->tx_err);
22695 + atomic_inc_unchecked(&vcc->stats->tx_err);
22696 dev_kfree_skb(skb);
22697 return -EINVAL;
22698 }
22699 @@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22700 break;
22701 default:
22702 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
22703 - atomic_inc(&vcc->stats->tx_err);
22704 + atomic_inc_unchecked(&vcc->stats->tx_err);
22705 dev_kfree_skb(skb);
22706 return -EINVAL;
22707 }
22708
22709 if (skb_shinfo(skb)->nr_frags != 0) {
22710 printk("%s: No scatter-gather yet.\n", card->name);
22711 - atomic_inc(&vcc->stats->tx_err);
22712 + atomic_inc_unchecked(&vcc->stats->tx_err);
22713 dev_kfree_skb(skb);
22714 return -EINVAL;
22715 }
22716 @@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
22717
22718 err = queue_skb(card, vc, skb, oam);
22719 if (err) {
22720 - atomic_inc(&vcc->stats->tx_err);
22721 + atomic_inc_unchecked(&vcc->stats->tx_err);
22722 dev_kfree_skb(skb);
22723 return err;
22724 }
22725 @@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
22726 skb = dev_alloc_skb(64);
22727 if (!skb) {
22728 printk("%s: Out of memory in send_oam().\n", card->name);
22729 - atomic_inc(&vcc->stats->tx_err);
22730 + atomic_inc_unchecked(&vcc->stats->tx_err);
22731 return -ENOMEM;
22732 }
22733 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
22734 diff -urNp linux-2.6.39.4/drivers/atm/iphase.c linux-2.6.39.4/drivers/atm/iphase.c
22735 --- linux-2.6.39.4/drivers/atm/iphase.c 2011-05-19 00:06:34.000000000 -0400
22736 +++ linux-2.6.39.4/drivers/atm/iphase.c 2011-08-05 19:44:36.000000000 -0400
22737 @@ -1124,7 +1124,7 @@ static int rx_pkt(struct atm_dev *dev)
22738 status = (u_short) (buf_desc_ptr->desc_mode);
22739 if (status & (RX_CER | RX_PTE | RX_OFL))
22740 {
22741 - atomic_inc(&vcc->stats->rx_err);
22742 + atomic_inc_unchecked(&vcc->stats->rx_err);
22743 IF_ERR(printk("IA: bad packet, dropping it");)
22744 if (status & RX_CER) {
22745 IF_ERR(printk(" cause: packet CRC error\n");)
22746 @@ -1147,7 +1147,7 @@ static int rx_pkt(struct atm_dev *dev)
22747 len = dma_addr - buf_addr;
22748 if (len > iadev->rx_buf_sz) {
22749 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
22750 - atomic_inc(&vcc->stats->rx_err);
22751 + atomic_inc_unchecked(&vcc->stats->rx_err);
22752 goto out_free_desc;
22753 }
22754
22755 @@ -1297,7 +1297,7 @@ static void rx_dle_intr(struct atm_dev *
22756 ia_vcc = INPH_IA_VCC(vcc);
22757 if (ia_vcc == NULL)
22758 {
22759 - atomic_inc(&vcc->stats->rx_err);
22760 + atomic_inc_unchecked(&vcc->stats->rx_err);
22761 dev_kfree_skb_any(skb);
22762 atm_return(vcc, atm_guess_pdu2truesize(len));
22763 goto INCR_DLE;
22764 @@ -1309,7 +1309,7 @@ static void rx_dle_intr(struct atm_dev *
22765 if ((length > iadev->rx_buf_sz) || (length >
22766 (skb->len - sizeof(struct cpcs_trailer))))
22767 {
22768 - atomic_inc(&vcc->stats->rx_err);
22769 + atomic_inc_unchecked(&vcc->stats->rx_err);
22770 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
22771 length, skb->len);)
22772 dev_kfree_skb_any(skb);
22773 @@ -1325,7 +1325,7 @@ static void rx_dle_intr(struct atm_dev *
22774
22775 IF_RX(printk("rx_dle_intr: skb push");)
22776 vcc->push(vcc,skb);
22777 - atomic_inc(&vcc->stats->rx);
22778 + atomic_inc_unchecked(&vcc->stats->rx);
22779 iadev->rx_pkt_cnt++;
22780 }
22781 INCR_DLE:
22782 @@ -2807,15 +2807,15 @@ static int ia_ioctl(struct atm_dev *dev,
22783 {
22784 struct k_sonet_stats *stats;
22785 stats = &PRIV(_ia_dev[board])->sonet_stats;
22786 - printk("section_bip: %d\n", atomic_read(&stats->section_bip));
22787 - printk("line_bip : %d\n", atomic_read(&stats->line_bip));
22788 - printk("path_bip : %d\n", atomic_read(&stats->path_bip));
22789 - printk("line_febe : %d\n", atomic_read(&stats->line_febe));
22790 - printk("path_febe : %d\n", atomic_read(&stats->path_febe));
22791 - printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
22792 - printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
22793 - printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
22794 - printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
22795 + printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
22796 + printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
22797 + printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
22798 + printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
22799 + printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
22800 + printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
22801 + printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
22802 + printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
22803 + printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
22804 }
22805 ia_cmds.status = 0;
22806 break;
22807 @@ -2920,7 +2920,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
22808 if ((desc == 0) || (desc > iadev->num_tx_desc))
22809 {
22810 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
22811 - atomic_inc(&vcc->stats->tx);
22812 + atomic_inc_unchecked(&vcc->stats->tx);
22813 if (vcc->pop)
22814 vcc->pop(vcc, skb);
22815 else
22816 @@ -3025,14 +3025,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
22817 ATM_DESC(skb) = vcc->vci;
22818 skb_queue_tail(&iadev->tx_dma_q, skb);
22819
22820 - atomic_inc(&vcc->stats->tx);
22821 + atomic_inc_unchecked(&vcc->stats->tx);
22822 iadev->tx_pkt_cnt++;
22823 /* Increment transaction counter */
22824 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
22825
22826 #if 0
22827 /* add flow control logic */
22828 - if (atomic_read(&vcc->stats->tx) % 20 == 0) {
22829 + if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
22830 if (iavcc->vc_desc_cnt > 10) {
22831 vcc->tx_quota = vcc->tx_quota * 3 / 4;
22832 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
22833 diff -urNp linux-2.6.39.4/drivers/atm/lanai.c linux-2.6.39.4/drivers/atm/lanai.c
22834 --- linux-2.6.39.4/drivers/atm/lanai.c 2011-05-19 00:06:34.000000000 -0400
22835 +++ linux-2.6.39.4/drivers/atm/lanai.c 2011-08-05 19:44:36.000000000 -0400
22836 @@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l
22837 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
22838 lanai_endtx(lanai, lvcc);
22839 lanai_free_skb(lvcc->tx.atmvcc, skb);
22840 - atomic_inc(&lvcc->tx.atmvcc->stats->tx);
22841 + atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
22842 }
22843
22844 /* Try to fill the buffer - don't call unless there is backlog */
22845 @@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc
22846 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
22847 __net_timestamp(skb);
22848 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
22849 - atomic_inc(&lvcc->rx.atmvcc->stats->rx);
22850 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
22851 out:
22852 lvcc->rx.buf.ptr = end;
22853 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
22854 @@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d
22855 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
22856 "vcc %d\n", lanai->number, (unsigned int) s, vci);
22857 lanai->stats.service_rxnotaal5++;
22858 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22859 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22860 return 0;
22861 }
22862 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
22863 @@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d
22864 int bytes;
22865 read_unlock(&vcc_sklist_lock);
22866 DPRINTK("got trashed rx pdu on vci %d\n", vci);
22867 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22868 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22869 lvcc->stats.x.aal5.service_trash++;
22870 bytes = (SERVICE_GET_END(s) * 16) -
22871 (((unsigned long) lvcc->rx.buf.ptr) -
22872 @@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d
22873 }
22874 if (s & SERVICE_STREAM) {
22875 read_unlock(&vcc_sklist_lock);
22876 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22877 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22878 lvcc->stats.x.aal5.service_stream++;
22879 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
22880 "PDU on VCI %d!\n", lanai->number, vci);
22881 @@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d
22882 return 0;
22883 }
22884 DPRINTK("got rx crc error on vci %d\n", vci);
22885 - atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
22886 + atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
22887 lvcc->stats.x.aal5.service_rxcrc++;
22888 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
22889 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
22890 diff -urNp linux-2.6.39.4/drivers/atm/nicstar.c linux-2.6.39.4/drivers/atm/nicstar.c
22891 --- linux-2.6.39.4/drivers/atm/nicstar.c 2011-05-19 00:06:34.000000000 -0400
22892 +++ linux-2.6.39.4/drivers/atm/nicstar.c 2011-08-05 19:44:36.000000000 -0400
22893 @@ -1654,7 +1654,7 @@ static int ns_send(struct atm_vcc *vcc,
22894 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
22895 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
22896 card->index);
22897 - atomic_inc(&vcc->stats->tx_err);
22898 + atomic_inc_unchecked(&vcc->stats->tx_err);
22899 dev_kfree_skb_any(skb);
22900 return -EINVAL;
22901 }
22902 @@ -1662,7 +1662,7 @@ static int ns_send(struct atm_vcc *vcc,
22903 if (!vc->tx) {
22904 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
22905 card->index);
22906 - atomic_inc(&vcc->stats->tx_err);
22907 + atomic_inc_unchecked(&vcc->stats->tx_err);
22908 dev_kfree_skb_any(skb);
22909 return -EINVAL;
22910 }
22911 @@ -1670,14 +1670,14 @@ static int ns_send(struct atm_vcc *vcc,
22912 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
22913 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
22914 card->index);
22915 - atomic_inc(&vcc->stats->tx_err);
22916 + atomic_inc_unchecked(&vcc->stats->tx_err);
22917 dev_kfree_skb_any(skb);
22918 return -EINVAL;
22919 }
22920
22921 if (skb_shinfo(skb)->nr_frags != 0) {
22922 printk("nicstar%d: No scatter-gather yet.\n", card->index);
22923 - atomic_inc(&vcc->stats->tx_err);
22924 + atomic_inc_unchecked(&vcc->stats->tx_err);
22925 dev_kfree_skb_any(skb);
22926 return -EINVAL;
22927 }
22928 @@ -1725,11 +1725,11 @@ static int ns_send(struct atm_vcc *vcc,
22929 }
22930
22931 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
22932 - atomic_inc(&vcc->stats->tx_err);
22933 + atomic_inc_unchecked(&vcc->stats->tx_err);
22934 dev_kfree_skb_any(skb);
22935 return -EIO;
22936 }
22937 - atomic_inc(&vcc->stats->tx);
22938 + atomic_inc_unchecked(&vcc->stats->tx);
22939
22940 return 0;
22941 }
22942 @@ -2046,14 +2046,14 @@ static void dequeue_rx(ns_dev * card, ns
22943 printk
22944 ("nicstar%d: Can't allocate buffers for aal0.\n",
22945 card->index);
22946 - atomic_add(i, &vcc->stats->rx_drop);
22947 + atomic_add_unchecked(i, &vcc->stats->rx_drop);
22948 break;
22949 }
22950 if (!atm_charge(vcc, sb->truesize)) {
22951 RXPRINTK
22952 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
22953 card->index);
22954 - atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22955 + atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
22956 dev_kfree_skb_any(sb);
22957 break;
22958 }
22959 @@ -2068,7 +2068,7 @@ static void dequeue_rx(ns_dev * card, ns
22960 ATM_SKB(sb)->vcc = vcc;
22961 __net_timestamp(sb);
22962 vcc->push(vcc, sb);
22963 - atomic_inc(&vcc->stats->rx);
22964 + atomic_inc_unchecked(&vcc->stats->rx);
22965 cell += ATM_CELL_PAYLOAD;
22966 }
22967
22968 @@ -2085,7 +2085,7 @@ static void dequeue_rx(ns_dev * card, ns
22969 if (iovb == NULL) {
22970 printk("nicstar%d: Out of iovec buffers.\n",
22971 card->index);
22972 - atomic_inc(&vcc->stats->rx_drop);
22973 + atomic_inc_unchecked(&vcc->stats->rx_drop);
22974 recycle_rx_buf(card, skb);
22975 return;
22976 }
22977 @@ -2109,7 +2109,7 @@ static void dequeue_rx(ns_dev * card, ns
22978 small or large buffer itself. */
22979 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
22980 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
22981 - atomic_inc(&vcc->stats->rx_err);
22982 + atomic_inc_unchecked(&vcc->stats->rx_err);
22983 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
22984 NS_MAX_IOVECS);
22985 NS_PRV_IOVCNT(iovb) = 0;
22986 @@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns
22987 ("nicstar%d: Expected a small buffer, and this is not one.\n",
22988 card->index);
22989 which_list(card, skb);
22990 - atomic_inc(&vcc->stats->rx_err);
22991 + atomic_inc_unchecked(&vcc->stats->rx_err);
22992 recycle_rx_buf(card, skb);
22993 vc->rx_iov = NULL;
22994 recycle_iov_buf(card, iovb);
22995 @@ -2142,7 +2142,7 @@ static void dequeue_rx(ns_dev * card, ns
22996 ("nicstar%d: Expected a large buffer, and this is not one.\n",
22997 card->index);
22998 which_list(card, skb);
22999 - atomic_inc(&vcc->stats->rx_err);
23000 + atomic_inc_unchecked(&vcc->stats->rx_err);
23001 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23002 NS_PRV_IOVCNT(iovb));
23003 vc->rx_iov = NULL;
23004 @@ -2165,7 +2165,7 @@ static void dequeue_rx(ns_dev * card, ns
23005 printk(" - PDU size mismatch.\n");
23006 else
23007 printk(".\n");
23008 - atomic_inc(&vcc->stats->rx_err);
23009 + atomic_inc_unchecked(&vcc->stats->rx_err);
23010 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
23011 NS_PRV_IOVCNT(iovb));
23012 vc->rx_iov = NULL;
23013 @@ -2179,7 +2179,7 @@ static void dequeue_rx(ns_dev * card, ns
23014 /* skb points to a small buffer */
23015 if (!atm_charge(vcc, skb->truesize)) {
23016 push_rxbufs(card, skb);
23017 - atomic_inc(&vcc->stats->rx_drop);
23018 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23019 } else {
23020 skb_put(skb, len);
23021 dequeue_sm_buf(card, skb);
23022 @@ -2189,7 +2189,7 @@ static void dequeue_rx(ns_dev * card, ns
23023 ATM_SKB(skb)->vcc = vcc;
23024 __net_timestamp(skb);
23025 vcc->push(vcc, skb);
23026 - atomic_inc(&vcc->stats->rx);
23027 + atomic_inc_unchecked(&vcc->stats->rx);
23028 }
23029 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
23030 struct sk_buff *sb;
23031 @@ -2200,7 +2200,7 @@ static void dequeue_rx(ns_dev * card, ns
23032 if (len <= NS_SMBUFSIZE) {
23033 if (!atm_charge(vcc, sb->truesize)) {
23034 push_rxbufs(card, sb);
23035 - atomic_inc(&vcc->stats->rx_drop);
23036 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23037 } else {
23038 skb_put(sb, len);
23039 dequeue_sm_buf(card, sb);
23040 @@ -2210,7 +2210,7 @@ static void dequeue_rx(ns_dev * card, ns
23041 ATM_SKB(sb)->vcc = vcc;
23042 __net_timestamp(sb);
23043 vcc->push(vcc, sb);
23044 - atomic_inc(&vcc->stats->rx);
23045 + atomic_inc_unchecked(&vcc->stats->rx);
23046 }
23047
23048 push_rxbufs(card, skb);
23049 @@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns
23050
23051 if (!atm_charge(vcc, skb->truesize)) {
23052 push_rxbufs(card, skb);
23053 - atomic_inc(&vcc->stats->rx_drop);
23054 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23055 } else {
23056 dequeue_lg_buf(card, skb);
23057 #ifdef NS_USE_DESTRUCTORS
23058 @@ -2232,7 +2232,7 @@ static void dequeue_rx(ns_dev * card, ns
23059 ATM_SKB(skb)->vcc = vcc;
23060 __net_timestamp(skb);
23061 vcc->push(vcc, skb);
23062 - atomic_inc(&vcc->stats->rx);
23063 + atomic_inc_unchecked(&vcc->stats->rx);
23064 }
23065
23066 push_rxbufs(card, sb);
23067 @@ -2253,7 +2253,7 @@ static void dequeue_rx(ns_dev * card, ns
23068 printk
23069 ("nicstar%d: Out of huge buffers.\n",
23070 card->index);
23071 - atomic_inc(&vcc->stats->rx_drop);
23072 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23073 recycle_iovec_rx_bufs(card,
23074 (struct iovec *)
23075 iovb->data,
23076 @@ -2304,7 +2304,7 @@ static void dequeue_rx(ns_dev * card, ns
23077 card->hbpool.count++;
23078 } else
23079 dev_kfree_skb_any(hb);
23080 - atomic_inc(&vcc->stats->rx_drop);
23081 + atomic_inc_unchecked(&vcc->stats->rx_drop);
23082 } else {
23083 /* Copy the small buffer to the huge buffer */
23084 sb = (struct sk_buff *)iov->iov_base;
23085 @@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev * card, ns
23086 #endif /* NS_USE_DESTRUCTORS */
23087 __net_timestamp(hb);
23088 vcc->push(vcc, hb);
23089 - atomic_inc(&vcc->stats->rx);
23090 + atomic_inc_unchecked(&vcc->stats->rx);
23091 }
23092 }
23093
23094 diff -urNp linux-2.6.39.4/drivers/atm/solos-pci.c linux-2.6.39.4/drivers/atm/solos-pci.c
23095 --- linux-2.6.39.4/drivers/atm/solos-pci.c 2011-05-19 00:06:34.000000000 -0400
23096 +++ linux-2.6.39.4/drivers/atm/solos-pci.c 2011-08-05 19:44:36.000000000 -0400
23097 @@ -715,7 +715,7 @@ void solos_bh(unsigned long card_arg)
23098 }
23099 atm_charge(vcc, skb->truesize);
23100 vcc->push(vcc, skb);
23101 - atomic_inc(&vcc->stats->rx);
23102 + atomic_inc_unchecked(&vcc->stats->rx);
23103 break;
23104
23105 case PKT_STATUS:
23106 @@ -900,6 +900,8 @@ static int print_buffer(struct sk_buff *
23107 char msg[500];
23108 char item[10];
23109
23110 + pax_track_stack();
23111 +
23112 len = buf->len;
23113 for (i = 0; i < len; i++){
23114 if(i % 8 == 0)
23115 @@ -1009,7 +1011,7 @@ static uint32_t fpga_tx(struct solos_car
23116 vcc = SKB_CB(oldskb)->vcc;
23117
23118 if (vcc) {
23119 - atomic_inc(&vcc->stats->tx);
23120 + atomic_inc_unchecked(&vcc->stats->tx);
23121 solos_pop(vcc, oldskb);
23122 } else
23123 dev_kfree_skb_irq(oldskb);
23124 diff -urNp linux-2.6.39.4/drivers/atm/suni.c linux-2.6.39.4/drivers/atm/suni.c
23125 --- linux-2.6.39.4/drivers/atm/suni.c 2011-05-19 00:06:34.000000000 -0400
23126 +++ linux-2.6.39.4/drivers/atm/suni.c 2011-08-05 19:44:36.000000000 -0400
23127 @@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock);
23128
23129
23130 #define ADD_LIMITED(s,v) \
23131 - atomic_add((v),&stats->s); \
23132 - if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
23133 + atomic_add_unchecked((v),&stats->s); \
23134 + if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
23135
23136
23137 static void suni_hz(unsigned long from_timer)
23138 diff -urNp linux-2.6.39.4/drivers/atm/uPD98402.c linux-2.6.39.4/drivers/atm/uPD98402.c
23139 --- linux-2.6.39.4/drivers/atm/uPD98402.c 2011-05-19 00:06:34.000000000 -0400
23140 +++ linux-2.6.39.4/drivers/atm/uPD98402.c 2011-08-05 19:44:36.000000000 -0400
23141 @@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d
23142 struct sonet_stats tmp;
23143 int error = 0;
23144
23145 - atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
23146 + atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
23147 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
23148 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
23149 if (zero && !error) {
23150 @@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev
23151
23152
23153 #define ADD_LIMITED(s,v) \
23154 - { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
23155 - if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
23156 - atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
23157 + { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
23158 + if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
23159 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
23160
23161
23162 static void stat_event(struct atm_dev *dev)
23163 @@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev
23164 if (reason & uPD98402_INT_PFM) stat_event(dev);
23165 if (reason & uPD98402_INT_PCO) {
23166 (void) GET(PCOCR); /* clear interrupt cause */
23167 - atomic_add(GET(HECCT),
23168 + atomic_add_unchecked(GET(HECCT),
23169 &PRIV(dev)->sonet_stats.uncorr_hcs);
23170 }
23171 if ((reason & uPD98402_INT_RFO) &&
23172 @@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev
23173 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
23174 uPD98402_INT_LOS),PIMR); /* enable them */
23175 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
23176 - atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23177 - atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
23178 - atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
23179 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
23180 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
23181 + atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
23182 return 0;
23183 }
23184
23185 diff -urNp linux-2.6.39.4/drivers/atm/zatm.c linux-2.6.39.4/drivers/atm/zatm.c
23186 --- linux-2.6.39.4/drivers/atm/zatm.c 2011-05-19 00:06:34.000000000 -0400
23187 +++ linux-2.6.39.4/drivers/atm/zatm.c 2011-08-05 19:44:36.000000000 -0400
23188 @@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23189 }
23190 if (!size) {
23191 dev_kfree_skb_irq(skb);
23192 - if (vcc) atomic_inc(&vcc->stats->rx_err);
23193 + if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
23194 continue;
23195 }
23196 if (!atm_charge(vcc,skb->truesize)) {
23197 @@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
23198 skb->len = size;
23199 ATM_SKB(skb)->vcc = vcc;
23200 vcc->push(vcc,skb);
23201 - atomic_inc(&vcc->stats->rx);
23202 + atomic_inc_unchecked(&vcc->stats->rx);
23203 }
23204 zout(pos & 0xffff,MTA(mbx));
23205 #if 0 /* probably a stupid idea */
23206 @@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
23207 skb_queue_head(&zatm_vcc->backlog,skb);
23208 break;
23209 }
23210 - atomic_inc(&vcc->stats->tx);
23211 + atomic_inc_unchecked(&vcc->stats->tx);
23212 wake_up(&zatm_vcc->tx_wait);
23213 }
23214
23215 diff -urNp linux-2.6.39.4/drivers/base/power/wakeup.c linux-2.6.39.4/drivers/base/power/wakeup.c
23216 --- linux-2.6.39.4/drivers/base/power/wakeup.c 2011-05-19 00:06:34.000000000 -0400
23217 +++ linux-2.6.39.4/drivers/base/power/wakeup.c 2011-08-05 19:44:36.000000000 -0400
23218 @@ -29,14 +29,14 @@ bool events_check_enabled;
23219 * They need to be modified together atomically, so it's better to use one
23220 * atomic variable to hold them both.
23221 */
23222 -static atomic_t combined_event_count = ATOMIC_INIT(0);
23223 +static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
23224
23225 #define IN_PROGRESS_BITS (sizeof(int) * 4)
23226 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
23227
23228 static void split_counters(unsigned int *cnt, unsigned int *inpr)
23229 {
23230 - unsigned int comb = atomic_read(&combined_event_count);
23231 + unsigned int comb = atomic_read_unchecked(&combined_event_count);
23232
23233 *cnt = (comb >> IN_PROGRESS_BITS);
23234 *inpr = comb & MAX_IN_PROGRESS;
23235 @@ -351,7 +351,7 @@ static void wakeup_source_activate(struc
23236 ws->last_time = ktime_get();
23237
23238 /* Increment the counter of events in progress. */
23239 - atomic_inc(&combined_event_count);
23240 + atomic_inc_unchecked(&combined_event_count);
23241 }
23242
23243 /**
23244 @@ -441,7 +441,7 @@ static void wakeup_source_deactivate(str
23245 * Increment the counter of registered wakeup events and decrement the
23246 * couter of wakeup events in progress simultaneously.
23247 */
23248 - atomic_add(MAX_IN_PROGRESS, &combined_event_count);
23249 + atomic_add_unchecked(MAX_IN_PROGRESS, &combined_event_count);
23250 }
23251
23252 /**
23253 diff -urNp linux-2.6.39.4/drivers/block/cciss.c linux-2.6.39.4/drivers/block/cciss.c
23254 --- linux-2.6.39.4/drivers/block/cciss.c 2011-05-19 00:06:34.000000000 -0400
23255 +++ linux-2.6.39.4/drivers/block/cciss.c 2011-08-05 20:34:06.000000000 -0400
23256 @@ -1151,6 +1151,8 @@ static int cciss_ioctl32_passthru(struct
23257 int err;
23258 u32 cp;
23259
23260 + memset(&arg64, 0, sizeof(arg64));
23261 +
23262 err = 0;
23263 err |=
23264 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
23265 @@ -2933,7 +2935,7 @@ static void start_io(ctlr_info_t *h)
23266 while (!list_empty(&h->reqQ)) {
23267 c = list_entry(h->reqQ.next, CommandList_struct, list);
23268 /* can't do anything if fifo is full */
23269 - if ((h->access.fifo_full(h))) {
23270 + if ((h->access->fifo_full(h))) {
23271 dev_warn(&h->pdev->dev, "fifo full\n");
23272 break;
23273 }
23274 @@ -2943,7 +2945,7 @@ static void start_io(ctlr_info_t *h)
23275 h->Qdepth--;
23276
23277 /* Tell the controller execute command */
23278 - h->access.submit_command(h, c);
23279 + h->access->submit_command(h, c);
23280
23281 /* Put job onto the completed Q */
23282 addQ(&h->cmpQ, c);
23283 @@ -3369,17 +3371,17 @@ startio:
23284
23285 static inline unsigned long get_next_completion(ctlr_info_t *h)
23286 {
23287 - return h->access.command_completed(h);
23288 + return h->access->command_completed(h);
23289 }
23290
23291 static inline int interrupt_pending(ctlr_info_t *h)
23292 {
23293 - return h->access.intr_pending(h);
23294 + return h->access->intr_pending(h);
23295 }
23296
23297 static inline long interrupt_not_for_us(ctlr_info_t *h)
23298 {
23299 - return ((h->access.intr_pending(h) == 0) ||
23300 + return ((h->access->intr_pending(h) == 0) ||
23301 (h->interrupts_enabled == 0));
23302 }
23303
23304 @@ -3412,7 +3414,7 @@ static inline u32 next_command(ctlr_info
23305 u32 a;
23306
23307 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
23308 - return h->access.command_completed(h);
23309 + return h->access->command_completed(h);
23310
23311 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
23312 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
23313 @@ -3910,7 +3912,7 @@ static void __devinit cciss_put_controll
23314 trans_support & CFGTBL_Trans_use_short_tags);
23315
23316 /* Change the access methods to the performant access methods */
23317 - h->access = SA5_performant_access;
23318 + h->access = &SA5_performant_access;
23319 h->transMethod = CFGTBL_Trans_Performant;
23320
23321 return;
23322 @@ -4179,7 +4181,7 @@ static int __devinit cciss_pci_init(ctlr
23323 if (prod_index < 0)
23324 return -ENODEV;
23325 h->product_name = products[prod_index].product_name;
23326 - h->access = *(products[prod_index].access);
23327 + h->access = products[prod_index].access;
23328
23329 if (cciss_board_disabled(h)) {
23330 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
23331 @@ -4661,7 +4663,7 @@ static int __devinit cciss_init_one(stru
23332 }
23333
23334 /* make sure the board interrupts are off */
23335 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23336 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23337 if (h->msi_vector || h->msix_vector) {
23338 if (request_irq(h->intr[PERF_MODE_INT],
23339 do_cciss_msix_intr,
23340 @@ -4744,7 +4746,7 @@ static int __devinit cciss_init_one(stru
23341 cciss_scsi_setup(h);
23342
23343 /* Turn the interrupts on so we can service requests */
23344 - h->access.set_intr_mask(h, CCISS_INTR_ON);
23345 + h->access->set_intr_mask(h, CCISS_INTR_ON);
23346
23347 /* Get the firmware version */
23348 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
23349 @@ -4828,7 +4830,7 @@ static void cciss_shutdown(struct pci_de
23350 kfree(flush_buf);
23351 if (return_code != IO_OK)
23352 dev_warn(&h->pdev->dev, "Error flushing cache\n");
23353 - h->access.set_intr_mask(h, CCISS_INTR_OFF);
23354 + h->access->set_intr_mask(h, CCISS_INTR_OFF);
23355 free_irq(h->intr[PERF_MODE_INT], h);
23356 }
23357
23358 diff -urNp linux-2.6.39.4/drivers/block/cciss.h linux-2.6.39.4/drivers/block/cciss.h
23359 --- linux-2.6.39.4/drivers/block/cciss.h 2011-05-19 00:06:34.000000000 -0400
23360 +++ linux-2.6.39.4/drivers/block/cciss.h 2011-08-05 20:34:06.000000000 -0400
23361 @@ -100,7 +100,7 @@ struct ctlr_info
23362 /* information about each logical volume */
23363 drive_info_struct *drv[CISS_MAX_LUN];
23364
23365 - struct access_method access;
23366 + struct access_method *access;
23367
23368 /* queue and queue Info */
23369 struct list_head reqQ;
23370 diff -urNp linux-2.6.39.4/drivers/block/cpqarray.c linux-2.6.39.4/drivers/block/cpqarray.c
23371 --- linux-2.6.39.4/drivers/block/cpqarray.c 2011-05-19 00:06:34.000000000 -0400
23372 +++ linux-2.6.39.4/drivers/block/cpqarray.c 2011-08-05 20:34:06.000000000 -0400
23373 @@ -404,7 +404,7 @@ static int __devinit cpqarray_register_c
23374 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
23375 goto Enomem4;
23376 }
23377 - hba[i]->access.set_intr_mask(hba[i], 0);
23378 + hba[i]->access->set_intr_mask(hba[i], 0);
23379 if (request_irq(hba[i]->intr, do_ida_intr,
23380 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
23381 {
23382 @@ -459,7 +459,7 @@ static int __devinit cpqarray_register_c
23383 add_timer(&hba[i]->timer);
23384
23385 /* Enable IRQ now that spinlock and rate limit timer are set up */
23386 - hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23387 + hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
23388
23389 for(j=0; j<NWD; j++) {
23390 struct gendisk *disk = ida_gendisk[i][j];
23391 @@ -694,7 +694,7 @@ DBGINFO(
23392 for(i=0; i<NR_PRODUCTS; i++) {
23393 if (board_id == products[i].board_id) {
23394 c->product_name = products[i].product_name;
23395 - c->access = *(products[i].access);
23396 + c->access = products[i].access;
23397 break;
23398 }
23399 }
23400 @@ -792,7 +792,7 @@ static int __devinit cpqarray_eisa_detec
23401 hba[ctlr]->intr = intr;
23402 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
23403 hba[ctlr]->product_name = products[j].product_name;
23404 - hba[ctlr]->access = *(products[j].access);
23405 + hba[ctlr]->access = products[j].access;
23406 hba[ctlr]->ctlr = ctlr;
23407 hba[ctlr]->board_id = board_id;
23408 hba[ctlr]->pci_dev = NULL; /* not PCI */
23409 @@ -911,6 +911,8 @@ static void do_ida_request(struct reques
23410 struct scatterlist tmp_sg[SG_MAX];
23411 int i, dir, seg;
23412
23413 + pax_track_stack();
23414 +
23415 queue_next:
23416 creq = blk_peek_request(q);
23417 if (!creq)
23418 @@ -980,7 +982,7 @@ static void start_io(ctlr_info_t *h)
23419
23420 while((c = h->reqQ) != NULL) {
23421 /* Can't do anything if we're busy */
23422 - if (h->access.fifo_full(h) == 0)
23423 + if (h->access->fifo_full(h) == 0)
23424 return;
23425
23426 /* Get the first entry from the request Q */
23427 @@ -988,7 +990,7 @@ static void start_io(ctlr_info_t *h)
23428 h->Qdepth--;
23429
23430 /* Tell the controller to do our bidding */
23431 - h->access.submit_command(h, c);
23432 + h->access->submit_command(h, c);
23433
23434 /* Get onto the completion Q */
23435 addQ(&h->cmpQ, c);
23436 @@ -1050,7 +1052,7 @@ static irqreturn_t do_ida_intr(int irq,
23437 unsigned long flags;
23438 __u32 a,a1;
23439
23440 - istat = h->access.intr_pending(h);
23441 + istat = h->access->intr_pending(h);
23442 /* Is this interrupt for us? */
23443 if (istat == 0)
23444 return IRQ_NONE;
23445 @@ -1061,7 +1063,7 @@ static irqreturn_t do_ida_intr(int irq,
23446 */
23447 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
23448 if (istat & FIFO_NOT_EMPTY) {
23449 - while((a = h->access.command_completed(h))) {
23450 + while((a = h->access->command_completed(h))) {
23451 a1 = a; a &= ~3;
23452 if ((c = h->cmpQ) == NULL)
23453 {
23454 @@ -1449,11 +1451,11 @@ static int sendcmd(
23455 /*
23456 * Disable interrupt
23457 */
23458 - info_p->access.set_intr_mask(info_p, 0);
23459 + info_p->access->set_intr_mask(info_p, 0);
23460 /* Make sure there is room in the command FIFO */
23461 /* Actually it should be completely empty at this time. */
23462 for (i = 200000; i > 0; i--) {
23463 - temp = info_p->access.fifo_full(info_p);
23464 + temp = info_p->access->fifo_full(info_p);
23465 if (temp != 0) {
23466 break;
23467 }
23468 @@ -1466,7 +1468,7 @@ DBG(
23469 /*
23470 * Send the cmd
23471 */
23472 - info_p->access.submit_command(info_p, c);
23473 + info_p->access->submit_command(info_p, c);
23474 complete = pollcomplete(ctlr);
23475
23476 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
23477 @@ -1549,9 +1551,9 @@ static int revalidate_allvol(ctlr_info_t
23478 * we check the new geometry. Then turn interrupts back on when
23479 * we're done.
23480 */
23481 - host->access.set_intr_mask(host, 0);
23482 + host->access->set_intr_mask(host, 0);
23483 getgeometry(ctlr);
23484 - host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
23485 + host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
23486
23487 for(i=0; i<NWD; i++) {
23488 struct gendisk *disk = ida_gendisk[ctlr][i];
23489 @@ -1591,7 +1593,7 @@ static int pollcomplete(int ctlr)
23490 /* Wait (up to 2 seconds) for a command to complete */
23491
23492 for (i = 200000; i > 0; i--) {
23493 - done = hba[ctlr]->access.command_completed(hba[ctlr]);
23494 + done = hba[ctlr]->access->command_completed(hba[ctlr]);
23495 if (done == 0) {
23496 udelay(10); /* a short fixed delay */
23497 } else
23498 diff -urNp linux-2.6.39.4/drivers/block/cpqarray.h linux-2.6.39.4/drivers/block/cpqarray.h
23499 --- linux-2.6.39.4/drivers/block/cpqarray.h 2011-05-19 00:06:34.000000000 -0400
23500 +++ linux-2.6.39.4/drivers/block/cpqarray.h 2011-08-05 20:34:06.000000000 -0400
23501 @@ -99,7 +99,7 @@ struct ctlr_info {
23502 drv_info_t drv[NWD];
23503 struct proc_dir_entry *proc;
23504
23505 - struct access_method access;
23506 + struct access_method *access;
23507
23508 cmdlist_t *reqQ;
23509 cmdlist_t *cmpQ;
23510 diff -urNp linux-2.6.39.4/drivers/block/DAC960.c linux-2.6.39.4/drivers/block/DAC960.c
23511 --- linux-2.6.39.4/drivers/block/DAC960.c 2011-05-19 00:06:34.000000000 -0400
23512 +++ linux-2.6.39.4/drivers/block/DAC960.c 2011-08-05 19:44:36.000000000 -0400
23513 @@ -1980,6 +1980,8 @@ static bool DAC960_V1_ReadDeviceConfigur
23514 unsigned long flags;
23515 int Channel, TargetID;
23516
23517 + pax_track_stack();
23518 +
23519 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
23520 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
23521 sizeof(DAC960_SCSI_Inquiry_T) +
23522 diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_int.h linux-2.6.39.4/drivers/block/drbd/drbd_int.h
23523 --- linux-2.6.39.4/drivers/block/drbd/drbd_int.h 2011-05-19 00:06:34.000000000 -0400
23524 +++ linux-2.6.39.4/drivers/block/drbd/drbd_int.h 2011-08-05 19:44:36.000000000 -0400
23525 @@ -736,7 +736,7 @@ struct drbd_request;
23526 struct drbd_epoch {
23527 struct list_head list;
23528 unsigned int barrier_nr;
23529 - atomic_t epoch_size; /* increased on every request added. */
23530 + atomic_unchecked_t epoch_size; /* increased on every request added. */
23531 atomic_t active; /* increased on every req. added, and dec on every finished. */
23532 unsigned long flags;
23533 };
23534 @@ -1108,7 +1108,7 @@ struct drbd_conf {
23535 void *int_dig_in;
23536 void *int_dig_vv;
23537 wait_queue_head_t seq_wait;
23538 - atomic_t packet_seq;
23539 + atomic_unchecked_t packet_seq;
23540 unsigned int peer_seq;
23541 spinlock_t peer_seq_lock;
23542 unsigned int minor;
23543 diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_main.c linux-2.6.39.4/drivers/block/drbd/drbd_main.c
23544 --- linux-2.6.39.4/drivers/block/drbd/drbd_main.c 2011-05-19 00:06:34.000000000 -0400
23545 +++ linux-2.6.39.4/drivers/block/drbd/drbd_main.c 2011-08-05 19:44:36.000000000 -0400
23546 @@ -2387,7 +2387,7 @@ static int _drbd_send_ack(struct drbd_co
23547 p.sector = sector;
23548 p.block_id = block_id;
23549 p.blksize = blksize;
23550 - p.seq_num = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));
23551 + p.seq_num = cpu_to_be32(atomic_add_return_unchecked(1, &mdev->packet_seq));
23552
23553 if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
23554 return false;
23555 @@ -2686,7 +2686,7 @@ int drbd_send_dblock(struct drbd_conf *m
23556 p.sector = cpu_to_be64(req->sector);
23557 p.block_id = (unsigned long)req;
23558 p.seq_num = cpu_to_be32(req->seq_num =
23559 - atomic_add_return(1, &mdev->packet_seq));
23560 + atomic_add_return_unchecked(1, &mdev->packet_seq));
23561
23562 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
23563
23564 @@ -2971,7 +2971,7 @@ void drbd_init_set_defaults(struct drbd_
23565 atomic_set(&mdev->unacked_cnt, 0);
23566 atomic_set(&mdev->local_cnt, 0);
23567 atomic_set(&mdev->net_cnt, 0);
23568 - atomic_set(&mdev->packet_seq, 0);
23569 + atomic_set_unchecked(&mdev->packet_seq, 0);
23570 atomic_set(&mdev->pp_in_use, 0);
23571 atomic_set(&mdev->pp_in_use_by_net, 0);
23572 atomic_set(&mdev->rs_sect_in, 0);
23573 @@ -3051,8 +3051,8 @@ void drbd_mdev_cleanup(struct drbd_conf
23574 mdev->receiver.t_state);
23575
23576 /* no need to lock it, I'm the only thread alive */
23577 - if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
23578 - dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));
23579 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size) != 0)
23580 + dev_err(DEV, "epoch_size:%d\n", atomic_read_unchecked(&mdev->current_epoch->epoch_size));
23581 mdev->al_writ_cnt =
23582 mdev->bm_writ_cnt =
23583 mdev->read_cnt =
23584 diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_nl.c linux-2.6.39.4/drivers/block/drbd/drbd_nl.c
23585 --- linux-2.6.39.4/drivers/block/drbd/drbd_nl.c 2011-05-19 00:06:34.000000000 -0400
23586 +++ linux-2.6.39.4/drivers/block/drbd/drbd_nl.c 2011-08-05 19:44:36.000000000 -0400
23587 @@ -2298,7 +2298,7 @@ static void drbd_connector_callback(stru
23588 module_put(THIS_MODULE);
23589 }
23590
23591 -static atomic_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23592 +static atomic_unchecked_t drbd_nl_seq = ATOMIC_INIT(2); /* two. */
23593
23594 static unsigned short *
23595 __tl_add_blob(unsigned short *tl, enum drbd_tags tag, const void *data,
23596 @@ -2369,7 +2369,7 @@ void drbd_bcast_state(struct drbd_conf *
23597 cn_reply->id.idx = CN_IDX_DRBD;
23598 cn_reply->id.val = CN_VAL_DRBD;
23599
23600 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23601 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23602 cn_reply->ack = 0; /* not used here. */
23603 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23604 (int)((char *)tl - (char *)reply->tag_list);
23605 @@ -2401,7 +2401,7 @@ void drbd_bcast_ev_helper(struct drbd_co
23606 cn_reply->id.idx = CN_IDX_DRBD;
23607 cn_reply->id.val = CN_VAL_DRBD;
23608
23609 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23610 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23611 cn_reply->ack = 0; /* not used here. */
23612 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23613 (int)((char *)tl - (char *)reply->tag_list);
23614 @@ -2479,7 +2479,7 @@ void drbd_bcast_ee(struct drbd_conf *mde
23615 cn_reply->id.idx = CN_IDX_DRBD;
23616 cn_reply->id.val = CN_VAL_DRBD;
23617
23618 - cn_reply->seq = atomic_add_return(1,&drbd_nl_seq);
23619 + cn_reply->seq = atomic_add_return_unchecked(1,&drbd_nl_seq);
23620 cn_reply->ack = 0; // not used here.
23621 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23622 (int)((char*)tl - (char*)reply->tag_list);
23623 @@ -2518,7 +2518,7 @@ void drbd_bcast_sync_progress(struct drb
23624 cn_reply->id.idx = CN_IDX_DRBD;
23625 cn_reply->id.val = CN_VAL_DRBD;
23626
23627 - cn_reply->seq = atomic_add_return(1, &drbd_nl_seq);
23628 + cn_reply->seq = atomic_add_return_unchecked(1, &drbd_nl_seq);
23629 cn_reply->ack = 0; /* not used here. */
23630 cn_reply->len = sizeof(struct drbd_nl_cfg_reply) +
23631 (int)((char *)tl - (char *)reply->tag_list);
23632 diff -urNp linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c
23633 --- linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c 2011-05-19 00:06:34.000000000 -0400
23634 +++ linux-2.6.39.4/drivers/block/drbd/drbd_receiver.c 2011-08-05 19:44:36.000000000 -0400
23635 @@ -894,7 +894,7 @@ retry:
23636 sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
23637 sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
23638
23639 - atomic_set(&mdev->packet_seq, 0);
23640 + atomic_set_unchecked(&mdev->packet_seq, 0);
23641 mdev->peer_seq = 0;
23642
23643 drbd_thread_start(&mdev->asender);
23644 @@ -990,7 +990,7 @@ static enum finish_epoch drbd_may_finish
23645 do {
23646 next_epoch = NULL;
23647
23648 - epoch_size = atomic_read(&epoch->epoch_size);
23649 + epoch_size = atomic_read_unchecked(&epoch->epoch_size);
23650
23651 switch (ev & ~EV_CLEANUP) {
23652 case EV_PUT:
23653 @@ -1025,7 +1025,7 @@ static enum finish_epoch drbd_may_finish
23654 rv = FE_DESTROYED;
23655 } else {
23656 epoch->flags = 0;
23657 - atomic_set(&epoch->epoch_size, 0);
23658 + atomic_set_unchecked(&epoch->epoch_size, 0);
23659 /* atomic_set(&epoch->active, 0); is already zero */
23660 if (rv == FE_STILL_LIVE)
23661 rv = FE_RECYCLED;
23662 @@ -1196,14 +1196,14 @@ static int receive_Barrier(struct drbd_c
23663 drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
23664 drbd_flush(mdev);
23665
23666 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
23667 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23668 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
23669 if (epoch)
23670 break;
23671 }
23672
23673 epoch = mdev->current_epoch;
23674 - wait_event(mdev->ee_wait, atomic_read(&epoch->epoch_size) == 0);
23675 + wait_event(mdev->ee_wait, atomic_read_unchecked(&epoch->epoch_size) == 0);
23676
23677 D_ASSERT(atomic_read(&epoch->active) == 0);
23678 D_ASSERT(epoch->flags == 0);
23679 @@ -1215,11 +1215,11 @@ static int receive_Barrier(struct drbd_c
23680 }
23681
23682 epoch->flags = 0;
23683 - atomic_set(&epoch->epoch_size, 0);
23684 + atomic_set_unchecked(&epoch->epoch_size, 0);
23685 atomic_set(&epoch->active, 0);
23686
23687 spin_lock(&mdev->epoch_lock);
23688 - if (atomic_read(&mdev->current_epoch->epoch_size)) {
23689 + if (atomic_read_unchecked(&mdev->current_epoch->epoch_size)) {
23690 list_add(&epoch->list, &mdev->current_epoch->list);
23691 mdev->current_epoch = epoch;
23692 mdev->epochs++;
23693 @@ -1668,7 +1668,7 @@ static int receive_Data(struct drbd_conf
23694 spin_unlock(&mdev->peer_seq_lock);
23695
23696 drbd_send_ack_dp(mdev, P_NEG_ACK, p, data_size);
23697 - atomic_inc(&mdev->current_epoch->epoch_size);
23698 + atomic_inc_unchecked(&mdev->current_epoch->epoch_size);
23699 return drbd_drain_block(mdev, data_size);
23700 }
23701
23702 @@ -1694,7 +1694,7 @@ static int receive_Data(struct drbd_conf
23703
23704 spin_lock(&mdev->epoch_lock);
23705 e->epoch = mdev->current_epoch;
23706 - atomic_inc(&e->epoch->epoch_size);
23707 + atomic_inc_unchecked(&e->epoch->epoch_size);
23708 atomic_inc(&e->epoch->active);
23709 spin_unlock(&mdev->epoch_lock);
23710
23711 @@ -3905,7 +3905,7 @@ static void drbd_disconnect(struct drbd_
23712 D_ASSERT(list_empty(&mdev->done_ee));
23713
23714 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
23715 - atomic_set(&mdev->current_epoch->epoch_size, 0);
23716 + atomic_set_unchecked(&mdev->current_epoch->epoch_size, 0);
23717 D_ASSERT(list_empty(&mdev->current_epoch->list));
23718 }
23719
23720 diff -urNp linux-2.6.39.4/drivers/block/nbd.c linux-2.6.39.4/drivers/block/nbd.c
23721 --- linux-2.6.39.4/drivers/block/nbd.c 2011-06-25 12:55:22.000000000 -0400
23722 +++ linux-2.6.39.4/drivers/block/nbd.c 2011-08-05 19:44:36.000000000 -0400
23723 @@ -157,6 +157,8 @@ static int sock_xmit(struct nbd_device *
23724 struct kvec iov;
23725 sigset_t blocked, oldset;
23726
23727 + pax_track_stack();
23728 +
23729 if (unlikely(!sock)) {
23730 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
23731 lo->disk->disk_name, (send ? "send" : "recv"));
23732 @@ -571,6 +573,8 @@ static void do_nbd_request(struct reques
23733 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
23734 unsigned int cmd, unsigned long arg)
23735 {
23736 + pax_track_stack();
23737 +
23738 switch (cmd) {
23739 case NBD_DISCONNECT: {
23740 struct request sreq;
23741 diff -urNp linux-2.6.39.4/drivers/char/agp/frontend.c linux-2.6.39.4/drivers/char/agp/frontend.c
23742 --- linux-2.6.39.4/drivers/char/agp/frontend.c 2011-05-19 00:06:34.000000000 -0400
23743 +++ linux-2.6.39.4/drivers/char/agp/frontend.c 2011-08-05 19:44:36.000000000 -0400
23744 @@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct ag
23745 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
23746 return -EFAULT;
23747
23748 - if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
23749 + if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
23750 return -EFAULT;
23751
23752 client = agp_find_client_by_pid(reserve.pid);
23753 diff -urNp linux-2.6.39.4/drivers/char/briq_panel.c linux-2.6.39.4/drivers/char/briq_panel.c
23754 --- linux-2.6.39.4/drivers/char/briq_panel.c 2011-05-19 00:06:34.000000000 -0400
23755 +++ linux-2.6.39.4/drivers/char/briq_panel.c 2011-08-05 19:44:36.000000000 -0400
23756 @@ -9,6 +9,7 @@
23757 #include <linux/types.h>
23758 #include <linux/errno.h>
23759 #include <linux/tty.h>
23760 +#include <linux/mutex.h>
23761 #include <linux/timer.h>
23762 #include <linux/kernel.h>
23763 #include <linux/wait.h>
23764 @@ -34,6 +35,7 @@ static int vfd_is_open;
23765 static unsigned char vfd[40];
23766 static int vfd_cursor;
23767 static unsigned char ledpb, led;
23768 +static DEFINE_MUTEX(vfd_mutex);
23769
23770 static void update_vfd(void)
23771 {
23772 @@ -140,12 +142,15 @@ static ssize_t briq_panel_write(struct f
23773 if (!vfd_is_open)
23774 return -EBUSY;
23775
23776 + mutex_lock(&vfd_mutex);
23777 for (;;) {
23778 char c;
23779 if (!indx)
23780 break;
23781 - if (get_user(c, buf))
23782 + if (get_user(c, buf)) {
23783 + mutex_unlock(&vfd_mutex);
23784 return -EFAULT;
23785 + }
23786 if (esc) {
23787 set_led(c);
23788 esc = 0;
23789 @@ -175,6 +180,7 @@ static ssize_t briq_panel_write(struct f
23790 buf++;
23791 }
23792 update_vfd();
23793 + mutex_unlock(&vfd_mutex);
23794
23795 return len;
23796 }
23797 diff -urNp linux-2.6.39.4/drivers/char/genrtc.c linux-2.6.39.4/drivers/char/genrtc.c
23798 --- linux-2.6.39.4/drivers/char/genrtc.c 2011-05-19 00:06:34.000000000 -0400
23799 +++ linux-2.6.39.4/drivers/char/genrtc.c 2011-08-05 19:44:36.000000000 -0400
23800 @@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *fi
23801 switch (cmd) {
23802
23803 case RTC_PLL_GET:
23804 + memset(&pll, 0, sizeof(pll));
23805 if (get_rtc_pll(&pll))
23806 return -EINVAL;
23807 else
23808 diff -urNp linux-2.6.39.4/drivers/char/hpet.c linux-2.6.39.4/drivers/char/hpet.c
23809 --- linux-2.6.39.4/drivers/char/hpet.c 2011-05-19 00:06:34.000000000 -0400
23810 +++ linux-2.6.39.4/drivers/char/hpet.c 2011-08-05 19:44:36.000000000 -0400
23811 @@ -553,7 +553,7 @@ static inline unsigned long hpet_time_di
23812 }
23813
23814 static int
23815 -hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
23816 +hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
23817 struct hpet_info *info)
23818 {
23819 struct hpet_timer __iomem *timer;
23820 diff -urNp linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c
23821 --- linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c 2011-05-19 00:06:34.000000000 -0400
23822 +++ linux-2.6.39.4/drivers/char/ipmi/ipmi_msghandler.c 2011-08-05 20:34:06.000000000 -0400
23823 @@ -414,7 +414,7 @@ struct ipmi_smi {
23824 struct proc_dir_entry *proc_dir;
23825 char proc_dir_name[10];
23826
23827 - atomic_t stats[IPMI_NUM_STATS];
23828 + atomic_unchecked_t stats[IPMI_NUM_STATS];
23829
23830 /*
23831 * run_to_completion duplicate of smb_info, smi_info
23832 @@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
23833
23834
23835 #define ipmi_inc_stat(intf, stat) \
23836 - atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
23837 + atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
23838 #define ipmi_get_stat(intf, stat) \
23839 - ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
23840 + ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
23841
23842 static int is_lan_addr(struct ipmi_addr *addr)
23843 {
23844 @@ -2844,7 +2844,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
23845 INIT_LIST_HEAD(&intf->cmd_rcvrs);
23846 init_waitqueue_head(&intf->waitq);
23847 for (i = 0; i < IPMI_NUM_STATS; i++)
23848 - atomic_set(&intf->stats[i], 0);
23849 + atomic_set_unchecked(&intf->stats[i], 0);
23850
23851 intf->proc_dir = NULL;
23852
23853 @@ -4196,6 +4196,8 @@ static void send_panic_events(char *str)
23854 struct ipmi_smi_msg smi_msg;
23855 struct ipmi_recv_msg recv_msg;
23856
23857 + pax_track_stack();
23858 +
23859 si = (struct ipmi_system_interface_addr *) &addr;
23860 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
23861 si->channel = IPMI_BMC_CHANNEL;
23862 diff -urNp linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c
23863 --- linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c 2011-05-19 00:06:34.000000000 -0400
23864 +++ linux-2.6.39.4/drivers/char/ipmi/ipmi_si_intf.c 2011-08-05 19:44:36.000000000 -0400
23865 @@ -276,7 +276,7 @@ struct smi_info {
23866 unsigned char slave_addr;
23867
23868 /* Counters and things for the proc filesystem. */
23869 - atomic_t stats[SI_NUM_STATS];
23870 + atomic_unchecked_t stats[SI_NUM_STATS];
23871
23872 struct task_struct *thread;
23873
23874 @@ -285,9 +285,9 @@ struct smi_info {
23875 };
23876
23877 #define smi_inc_stat(smi, stat) \
23878 - atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
23879 + atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
23880 #define smi_get_stat(smi, stat) \
23881 - ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
23882 + ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
23883
23884 #define SI_MAX_PARMS 4
23885
23886 @@ -3198,7 +3198,7 @@ static int try_smi_init(struct smi_info
23887 atomic_set(&new_smi->req_events, 0);
23888 new_smi->run_to_completion = 0;
23889 for (i = 0; i < SI_NUM_STATS; i++)
23890 - atomic_set(&new_smi->stats[i], 0);
23891 + atomic_set_unchecked(&new_smi->stats[i], 0);
23892
23893 new_smi->interrupt_disabled = 1;
23894 atomic_set(&new_smi->stop_operation, 0);
23895 diff -urNp linux-2.6.39.4/drivers/char/Kconfig linux-2.6.39.4/drivers/char/Kconfig
23896 --- linux-2.6.39.4/drivers/char/Kconfig 2011-05-19 00:06:34.000000000 -0400
23897 +++ linux-2.6.39.4/drivers/char/Kconfig 2011-08-05 19:44:36.000000000 -0400
23898 @@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
23899
23900 config DEVKMEM
23901 bool "/dev/kmem virtual device support"
23902 - default y
23903 + default n
23904 + depends on !GRKERNSEC_KMEM
23905 help
23906 Say Y here if you want to support the /dev/kmem device. The
23907 /dev/kmem device is rarely used, but can be used for certain
23908 @@ -596,6 +597,7 @@ config DEVPORT
23909 bool
23910 depends on !M68K
23911 depends on ISA || PCI
23912 + depends on !GRKERNSEC_KMEM
23913 default y
23914
23915 source "drivers/s390/char/Kconfig"
23916 diff -urNp linux-2.6.39.4/drivers/char/mem.c linux-2.6.39.4/drivers/char/mem.c
23917 --- linux-2.6.39.4/drivers/char/mem.c 2011-05-19 00:06:34.000000000 -0400
23918 +++ linux-2.6.39.4/drivers/char/mem.c 2011-08-05 19:44:36.000000000 -0400
23919 @@ -18,6 +18,7 @@
23920 #include <linux/raw.h>
23921 #include <linux/tty.h>
23922 #include <linux/capability.h>
23923 +#include <linux/security.h>
23924 #include <linux/ptrace.h>
23925 #include <linux/device.h>
23926 #include <linux/highmem.h>
23927 @@ -34,6 +35,10 @@
23928 # include <linux/efi.h>
23929 #endif
23930
23931 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
23932 +extern struct file_operations grsec_fops;
23933 +#endif
23934 +
23935 static inline unsigned long size_inside_page(unsigned long start,
23936 unsigned long size)
23937 {
23938 @@ -65,9 +70,13 @@ static inline int range_is_allowed(unsig
23939
23940 while (cursor < to) {
23941 if (!devmem_is_allowed(pfn)) {
23942 +#ifdef CONFIG_GRKERNSEC_KMEM
23943 + gr_handle_mem_readwrite(from, to);
23944 +#else
23945 printk(KERN_INFO
23946 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
23947 current->comm, from, to);
23948 +#endif
23949 return 0;
23950 }
23951 cursor += PAGE_SIZE;
23952 @@ -75,6 +84,11 @@ static inline int range_is_allowed(unsig
23953 }
23954 return 1;
23955 }
23956 +#elif defined(CONFIG_GRKERNSEC_KMEM)
23957 +static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23958 +{
23959 + return 0;
23960 +}
23961 #else
23962 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
23963 {
23964 @@ -117,6 +131,7 @@ static ssize_t read_mem(struct file *fil
23965
23966 while (count > 0) {
23967 unsigned long remaining;
23968 + char *temp;
23969
23970 sz = size_inside_page(p, count);
23971
23972 @@ -132,7 +147,23 @@ static ssize_t read_mem(struct file *fil
23973 if (!ptr)
23974 return -EFAULT;
23975
23976 - remaining = copy_to_user(buf, ptr, sz);
23977 +#ifdef CONFIG_PAX_USERCOPY
23978 + temp = kmalloc(sz, GFP_KERNEL);
23979 + if (!temp) {
23980 + unxlate_dev_mem_ptr(p, ptr);
23981 + return -ENOMEM;
23982 + }
23983 + memcpy(temp, ptr, sz);
23984 +#else
23985 + temp = ptr;
23986 +#endif
23987 +
23988 + remaining = copy_to_user(buf, temp, sz);
23989 +
23990 +#ifdef CONFIG_PAX_USERCOPY
23991 + kfree(temp);
23992 +#endif
23993 +
23994 unxlate_dev_mem_ptr(p, ptr);
23995 if (remaining)
23996 return -EFAULT;
23997 @@ -395,9 +426,8 @@ static ssize_t read_kmem(struct file *fi
23998 size_t count, loff_t *ppos)
23999 {
24000 unsigned long p = *ppos;
24001 - ssize_t low_count, read, sz;
24002 + ssize_t low_count, read, sz, err = 0;
24003 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
24004 - int err = 0;
24005
24006 read = 0;
24007 if (p < (unsigned long) high_memory) {
24008 @@ -419,6 +449,8 @@ static ssize_t read_kmem(struct file *fi
24009 }
24010 #endif
24011 while (low_count > 0) {
24012 + char *temp;
24013 +
24014 sz = size_inside_page(p, low_count);
24015
24016 /*
24017 @@ -428,7 +460,22 @@ static ssize_t read_kmem(struct file *fi
24018 */
24019 kbuf = xlate_dev_kmem_ptr((char *)p);
24020
24021 - if (copy_to_user(buf, kbuf, sz))
24022 +#ifdef CONFIG_PAX_USERCOPY
24023 + temp = kmalloc(sz, GFP_KERNEL);
24024 + if (!temp)
24025 + return -ENOMEM;
24026 + memcpy(temp, kbuf, sz);
24027 +#else
24028 + temp = kbuf;
24029 +#endif
24030 +
24031 + err = copy_to_user(buf, temp, sz);
24032 +
24033 +#ifdef CONFIG_PAX_USERCOPY
24034 + kfree(temp);
24035 +#endif
24036 +
24037 + if (err)
24038 return -EFAULT;
24039 buf += sz;
24040 p += sz;
24041 @@ -854,6 +901,9 @@ static const struct memdev {
24042 #ifdef CONFIG_CRASH_DUMP
24043 [12] = { "oldmem", 0, &oldmem_fops, NULL },
24044 #endif
24045 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
24046 + [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
24047 +#endif
24048 };
24049
24050 static int memory_open(struct inode *inode, struct file *filp)
24051 diff -urNp linux-2.6.39.4/drivers/char/nvram.c linux-2.6.39.4/drivers/char/nvram.c
24052 --- linux-2.6.39.4/drivers/char/nvram.c 2011-05-19 00:06:34.000000000 -0400
24053 +++ linux-2.6.39.4/drivers/char/nvram.c 2011-08-05 19:44:36.000000000 -0400
24054 @@ -246,7 +246,7 @@ static ssize_t nvram_read(struct file *f
24055
24056 spin_unlock_irq(&rtc_lock);
24057
24058 - if (copy_to_user(buf, contents, tmp - contents))
24059 + if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
24060 return -EFAULT;
24061
24062 *ppos = i;
24063 diff -urNp linux-2.6.39.4/drivers/char/random.c linux-2.6.39.4/drivers/char/random.c
24064 --- linux-2.6.39.4/drivers/char/random.c 2011-05-19 00:06:34.000000000 -0400
24065 +++ linux-2.6.39.4/drivers/char/random.c 2011-08-05 19:44:36.000000000 -0400
24066 @@ -261,8 +261,13 @@
24067 /*
24068 * Configuration information
24069 */
24070 +#ifdef CONFIG_GRKERNSEC_RANDNET
24071 +#define INPUT_POOL_WORDS 512
24072 +#define OUTPUT_POOL_WORDS 128
24073 +#else
24074 #define INPUT_POOL_WORDS 128
24075 #define OUTPUT_POOL_WORDS 32
24076 +#endif
24077 #define SEC_XFER_SIZE 512
24078 #define EXTRACT_SIZE 10
24079
24080 @@ -300,10 +305,17 @@ static struct poolinfo {
24081 int poolwords;
24082 int tap1, tap2, tap3, tap4, tap5;
24083 } poolinfo_table[] = {
24084 +#ifdef CONFIG_GRKERNSEC_RANDNET
24085 + /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
24086 + { 512, 411, 308, 208, 104, 1 },
24087 + /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
24088 + { 128, 103, 76, 51, 25, 1 },
24089 +#else
24090 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
24091 { 128, 103, 76, 51, 25, 1 },
24092 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
24093 { 32, 26, 20, 14, 7, 1 },
24094 +#endif
24095 #if 0
24096 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
24097 { 2048, 1638, 1231, 819, 411, 1 },
24098 @@ -909,7 +921,7 @@ static ssize_t extract_entropy_user(stru
24099
24100 extract_buf(r, tmp);
24101 i = min_t(int, nbytes, EXTRACT_SIZE);
24102 - if (copy_to_user(buf, tmp, i)) {
24103 + if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
24104 ret = -EFAULT;
24105 break;
24106 }
24107 @@ -1214,7 +1226,7 @@ EXPORT_SYMBOL(generate_random_uuid);
24108 #include <linux/sysctl.h>
24109
24110 static int min_read_thresh = 8, min_write_thresh;
24111 -static int max_read_thresh = INPUT_POOL_WORDS * 32;
24112 +static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
24113 static int max_write_thresh = INPUT_POOL_WORDS * 32;
24114 static char sysctl_bootid[16];
24115
24116 diff -urNp linux-2.6.39.4/drivers/char/sonypi.c linux-2.6.39.4/drivers/char/sonypi.c
24117 --- linux-2.6.39.4/drivers/char/sonypi.c 2011-05-19 00:06:34.000000000 -0400
24118 +++ linux-2.6.39.4/drivers/char/sonypi.c 2011-08-05 19:44:36.000000000 -0400
24119 @@ -55,6 +55,7 @@
24120 #include <asm/uaccess.h>
24121 #include <asm/io.h>
24122 #include <asm/system.h>
24123 +#include <asm/local.h>
24124
24125 #include <linux/sonypi.h>
24126
24127 @@ -491,7 +492,7 @@ static struct sonypi_device {
24128 spinlock_t fifo_lock;
24129 wait_queue_head_t fifo_proc_list;
24130 struct fasync_struct *fifo_async;
24131 - int open_count;
24132 + local_t open_count;
24133 int model;
24134 struct input_dev *input_jog_dev;
24135 struct input_dev *input_key_dev;
24136 @@ -898,7 +899,7 @@ static int sonypi_misc_fasync(int fd, st
24137 static int sonypi_misc_release(struct inode *inode, struct file *file)
24138 {
24139 mutex_lock(&sonypi_device.lock);
24140 - sonypi_device.open_count--;
24141 + local_dec(&sonypi_device.open_count);
24142 mutex_unlock(&sonypi_device.lock);
24143 return 0;
24144 }
24145 @@ -907,9 +908,9 @@ static int sonypi_misc_open(struct inode
24146 {
24147 mutex_lock(&sonypi_device.lock);
24148 /* Flush input queue on first open */
24149 - if (!sonypi_device.open_count)
24150 + if (!local_read(&sonypi_device.open_count))
24151 kfifo_reset(&sonypi_device.fifo);
24152 - sonypi_device.open_count++;
24153 + local_inc(&sonypi_device.open_count);
24154 mutex_unlock(&sonypi_device.lock);
24155
24156 return 0;
24157 diff -urNp linux-2.6.39.4/drivers/char/tpm/tpm_bios.c linux-2.6.39.4/drivers/char/tpm/tpm_bios.c
24158 --- linux-2.6.39.4/drivers/char/tpm/tpm_bios.c 2011-05-19 00:06:34.000000000 -0400
24159 +++ linux-2.6.39.4/drivers/char/tpm/tpm_bios.c 2011-08-05 19:44:36.000000000 -0400
24160 @@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start
24161 event = addr;
24162
24163 if ((event->event_type == 0 && event->event_size == 0) ||
24164 - ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
24165 + (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
24166 return NULL;
24167
24168 return addr;
24169 @@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next(
24170 return NULL;
24171
24172 if ((event->event_type == 0 && event->event_size == 0) ||
24173 - ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
24174 + (event->event_size >= limit - v - sizeof(struct tcpa_event)))
24175 return NULL;
24176
24177 (*pos)++;
24178 @@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_
24179 int i;
24180
24181 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
24182 - seq_putc(m, data[i]);
24183 + if (!seq_putc(m, data[i]))
24184 + return -EFAULT;
24185
24186 return 0;
24187 }
24188 @@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log
24189 log->bios_event_log_end = log->bios_event_log + len;
24190
24191 virt = acpi_os_map_memory(start, len);
24192 + if (!virt) {
24193 + kfree(log->bios_event_log);
24194 + log->bios_event_log = NULL;
24195 + return -EFAULT;
24196 + }
24197
24198 memcpy(log->bios_event_log, virt, len);
24199
24200 diff -urNp linux-2.6.39.4/drivers/char/tpm/tpm.c linux-2.6.39.4/drivers/char/tpm/tpm.c
24201 --- linux-2.6.39.4/drivers/char/tpm/tpm.c 2011-05-19 00:06:34.000000000 -0400
24202 +++ linux-2.6.39.4/drivers/char/tpm/tpm.c 2011-08-05 19:44:36.000000000 -0400
24203 @@ -411,7 +411,7 @@ static ssize_t tpm_transmit(struct tpm_c
24204 chip->vendor.req_complete_val)
24205 goto out_recv;
24206
24207 - if ((status == chip->vendor.req_canceled)) {
24208 + if (status == chip->vendor.req_canceled) {
24209 dev_err(chip->dev, "Operation Canceled\n");
24210 rc = -ECANCELED;
24211 goto out;
24212 @@ -844,6 +844,8 @@ ssize_t tpm_show_pubek(struct device *de
24213
24214 struct tpm_chip *chip = dev_get_drvdata(dev);
24215
24216 + pax_track_stack();
24217 +
24218 tpm_cmd.header.in = tpm_readpubek_header;
24219 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
24220 "attempting to read the PUBEK");
24221 diff -urNp linux-2.6.39.4/drivers/crypto/hifn_795x.c linux-2.6.39.4/drivers/crypto/hifn_795x.c
24222 --- linux-2.6.39.4/drivers/crypto/hifn_795x.c 2011-05-19 00:06:34.000000000 -0400
24223 +++ linux-2.6.39.4/drivers/crypto/hifn_795x.c 2011-08-05 19:44:36.000000000 -0400
24224 @@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
24225 0xCA, 0x34, 0x2B, 0x2E};
24226 struct scatterlist sg;
24227
24228 + pax_track_stack();
24229 +
24230 memset(src, 0, sizeof(src));
24231 memset(ctx.key, 0, sizeof(ctx.key));
24232
24233 diff -urNp linux-2.6.39.4/drivers/crypto/padlock-aes.c linux-2.6.39.4/drivers/crypto/padlock-aes.c
24234 --- linux-2.6.39.4/drivers/crypto/padlock-aes.c 2011-05-19 00:06:34.000000000 -0400
24235 +++ linux-2.6.39.4/drivers/crypto/padlock-aes.c 2011-08-05 19:44:36.000000000 -0400
24236 @@ -109,6 +109,8 @@ static int aes_set_key(struct crypto_tfm
24237 struct crypto_aes_ctx gen_aes;
24238 int cpu;
24239
24240 + pax_track_stack();
24241 +
24242 if (key_len % 8) {
24243 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
24244 return -EINVAL;
24245 diff -urNp linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c
24246 --- linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c 2011-05-19 00:06:34.000000000 -0400
24247 +++ linux-2.6.39.4/drivers/edac/edac_pci_sysfs.c 2011-08-05 19:44:36.000000000 -0400
24248 @@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log
24249 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
24250 static int edac_pci_poll_msec = 1000; /* one second workq period */
24251
24252 -static atomic_t pci_parity_count = ATOMIC_INIT(0);
24253 -static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
24254 +static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
24255 +static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
24256
24257 static struct kobject *edac_pci_top_main_kobj;
24258 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
24259 @@ -582,7 +582,7 @@ static void edac_pci_dev_parity_test(str
24260 edac_printk(KERN_CRIT, EDAC_PCI,
24261 "Signaled System Error on %s\n",
24262 pci_name(dev));
24263 - atomic_inc(&pci_nonparity_count);
24264 + atomic_inc_unchecked(&pci_nonparity_count);
24265 }
24266
24267 if (status & (PCI_STATUS_PARITY)) {
24268 @@ -590,7 +590,7 @@ static void edac_pci_dev_parity_test(str
24269 "Master Data Parity Error on %s\n",
24270 pci_name(dev));
24271
24272 - atomic_inc(&pci_parity_count);
24273 + atomic_inc_unchecked(&pci_parity_count);
24274 }
24275
24276 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24277 @@ -598,7 +598,7 @@ static void edac_pci_dev_parity_test(str
24278 "Detected Parity Error on %s\n",
24279 pci_name(dev));
24280
24281 - atomic_inc(&pci_parity_count);
24282 + atomic_inc_unchecked(&pci_parity_count);
24283 }
24284 }
24285
24286 @@ -619,7 +619,7 @@ static void edac_pci_dev_parity_test(str
24287 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
24288 "Signaled System Error on %s\n",
24289 pci_name(dev));
24290 - atomic_inc(&pci_nonparity_count);
24291 + atomic_inc_unchecked(&pci_nonparity_count);
24292 }
24293
24294 if (status & (PCI_STATUS_PARITY)) {
24295 @@ -627,7 +627,7 @@ static void edac_pci_dev_parity_test(str
24296 "Master Data Parity Error on "
24297 "%s\n", pci_name(dev));
24298
24299 - atomic_inc(&pci_parity_count);
24300 + atomic_inc_unchecked(&pci_parity_count);
24301 }
24302
24303 if (status & (PCI_STATUS_DETECTED_PARITY)) {
24304 @@ -635,7 +635,7 @@ static void edac_pci_dev_parity_test(str
24305 "Detected Parity Error on %s\n",
24306 pci_name(dev));
24307
24308 - atomic_inc(&pci_parity_count);
24309 + atomic_inc_unchecked(&pci_parity_count);
24310 }
24311 }
24312 }
24313 @@ -677,7 +677,7 @@ void edac_pci_do_parity_check(void)
24314 if (!check_pci_errors)
24315 return;
24316
24317 - before_count = atomic_read(&pci_parity_count);
24318 + before_count = atomic_read_unchecked(&pci_parity_count);
24319
24320 /* scan all PCI devices looking for a Parity Error on devices and
24321 * bridges.
24322 @@ -689,7 +689,7 @@ void edac_pci_do_parity_check(void)
24323 /* Only if operator has selected panic on PCI Error */
24324 if (edac_pci_get_panic_on_pe()) {
24325 /* If the count is different 'after' from 'before' */
24326 - if (before_count != atomic_read(&pci_parity_count))
24327 + if (before_count != atomic_read_unchecked(&pci_parity_count))
24328 panic("EDAC: PCI Parity Error");
24329 }
24330 }
24331 diff -urNp linux-2.6.39.4/drivers/edac/i7core_edac.c linux-2.6.39.4/drivers/edac/i7core_edac.c
24332 --- linux-2.6.39.4/drivers/edac/i7core_edac.c 2011-05-19 00:06:34.000000000 -0400
24333 +++ linux-2.6.39.4/drivers/edac/i7core_edac.c 2011-08-05 19:44:36.000000000 -0400
24334 @@ -1670,7 +1670,7 @@ static void i7core_mce_output_error(stru
24335 char *type, *optype, *err, *msg;
24336 unsigned long error = m->status & 0x1ff0000l;
24337 u32 optypenum = (m->status >> 4) & 0x07;
24338 - u32 core_err_cnt = (m->status >> 38) && 0x7fff;
24339 + u32 core_err_cnt = (m->status >> 38) & 0x7fff;
24340 u32 dimm = (m->misc >> 16) & 0x3;
24341 u32 channel = (m->misc >> 18) & 0x3;
24342 u32 syndrome = m->misc >> 32;
24343 diff -urNp linux-2.6.39.4/drivers/edac/mce_amd.h linux-2.6.39.4/drivers/edac/mce_amd.h
24344 --- linux-2.6.39.4/drivers/edac/mce_amd.h 2011-05-19 00:06:34.000000000 -0400
24345 +++ linux-2.6.39.4/drivers/edac/mce_amd.h 2011-08-05 20:34:06.000000000 -0400
24346 @@ -83,7 +83,7 @@ struct amd_decoder_ops {
24347 bool (*dc_mce)(u16, u8);
24348 bool (*ic_mce)(u16, u8);
24349 bool (*nb_mce)(u16, u8);
24350 -};
24351 +} __no_const;
24352
24353 void amd_report_gart_errors(bool);
24354 void amd_register_ecc_decoder(void (*f)(int, struct mce *, u32));
24355 diff -urNp linux-2.6.39.4/drivers/firewire/core-card.c linux-2.6.39.4/drivers/firewire/core-card.c
24356 --- linux-2.6.39.4/drivers/firewire/core-card.c 2011-05-19 00:06:34.000000000 -0400
24357 +++ linux-2.6.39.4/drivers/firewire/core-card.c 2011-08-05 20:34:06.000000000 -0400
24358 @@ -652,7 +652,7 @@ void fw_card_release(struct kref *kref)
24359
24360 void fw_core_remove_card(struct fw_card *card)
24361 {
24362 - struct fw_card_driver dummy_driver = dummy_driver_template;
24363 + fw_card_driver_no_const dummy_driver = dummy_driver_template;
24364
24365 card->driver->update_phy_reg(card, 4,
24366 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
24367 diff -urNp linux-2.6.39.4/drivers/firewire/core-cdev.c linux-2.6.39.4/drivers/firewire/core-cdev.c
24368 --- linux-2.6.39.4/drivers/firewire/core-cdev.c 2011-05-19 00:06:34.000000000 -0400
24369 +++ linux-2.6.39.4/drivers/firewire/core-cdev.c 2011-08-05 19:44:36.000000000 -0400
24370 @@ -1312,8 +1312,7 @@ static int init_iso_resource(struct clie
24371 int ret;
24372
24373 if ((request->channels == 0 && request->bandwidth == 0) ||
24374 - request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
24375 - request->bandwidth < 0)
24376 + request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
24377 return -EINVAL;
24378
24379 r = kmalloc(sizeof(*r), GFP_KERNEL);
24380 diff -urNp linux-2.6.39.4/drivers/firewire/core.h linux-2.6.39.4/drivers/firewire/core.h
24381 --- linux-2.6.39.4/drivers/firewire/core.h 2011-05-19 00:06:34.000000000 -0400
24382 +++ linux-2.6.39.4/drivers/firewire/core.h 2011-08-05 20:34:06.000000000 -0400
24383 @@ -99,6 +99,7 @@ struct fw_card_driver {
24384
24385 int (*stop_iso)(struct fw_iso_context *ctx);
24386 };
24387 +typedef struct fw_card_driver __no_const fw_card_driver_no_const;
24388
24389 void fw_card_initialize(struct fw_card *card,
24390 const struct fw_card_driver *driver, struct device *device);
24391 diff -urNp linux-2.6.39.4/drivers/firewire/core-transaction.c linux-2.6.39.4/drivers/firewire/core-transaction.c
24392 --- linux-2.6.39.4/drivers/firewire/core-transaction.c 2011-05-19 00:06:34.000000000 -0400
24393 +++ linux-2.6.39.4/drivers/firewire/core-transaction.c 2011-08-05 19:44:36.000000000 -0400
24394 @@ -36,6 +36,7 @@
24395 #include <linux/string.h>
24396 #include <linux/timer.h>
24397 #include <linux/types.h>
24398 +#include <linux/sched.h>
24399
24400 #include <asm/byteorder.h>
24401
24402 @@ -420,6 +421,8 @@ int fw_run_transaction(struct fw_card *c
24403 struct transaction_callback_data d;
24404 struct fw_transaction t;
24405
24406 + pax_track_stack();
24407 +
24408 init_timer_on_stack(&t.split_timeout_timer);
24409 init_completion(&d.done);
24410 d.payload = payload;
24411 diff -urNp linux-2.6.39.4/drivers/firmware/dmi_scan.c linux-2.6.39.4/drivers/firmware/dmi_scan.c
24412 --- linux-2.6.39.4/drivers/firmware/dmi_scan.c 2011-05-19 00:06:34.000000000 -0400
24413 +++ linux-2.6.39.4/drivers/firmware/dmi_scan.c 2011-08-05 19:44:36.000000000 -0400
24414 @@ -449,11 +449,6 @@ void __init dmi_scan_machine(void)
24415 }
24416 }
24417 else {
24418 - /*
24419 - * no iounmap() for that ioremap(); it would be a no-op, but
24420 - * it's so early in setup that sucker gets confused into doing
24421 - * what it shouldn't if we actually call it.
24422 - */
24423 p = dmi_ioremap(0xF0000, 0x10000);
24424 if (p == NULL)
24425 goto error;
24426 diff -urNp linux-2.6.39.4/drivers/gpio/vr41xx_giu.c linux-2.6.39.4/drivers/gpio/vr41xx_giu.c
24427 --- linux-2.6.39.4/drivers/gpio/vr41xx_giu.c 2011-05-19 00:06:34.000000000 -0400
24428 +++ linux-2.6.39.4/drivers/gpio/vr41xx_giu.c 2011-08-05 19:44:36.000000000 -0400
24429 @@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
24430 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
24431 maskl, pendl, maskh, pendh);
24432
24433 - atomic_inc(&irq_err_count);
24434 + atomic_inc_unchecked(&irq_err_count);
24435
24436 return -EINVAL;
24437 }
24438 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c
24439 --- linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c 2011-05-19 00:06:34.000000000 -0400
24440 +++ linux-2.6.39.4/drivers/gpu/drm/drm_crtc_helper.c 2011-08-05 19:44:36.000000000 -0400
24441 @@ -276,7 +276,7 @@ static bool drm_encoder_crtc_ok(struct d
24442 struct drm_crtc *tmp;
24443 int crtc_mask = 1;
24444
24445 - WARN(!crtc, "checking null crtc?\n");
24446 + BUG_ON(!crtc);
24447
24448 dev = crtc->dev;
24449
24450 @@ -343,6 +343,8 @@ bool drm_crtc_helper_set_mode(struct drm
24451 struct drm_encoder *encoder;
24452 bool ret = true;
24453
24454 + pax_track_stack();
24455 +
24456 crtc->enabled = drm_helper_crtc_in_use(crtc);
24457 if (!crtc->enabled)
24458 return true;
24459 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_drv.c linux-2.6.39.4/drivers/gpu/drm/drm_drv.c
24460 --- linux-2.6.39.4/drivers/gpu/drm/drm_drv.c 2011-05-19 00:06:34.000000000 -0400
24461 +++ linux-2.6.39.4/drivers/gpu/drm/drm_drv.c 2011-08-05 19:44:36.000000000 -0400
24462 @@ -386,7 +386,7 @@ long drm_ioctl(struct file *filp,
24463
24464 dev = file_priv->minor->dev;
24465 atomic_inc(&dev->ioctl_count);
24466 - atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
24467 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
24468 ++file_priv->ioctl_count;
24469
24470 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
24471 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_fops.c linux-2.6.39.4/drivers/gpu/drm/drm_fops.c
24472 --- linux-2.6.39.4/drivers/gpu/drm/drm_fops.c 2011-05-19 00:06:34.000000000 -0400
24473 +++ linux-2.6.39.4/drivers/gpu/drm/drm_fops.c 2011-08-05 19:44:36.000000000 -0400
24474 @@ -70,7 +70,7 @@ static int drm_setup(struct drm_device *
24475 }
24476
24477 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
24478 - atomic_set(&dev->counts[i], 0);
24479 + atomic_set_unchecked(&dev->counts[i], 0);
24480
24481 dev->sigdata.lock = NULL;
24482
24483 @@ -134,8 +134,8 @@ int drm_open(struct inode *inode, struct
24484
24485 retcode = drm_open_helper(inode, filp, dev);
24486 if (!retcode) {
24487 - atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
24488 - if (!dev->open_count++)
24489 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
24490 + if (local_inc_return(&dev->open_count) == 1)
24491 retcode = drm_setup(dev);
24492 }
24493 if (!retcode) {
24494 @@ -472,7 +472,7 @@ int drm_release(struct inode *inode, str
24495
24496 mutex_lock(&drm_global_mutex);
24497
24498 - DRM_DEBUG("open_count = %d\n", dev->open_count);
24499 + DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
24500
24501 if (dev->driver->preclose)
24502 dev->driver->preclose(dev, file_priv);
24503 @@ -484,7 +484,7 @@ int drm_release(struct inode *inode, str
24504 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
24505 task_pid_nr(current),
24506 (long)old_encode_dev(file_priv->minor->device),
24507 - dev->open_count);
24508 + local_read(&dev->open_count));
24509
24510 /* if the master has gone away we can't do anything with the lock */
24511 if (file_priv->minor->master)
24512 @@ -565,8 +565,8 @@ int drm_release(struct inode *inode, str
24513 * End inline drm_release
24514 */
24515
24516 - atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
24517 - if (!--dev->open_count) {
24518 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
24519 + if (local_dec_and_test(&dev->open_count)) {
24520 if (atomic_read(&dev->ioctl_count)) {
24521 DRM_ERROR("Device busy: %d\n",
24522 atomic_read(&dev->ioctl_count));
24523 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_global.c linux-2.6.39.4/drivers/gpu/drm/drm_global.c
24524 --- linux-2.6.39.4/drivers/gpu/drm/drm_global.c 2011-05-19 00:06:34.000000000 -0400
24525 +++ linux-2.6.39.4/drivers/gpu/drm/drm_global.c 2011-08-05 19:44:36.000000000 -0400
24526 @@ -36,7 +36,7 @@
24527 struct drm_global_item {
24528 struct mutex mutex;
24529 void *object;
24530 - int refcount;
24531 + atomic_t refcount;
24532 };
24533
24534 static struct drm_global_item glob[DRM_GLOBAL_NUM];
24535 @@ -49,7 +49,7 @@ void drm_global_init(void)
24536 struct drm_global_item *item = &glob[i];
24537 mutex_init(&item->mutex);
24538 item->object = NULL;
24539 - item->refcount = 0;
24540 + atomic_set(&item->refcount, 0);
24541 }
24542 }
24543
24544 @@ -59,7 +59,7 @@ void drm_global_release(void)
24545 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
24546 struct drm_global_item *item = &glob[i];
24547 BUG_ON(item->object != NULL);
24548 - BUG_ON(item->refcount != 0);
24549 + BUG_ON(atomic_read(&item->refcount) != 0);
24550 }
24551 }
24552
24553 @@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_globa
24554 void *object;
24555
24556 mutex_lock(&item->mutex);
24557 - if (item->refcount == 0) {
24558 + if (atomic_read(&item->refcount) == 0) {
24559 item->object = kzalloc(ref->size, GFP_KERNEL);
24560 if (unlikely(item->object == NULL)) {
24561 ret = -ENOMEM;
24562 @@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_globa
24563 goto out_err;
24564
24565 }
24566 - ++item->refcount;
24567 + atomic_inc(&item->refcount);
24568 ref->object = item->object;
24569 object = item->object;
24570 mutex_unlock(&item->mutex);
24571 @@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_gl
24572 struct drm_global_item *item = &glob[ref->global_type];
24573
24574 mutex_lock(&item->mutex);
24575 - BUG_ON(item->refcount == 0);
24576 + BUG_ON(atomic_read(&item->refcount) == 0);
24577 BUG_ON(ref->object != item->object);
24578 - if (--item->refcount == 0) {
24579 + if (atomic_dec_and_test(&item->refcount)) {
24580 ref->release(ref);
24581 item->object = NULL;
24582 }
24583 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_info.c linux-2.6.39.4/drivers/gpu/drm/drm_info.c
24584 --- linux-2.6.39.4/drivers/gpu/drm/drm_info.c 2011-05-19 00:06:34.000000000 -0400
24585 +++ linux-2.6.39.4/drivers/gpu/drm/drm_info.c 2011-08-05 19:44:36.000000000 -0400
24586 @@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
24587 struct drm_local_map *map;
24588 struct drm_map_list *r_list;
24589
24590 - /* Hardcoded from _DRM_FRAME_BUFFER,
24591 - _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
24592 - _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
24593 - const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
24594 + static const char * const types[] = {
24595 + [_DRM_FRAME_BUFFER] = "FB",
24596 + [_DRM_REGISTERS] = "REG",
24597 + [_DRM_SHM] = "SHM",
24598 + [_DRM_AGP] = "AGP",
24599 + [_DRM_SCATTER_GATHER] = "SG",
24600 + [_DRM_CONSISTENT] = "PCI",
24601 + [_DRM_GEM] = "GEM" };
24602 const char *type;
24603 int i;
24604
24605 @@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
24606 map = r_list->map;
24607 if (!map)
24608 continue;
24609 - if (map->type < 0 || map->type > 5)
24610 + if (map->type >= ARRAY_SIZE(types))
24611 type = "??";
24612 else
24613 type = types[map->type];
24614 @@ -290,7 +294,11 @@ int drm_vma_info(struct seq_file *m, voi
24615 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
24616 vma->vm_flags & VM_LOCKED ? 'l' : '-',
24617 vma->vm_flags & VM_IO ? 'i' : '-',
24618 +#ifdef CONFIG_GRKERNSEC_HIDESYM
24619 + 0);
24620 +#else
24621 vma->vm_pgoff);
24622 +#endif
24623
24624 #if defined(__i386__)
24625 pgprot = pgprot_val(vma->vm_page_prot);
24626 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c
24627 --- linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c 2011-05-19 00:06:34.000000000 -0400
24628 +++ linux-2.6.39.4/drivers/gpu/drm/drm_ioctl.c 2011-08-05 19:44:36.000000000 -0400
24629 @@ -256,7 +256,7 @@ int drm_getstats(struct drm_device *dev,
24630 stats->data[i].value =
24631 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
24632 else
24633 - stats->data[i].value = atomic_read(&dev->counts[i]);
24634 + stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
24635 stats->data[i].type = dev->types[i];
24636 }
24637
24638 diff -urNp linux-2.6.39.4/drivers/gpu/drm/drm_lock.c linux-2.6.39.4/drivers/gpu/drm/drm_lock.c
24639 --- linux-2.6.39.4/drivers/gpu/drm/drm_lock.c 2011-05-19 00:06:34.000000000 -0400
24640 +++ linux-2.6.39.4/drivers/gpu/drm/drm_lock.c 2011-08-05 19:44:36.000000000 -0400
24641 @@ -89,7 +89,7 @@ int drm_lock(struct drm_device *dev, voi
24642 if (drm_lock_take(&master->lock, lock->context)) {
24643 master->lock.file_priv = file_priv;
24644 master->lock.lock_time = jiffies;
24645 - atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
24646 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
24647 break; /* Got lock */
24648 }
24649
24650 @@ -160,7 +160,7 @@ int drm_unlock(struct drm_device *dev, v
24651 return -EINVAL;
24652 }
24653
24654 - atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
24655 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
24656
24657 if (drm_lock_free(&master->lock, lock->context)) {
24658 /* FIXME: Should really bail out here. */
24659 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c
24660 --- linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c 2011-05-19 00:06:34.000000000 -0400
24661 +++ linux-2.6.39.4/drivers/gpu/drm/i810/i810_dma.c 2011-08-05 19:44:36.000000000 -0400
24662 @@ -950,8 +950,8 @@ static int i810_dma_vertex(struct drm_de
24663 dma->buflist[vertex->idx],
24664 vertex->discard, vertex->used);
24665
24666 - atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24667 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24668 + atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
24669 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24670 sarea_priv->last_enqueue = dev_priv->counter - 1;
24671 sarea_priv->last_dispatch = (int)hw_status[5];
24672
24673 @@ -1111,8 +1111,8 @@ static int i810_dma_mc(struct drm_device
24674 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
24675 mc->last_render);
24676
24677 - atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24678 - atomic_inc(&dev->counts[_DRM_STAT_DMA]);
24679 + atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
24680 + atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
24681 sarea_priv->last_enqueue = dev_priv->counter - 1;
24682 sarea_priv->last_dispatch = (int)hw_status[5];
24683
24684 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h
24685 --- linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h 2011-05-19 00:06:34.000000000 -0400
24686 +++ linux-2.6.39.4/drivers/gpu/drm/i810/i810_drv.h 2011-08-05 19:44:36.000000000 -0400
24687 @@ -108,8 +108,8 @@ typedef struct drm_i810_private {
24688 int page_flipping;
24689
24690 wait_queue_head_t irq_queue;
24691 - atomic_t irq_received;
24692 - atomic_t irq_emitted;
24693 + atomic_unchecked_t irq_received;
24694 + atomic_unchecked_t irq_emitted;
24695
24696 int front_offset;
24697 } drm_i810_private_t;
24698 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c
24699 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-19 00:06:34.000000000 -0400
24700 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_debugfs.c 2011-08-05 19:44:36.000000000 -0400
24701 @@ -496,7 +496,7 @@ static int i915_interrupt_info(struct se
24702 I915_READ(GTIMR));
24703 }
24704 seq_printf(m, "Interrupts received: %d\n",
24705 - atomic_read(&dev_priv->irq_received));
24706 + atomic_read_unchecked(&dev_priv->irq_received));
24707 for (i = 0; i < I915_NUM_RINGS; i++) {
24708 if (IS_GEN6(dev)) {
24709 seq_printf(m, "Graphics Interrupt mask (%s): %08x\n",
24710 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c
24711 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c 2011-05-19 00:06:34.000000000 -0400
24712 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_dma.c 2011-08-05 19:44:36.000000000 -0400
24713 @@ -1171,7 +1171,7 @@ static bool i915_switcheroo_can_switch(s
24714 bool can_switch;
24715
24716 spin_lock(&dev->count_lock);
24717 - can_switch = (dev->open_count == 0);
24718 + can_switch = (local_read(&dev->open_count) == 0);
24719 spin_unlock(&dev->count_lock);
24720 return can_switch;
24721 }
24722 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h
24723 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h 2011-05-19 00:06:34.000000000 -0400
24724 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_drv.h 2011-08-05 20:34:06.000000000 -0400
24725 @@ -209,7 +209,7 @@ struct drm_i915_display_funcs {
24726 /* display clock increase/decrease */
24727 /* pll clock increase/decrease */
24728 /* clock gating init */
24729 -};
24730 +} __no_const;
24731
24732 struct intel_device_info {
24733 u8 gen;
24734 @@ -287,7 +287,7 @@ typedef struct drm_i915_private {
24735 int current_page;
24736 int page_flipping;
24737
24738 - atomic_t irq_received;
24739 + atomic_unchecked_t irq_received;
24740
24741 /* protects the irq masks */
24742 spinlock_t irq_lock;
24743 @@ -848,7 +848,7 @@ struct drm_i915_gem_object {
24744 * will be page flipped away on the next vblank. When it
24745 * reaches 0, dev_priv->pending_flip_queue will be woken up.
24746 */
24747 - atomic_t pending_flip;
24748 + atomic_unchecked_t pending_flip;
24749 };
24750
24751 #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)
24752 @@ -1232,7 +1232,7 @@ extern int intel_setup_gmbus(struct drm_
24753 extern void intel_teardown_gmbus(struct drm_device *dev);
24754 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
24755 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
24756 -extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24757 +static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
24758 {
24759 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
24760 }
24761 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c
24762 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-05-19 00:06:34.000000000 -0400
24763 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2011-08-05 19:44:36.000000000 -0400
24764 @@ -192,7 +192,7 @@ i915_gem_object_set_to_gpu_domain(struct
24765 i915_gem_release_mmap(obj);
24766
24767 if (obj->base.pending_write_domain)
24768 - cd->flips |= atomic_read(&obj->pending_flip);
24769 + cd->flips |= atomic_read_unchecked(&obj->pending_flip);
24770
24771 /* The actual obj->write_domain will be updated with
24772 * pending_write_domain after we emit the accumulated flush for all
24773 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c
24774 --- linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c 2011-07-09 09:18:51.000000000 -0400
24775 +++ linux-2.6.39.4/drivers/gpu/drm/i915/i915_irq.c 2011-08-05 19:44:36.000000000 -0400
24776 @@ -1101,7 +1101,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
24777 int ret = IRQ_NONE, pipe;
24778 bool blc_event = false;
24779
24780 - atomic_inc(&dev_priv->irq_received);
24781 + atomic_inc_unchecked(&dev_priv->irq_received);
24782
24783 if (HAS_PCH_SPLIT(dev))
24784 return ironlake_irq_handler(dev);
24785 @@ -1666,7 +1666,7 @@ void i915_driver_irq_preinstall(struct d
24786 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
24787 int pipe;
24788
24789 - atomic_set(&dev_priv->irq_received, 0);
24790 + atomic_set_unchecked(&dev_priv->irq_received, 0);
24791
24792 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
24793 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
24794 diff -urNp linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c
24795 --- linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c 2011-05-19 00:06:34.000000000 -0400
24796 +++ linux-2.6.39.4/drivers/gpu/drm/i915/intel_display.c 2011-08-05 19:44:36.000000000 -0400
24797 @@ -2244,7 +2244,7 @@ intel_pipe_set_base(struct drm_crtc *crt
24798
24799 wait_event(dev_priv->pending_flip_queue,
24800 atomic_read(&dev_priv->mm.wedged) ||
24801 - atomic_read(&obj->pending_flip) == 0);
24802 + atomic_read_unchecked(&obj->pending_flip) == 0);
24803
24804 /* Big Hammer, we also need to ensure that any pending
24805 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
24806 @@ -2712,7 +2712,7 @@ static void intel_crtc_wait_for_pending_
24807 obj = to_intel_framebuffer(crtc->fb)->obj;
24808 dev_priv = crtc->dev->dev_private;
24809 wait_event(dev_priv->pending_flip_queue,
24810 - atomic_read(&obj->pending_flip) == 0);
24811 + atomic_read_unchecked(&obj->pending_flip) == 0);
24812 }
24813
24814 static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
24815 @@ -6016,7 +6016,7 @@ static void do_intel_finish_page_flip(st
24816
24817 atomic_clear_mask(1 << intel_crtc->plane,
24818 &obj->pending_flip.counter);
24819 - if (atomic_read(&obj->pending_flip) == 0)
24820 + if (atomic_read_unchecked(&obj->pending_flip) == 0)
24821 wake_up(&dev_priv->pending_flip_queue);
24822
24823 schedule_work(&work->work);
24824 @@ -6145,7 +6145,7 @@ static int intel_crtc_page_flip(struct d
24825 /* Block clients from rendering to the new back buffer until
24826 * the flip occurs and the object is no longer visible.
24827 */
24828 - atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24829 + atomic_add_unchecked(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
24830
24831 switch (INTEL_INFO(dev)->gen) {
24832 case 2:
24833 diff -urNp linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h
24834 --- linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h 2011-05-19 00:06:34.000000000 -0400
24835 +++ linux-2.6.39.4/drivers/gpu/drm/mga/mga_drv.h 2011-08-05 19:44:36.000000000 -0400
24836 @@ -120,9 +120,9 @@ typedef struct drm_mga_private {
24837 u32 clear_cmd;
24838 u32 maccess;
24839
24840 - atomic_t vbl_received; /**< Number of vblanks received. */
24841 + atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
24842 wait_queue_head_t fence_queue;
24843 - atomic_t last_fence_retired;
24844 + atomic_unchecked_t last_fence_retired;
24845 u32 next_fence_to_post;
24846
24847 unsigned int fb_cpp;
24848 diff -urNp linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c
24849 --- linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c 2011-05-19 00:06:34.000000000 -0400
24850 +++ linux-2.6.39.4/drivers/gpu/drm/mga/mga_irq.c 2011-08-05 19:44:36.000000000 -0400
24851 @@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
24852 if (crtc != 0)
24853 return 0;
24854
24855 - return atomic_read(&dev_priv->vbl_received);
24856 + return atomic_read_unchecked(&dev_priv->vbl_received);
24857 }
24858
24859
24860 @@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24861 /* VBLANK interrupt */
24862 if (status & MGA_VLINEPEN) {
24863 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
24864 - atomic_inc(&dev_priv->vbl_received);
24865 + atomic_inc_unchecked(&dev_priv->vbl_received);
24866 drm_handle_vblank(dev, 0);
24867 handled = 1;
24868 }
24869 @@ -79,7 +79,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
24870 if ((prim_start & ~0x03) != (prim_end & ~0x03))
24871 MGA_WRITE(MGA_PRIMEND, prim_end);
24872
24873 - atomic_inc(&dev_priv->last_fence_retired);
24874 + atomic_inc_unchecked(&dev_priv->last_fence_retired);
24875 DRM_WAKEUP(&dev_priv->fence_queue);
24876 handled = 1;
24877 }
24878 @@ -130,7 +130,7 @@ int mga_driver_fence_wait(struct drm_dev
24879 * using fences.
24880 */
24881 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
24882 - (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
24883 + (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
24884 - *sequence) <= (1 << 23)));
24885
24886 *sequence = cur_fence;
24887 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h
24888 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-05-19 00:06:34.000000000 -0400
24889 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_drv.h 2011-08-05 20:34:06.000000000 -0400
24890 @@ -228,7 +228,7 @@ struct nouveau_channel {
24891 struct list_head pending;
24892 uint32_t sequence;
24893 uint32_t sequence_ack;
24894 - atomic_t last_sequence_irq;
24895 + atomic_unchecked_t last_sequence_irq;
24896 } fence;
24897
24898 /* DMA push buffer */
24899 @@ -317,13 +317,13 @@ struct nouveau_instmem_engine {
24900 struct nouveau_mc_engine {
24901 int (*init)(struct drm_device *dev);
24902 void (*takedown)(struct drm_device *dev);
24903 -};
24904 +} __no_const;
24905
24906 struct nouveau_timer_engine {
24907 int (*init)(struct drm_device *dev);
24908 void (*takedown)(struct drm_device *dev);
24909 uint64_t (*read)(struct drm_device *dev);
24910 -};
24911 +} __no_const;
24912
24913 struct nouveau_fb_engine {
24914 int num_tiles;
24915 @@ -516,7 +516,7 @@ struct nouveau_vram_engine {
24916 void (*put)(struct drm_device *, struct nouveau_mem **);
24917
24918 bool (*flags_valid)(struct drm_device *, u32 tile_flags);
24919 -};
24920 +} __no_const;
24921
24922 struct nouveau_engine {
24923 struct nouveau_instmem_engine instmem;
24924 @@ -662,7 +662,7 @@ struct drm_nouveau_private {
24925 struct drm_global_reference mem_global_ref;
24926 struct ttm_bo_global_ref bo_global_ref;
24927 struct ttm_bo_device bdev;
24928 - atomic_t validate_sequence;
24929 + atomic_unchecked_t validate_sequence;
24930 } ttm;
24931
24932 struct {
24933 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c
24934 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-05-19 00:06:34.000000000 -0400
24935 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_fence.c 2011-08-05 19:44:36.000000000 -0400
24936 @@ -85,7 +85,7 @@ nouveau_fence_update(struct nouveau_chan
24937 if (USE_REFCNT(dev))
24938 sequence = nvchan_rd32(chan, 0x48);
24939 else
24940 - sequence = atomic_read(&chan->fence.last_sequence_irq);
24941 + sequence = atomic_read_unchecked(&chan->fence.last_sequence_irq);
24942
24943 if (chan->fence.sequence_ack == sequence)
24944 goto out;
24945 @@ -553,7 +553,7 @@ nouveau_fence_channel_init(struct nouvea
24946 out_initialised:
24947 INIT_LIST_HEAD(&chan->fence.pending);
24948 spin_lock_init(&chan->fence.lock);
24949 - atomic_set(&chan->fence.last_sequence_irq, 0);
24950 + atomic_set_unchecked(&chan->fence.last_sequence_irq, 0);
24951 return 0;
24952 }
24953
24954 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c
24955 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-05-19 00:06:34.000000000 -0400
24956 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_gem.c 2011-08-05 19:44:36.000000000 -0400
24957 @@ -249,7 +249,7 @@ validate_init(struct nouveau_channel *ch
24958 int trycnt = 0;
24959 int ret, i;
24960
24961 - sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
24962 + sequence = atomic_add_return_unchecked(1, &dev_priv->ttm.validate_sequence);
24963 retry:
24964 if (++trycnt > 100000) {
24965 NV_ERROR(dev, "%s failed and gave up.\n", __func__);
24966 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c
24967 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-05-19 00:06:34.000000000 -0400
24968 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nouveau_state.c 2011-08-05 19:44:36.000000000 -0400
24969 @@ -583,7 +583,7 @@ static bool nouveau_switcheroo_can_switc
24970 bool can_switch;
24971
24972 spin_lock(&dev->count_lock);
24973 - can_switch = (dev->open_count == 0);
24974 + can_switch = (local_read(&dev->open_count) == 0);
24975 spin_unlock(&dev->count_lock);
24976 return can_switch;
24977 }
24978 diff -urNp linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c
24979 --- linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-05-19 00:06:34.000000000 -0400
24980 +++ linux-2.6.39.4/drivers/gpu/drm/nouveau/nv04_graph.c 2011-08-05 19:44:36.000000000 -0400
24981 @@ -552,7 +552,7 @@ static int
24982 nv04_graph_mthd_set_ref(struct nouveau_channel *chan,
24983 u32 class, u32 mthd, u32 data)
24984 {
24985 - atomic_set(&chan->fence.last_sequence_irq, data);
24986 + atomic_set_unchecked(&chan->fence.last_sequence_irq, data);
24987 return 0;
24988 }
24989
24990 diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c
24991 --- linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c 2011-05-19 00:06:34.000000000 -0400
24992 +++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_cce.c 2011-08-05 19:44:36.000000000 -0400
24993 @@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
24994
24995 /* GH: Simple idle check.
24996 */
24997 - atomic_set(&dev_priv->idle_count, 0);
24998 + atomic_set_unchecked(&dev_priv->idle_count, 0);
24999
25000 /* We don't support anything other than bus-mastering ring mode,
25001 * but the ring can be in either AGP or PCI space for the ring
25002 diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h
25003 --- linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h 2011-05-19 00:06:34.000000000 -0400
25004 +++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_drv.h 2011-08-05 19:44:36.000000000 -0400
25005 @@ -90,14 +90,14 @@ typedef struct drm_r128_private {
25006 int is_pci;
25007 unsigned long cce_buffers_offset;
25008
25009 - atomic_t idle_count;
25010 + atomic_unchecked_t idle_count;
25011
25012 int page_flipping;
25013 int current_page;
25014 u32 crtc_offset;
25015 u32 crtc_offset_cntl;
25016
25017 - atomic_t vbl_received;
25018 + atomic_unchecked_t vbl_received;
25019
25020 u32 color_fmt;
25021 unsigned int front_offset;
25022 diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c
25023 --- linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c 2011-05-19 00:06:34.000000000 -0400
25024 +++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_irq.c 2011-08-05 19:44:36.000000000 -0400
25025 @@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
25026 if (crtc != 0)
25027 return 0;
25028
25029 - return atomic_read(&dev_priv->vbl_received);
25030 + return atomic_read_unchecked(&dev_priv->vbl_received);
25031 }
25032
25033 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
25034 @@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
25035 /* VBLANK interrupt */
25036 if (status & R128_CRTC_VBLANK_INT) {
25037 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
25038 - atomic_inc(&dev_priv->vbl_received);
25039 + atomic_inc_unchecked(&dev_priv->vbl_received);
25040 drm_handle_vblank(dev, 0);
25041 return IRQ_HANDLED;
25042 }
25043 diff -urNp linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c
25044 --- linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c 2011-05-19 00:06:34.000000000 -0400
25045 +++ linux-2.6.39.4/drivers/gpu/drm/r128/r128_state.c 2011-08-05 19:44:36.000000000 -0400
25046 @@ -321,10 +321,10 @@ static void r128_clear_box(drm_r128_priv
25047
25048 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
25049 {
25050 - if (atomic_read(&dev_priv->idle_count) == 0)
25051 + if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
25052 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
25053 else
25054 - atomic_set(&dev_priv->idle_count, 0);
25055 + atomic_set_unchecked(&dev_priv->idle_count, 0);
25056 }
25057
25058 #endif
25059 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c
25060 --- linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c 2011-05-19 00:06:34.000000000 -0400
25061 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/atom.c 2011-08-05 19:44:36.000000000 -0400
25062 @@ -1245,6 +1245,8 @@ struct atom_context *atom_parse(struct c
25063 char name[512];
25064 int i;
25065
25066 + pax_track_stack();
25067 +
25068 ctx->card = card;
25069 ctx->bios = bios;
25070
25071 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c
25072 --- linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c 2011-05-19 00:06:34.000000000 -0400
25073 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/mkregtable.c 2011-08-05 19:44:36.000000000 -0400
25074 @@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
25075 regex_t mask_rex;
25076 regmatch_t match[4];
25077 char buf[1024];
25078 - size_t end;
25079 + long end;
25080 int len;
25081 int done = 0;
25082 int r;
25083 unsigned o;
25084 struct offset *offset;
25085 char last_reg_s[10];
25086 - int last_reg;
25087 + unsigned long last_reg;
25088
25089 if (regcomp
25090 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
25091 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c
25092 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-19 00:06:34.000000000 -0400
25093 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_atombios.c 2011-08-05 19:44:36.000000000 -0400
25094 @@ -545,6 +545,8 @@ bool radeon_get_atom_connector_info_from
25095 struct radeon_gpio_rec gpio;
25096 struct radeon_hpd hpd;
25097
25098 + pax_track_stack();
25099 +
25100 if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
25101 return false;
25102
25103 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c
25104 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c 2011-06-25 12:55:22.000000000 -0400
25105 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_device.c 2011-08-05 19:44:36.000000000 -0400
25106 @@ -674,7 +674,7 @@ static bool radeon_switcheroo_can_switch
25107 bool can_switch;
25108
25109 spin_lock(&dev->count_lock);
25110 - can_switch = (dev->open_count == 0);
25111 + can_switch = (local_read(&dev->open_count) == 0);
25112 spin_unlock(&dev->count_lock);
25113 return can_switch;
25114 }
25115 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c
25116 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-05 21:11:51.000000000 -0400
25117 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_display.c 2011-08-05 21:12:20.000000000 -0400
25118 @@ -937,6 +937,8 @@ void radeon_compute_pll_legacy(struct ra
25119 uint32_t post_div;
25120 u32 pll_out_min, pll_out_max;
25121
25122 + pax_track_stack();
25123 +
25124 DRM_DEBUG_KMS("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
25125 freq = freq * 1000;
25126
25127 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h
25128 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-19 00:06:34.000000000 -0400
25129 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_drv.h 2011-08-05 19:44:36.000000000 -0400
25130 @@ -255,7 +255,7 @@ typedef struct drm_radeon_private {
25131
25132 /* SW interrupt */
25133 wait_queue_head_t swi_queue;
25134 - atomic_t swi_emitted;
25135 + atomic_unchecked_t swi_emitted;
25136 int vblank_crtc;
25137 uint32_t irq_enable_reg;
25138 uint32_t r500_disp_irq_reg;
25139 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c
25140 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-19 00:06:34.000000000 -0400
25141 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_fence.c 2011-08-05 19:44:36.000000000 -0400
25142 @@ -49,7 +49,7 @@ int radeon_fence_emit(struct radeon_devi
25143 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
25144 return 0;
25145 }
25146 - fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
25147 + fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
25148 if (!rdev->cp.ready) {
25149 /* FIXME: cp is not running assume everythings is done right
25150 * away
25151 @@ -352,7 +352,7 @@ int radeon_fence_driver_init(struct rade
25152 return r;
25153 }
25154 WREG32(rdev->fence_drv.scratch_reg, 0);
25155 - atomic_set(&rdev->fence_drv.seq, 0);
25156 + atomic_set_unchecked(&rdev->fence_drv.seq, 0);
25157 INIT_LIST_HEAD(&rdev->fence_drv.created);
25158 INIT_LIST_HEAD(&rdev->fence_drv.emited);
25159 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
25160 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h
25161 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h 2011-05-19 00:06:34.000000000 -0400
25162 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon.h 2011-08-05 20:34:06.000000000 -0400
25163 @@ -189,7 +189,7 @@ extern int sumo_get_temp(struct radeon_d
25164 */
25165 struct radeon_fence_driver {
25166 uint32_t scratch_reg;
25167 - atomic_t seq;
25168 + atomic_unchecked_t seq;
25169 uint32_t last_seq;
25170 unsigned long last_jiffies;
25171 unsigned long last_timeout;
25172 @@ -958,7 +958,7 @@ struct radeon_asic {
25173 void (*pre_page_flip)(struct radeon_device *rdev, int crtc);
25174 u32 (*page_flip)(struct radeon_device *rdev, int crtc, u64 crtc_base);
25175 void (*post_page_flip)(struct radeon_device *rdev, int crtc);
25176 -};
25177 +} __no_const;
25178
25179 /*
25180 * Asic structures
25181 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c
25182 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-05-19 00:06:34.000000000 -0400
25183 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-08-05 19:44:36.000000000 -0400
25184 @@ -359,7 +359,7 @@ static int compat_radeon_cp_setparam(str
25185 request = compat_alloc_user_space(sizeof(*request));
25186 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
25187 || __put_user(req32.param, &request->param)
25188 - || __put_user((void __user *)(unsigned long)req32.value,
25189 + || __put_user((unsigned long)req32.value,
25190 &request->value))
25191 return -EFAULT;
25192
25193 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c
25194 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-19 00:06:34.000000000 -0400
25195 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_irq.c 2011-08-05 19:44:36.000000000 -0400
25196 @@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
25197 unsigned int ret;
25198 RING_LOCALS;
25199
25200 - atomic_inc(&dev_priv->swi_emitted);
25201 - ret = atomic_read(&dev_priv->swi_emitted);
25202 + atomic_inc_unchecked(&dev_priv->swi_emitted);
25203 + ret = atomic_read_unchecked(&dev_priv->swi_emitted);
25204
25205 BEGIN_RING(4);
25206 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
25207 @@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
25208 drm_radeon_private_t *dev_priv =
25209 (drm_radeon_private_t *) dev->dev_private;
25210
25211 - atomic_set(&dev_priv->swi_emitted, 0);
25212 + atomic_set_unchecked(&dev_priv->swi_emitted, 0);
25213 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
25214
25215 dev->max_vblank_count = 0x001fffff;
25216 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c
25217 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c 2011-05-19 00:06:34.000000000 -0400
25218 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_state.c 2011-08-05 19:44:36.000000000 -0400
25219 @@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de
25220 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
25221 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
25222
25223 - if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25224 + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
25225 sarea_priv->nbox * sizeof(depth_boxes[0])))
25226 return -EFAULT;
25227
25228 @@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm
25229 {
25230 drm_radeon_private_t *dev_priv = dev->dev_private;
25231 drm_radeon_getparam_t *param = data;
25232 - int value;
25233 + int value = 0;
25234
25235 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
25236
25237 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c
25238 --- linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-05-19 00:06:34.000000000 -0400
25239 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/radeon_ttm.c 2011-08-05 20:34:06.000000000 -0400
25240 @@ -644,8 +644,10 @@ int radeon_mmap(struct file *filp, struc
25241 }
25242 if (unlikely(ttm_vm_ops == NULL)) {
25243 ttm_vm_ops = vma->vm_ops;
25244 - radeon_ttm_vm_ops = *ttm_vm_ops;
25245 - radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25246 + pax_open_kernel();
25247 + memcpy((void *)&radeon_ttm_vm_ops, ttm_vm_ops, sizeof(radeon_ttm_vm_ops));
25248 + *(void **)&radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
25249 + pax_close_kernel();
25250 }
25251 vma->vm_ops = &radeon_ttm_vm_ops;
25252 return 0;
25253 diff -urNp linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c
25254 --- linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c 2011-05-19 00:06:34.000000000 -0400
25255 +++ linux-2.6.39.4/drivers/gpu/drm/radeon/rs690.c 2011-08-05 19:44:36.000000000 -0400
25256 @@ -304,9 +304,11 @@ void rs690_crtc_bandwidth_compute(struct
25257 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
25258 rdev->pm.sideport_bandwidth.full)
25259 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
25260 - read_delay_latency.full = dfixed_const(370 * 800 * 1000);
25261 + read_delay_latency.full = dfixed_const(800 * 1000);
25262 read_delay_latency.full = dfixed_div(read_delay_latency,
25263 rdev->pm.igp_sideport_mclk);
25264 + a.full = dfixed_const(370);
25265 + read_delay_latency.full = dfixed_mul(read_delay_latency, a);
25266 } else {
25267 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
25268 rdev->pm.k8_bandwidth.full)
25269 diff -urNp linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c
25270 --- linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-05-19 00:06:34.000000000 -0400
25271 +++ linux-2.6.39.4/drivers/gpu/drm/ttm/ttm_page_alloc.c 2011-08-05 19:44:36.000000000 -0400
25272 @@ -397,9 +397,9 @@ static int ttm_pool_get_num_unused_pages
25273 */
25274 static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask)
25275 {
25276 - static atomic_t start_pool = ATOMIC_INIT(0);
25277 + static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
25278 unsigned i;
25279 - unsigned pool_offset = atomic_add_return(1, &start_pool);
25280 + unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
25281 struct ttm_page_pool *pool;
25282
25283 pool_offset = pool_offset % NUM_POOLS;
25284 diff -urNp linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h
25285 --- linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h 2011-05-19 00:06:34.000000000 -0400
25286 +++ linux-2.6.39.4/drivers/gpu/drm/via/via_drv.h 2011-08-05 19:44:36.000000000 -0400
25287 @@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
25288 typedef uint32_t maskarray_t[5];
25289
25290 typedef struct drm_via_irq {
25291 - atomic_t irq_received;
25292 + atomic_unchecked_t irq_received;
25293 uint32_t pending_mask;
25294 uint32_t enable_mask;
25295 wait_queue_head_t irq_queue;
25296 @@ -75,7 +75,7 @@ typedef struct drm_via_private {
25297 struct timeval last_vblank;
25298 int last_vblank_valid;
25299 unsigned usec_per_vblank;
25300 - atomic_t vbl_received;
25301 + atomic_unchecked_t vbl_received;
25302 drm_via_state_t hc_state;
25303 char pci_buf[VIA_PCI_BUF_SIZE];
25304 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
25305 diff -urNp linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c
25306 --- linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c 2011-05-19 00:06:34.000000000 -0400
25307 +++ linux-2.6.39.4/drivers/gpu/drm/via/via_irq.c 2011-08-05 19:44:36.000000000 -0400
25308 @@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
25309 if (crtc != 0)
25310 return 0;
25311
25312 - return atomic_read(&dev_priv->vbl_received);
25313 + return atomic_read_unchecked(&dev_priv->vbl_received);
25314 }
25315
25316 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
25317 @@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
25318
25319 status = VIA_READ(VIA_REG_INTERRUPT);
25320 if (status & VIA_IRQ_VBLANK_PENDING) {
25321 - atomic_inc(&dev_priv->vbl_received);
25322 - if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
25323 + atomic_inc_unchecked(&dev_priv->vbl_received);
25324 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
25325 do_gettimeofday(&cur_vblank);
25326 if (dev_priv->last_vblank_valid) {
25327 dev_priv->usec_per_vblank =
25328 @@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25329 dev_priv->last_vblank = cur_vblank;
25330 dev_priv->last_vblank_valid = 1;
25331 }
25332 - if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
25333 + if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
25334 DRM_DEBUG("US per vblank is: %u\n",
25335 dev_priv->usec_per_vblank);
25336 }
25337 @@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
25338
25339 for (i = 0; i < dev_priv->num_irqs; ++i) {
25340 if (status & cur_irq->pending_mask) {
25341 - atomic_inc(&cur_irq->irq_received);
25342 + atomic_inc_unchecked(&cur_irq->irq_received);
25343 DRM_WAKEUP(&cur_irq->irq_queue);
25344 handled = 1;
25345 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
25346 @@ -243,11 +243,11 @@ via_driver_irq_wait(struct drm_device *d
25347 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25348 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
25349 masks[irq][4]));
25350 - cur_irq_sequence = atomic_read(&cur_irq->irq_received);
25351 + cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
25352 } else {
25353 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
25354 (((cur_irq_sequence =
25355 - atomic_read(&cur_irq->irq_received)) -
25356 + atomic_read_unchecked(&cur_irq->irq_received)) -
25357 *sequence) <= (1 << 23)));
25358 }
25359 *sequence = cur_irq_sequence;
25360 @@ -285,7 +285,7 @@ void via_driver_irq_preinstall(struct dr
25361 }
25362
25363 for (i = 0; i < dev_priv->num_irqs; ++i) {
25364 - atomic_set(&cur_irq->irq_received, 0);
25365 + atomic_set_unchecked(&cur_irq->irq_received, 0);
25366 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
25367 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
25368 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
25369 @@ -367,7 +367,7 @@ int via_wait_irq(struct drm_device *dev,
25370 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
25371 case VIA_IRQ_RELATIVE:
25372 irqwait->request.sequence +=
25373 - atomic_read(&cur_irq->irq_received);
25374 + atomic_read_unchecked(&cur_irq->irq_received);
25375 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
25376 case VIA_IRQ_ABSOLUTE:
25377 break;
25378 diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
25379 --- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-05-19 00:06:34.000000000 -0400
25380 +++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h 2011-08-05 19:44:36.000000000 -0400
25381 @@ -240,7 +240,7 @@ struct vmw_private {
25382 * Fencing and IRQs.
25383 */
25384
25385 - atomic_t fence_seq;
25386 + atomic_unchecked_t fence_seq;
25387 wait_queue_head_t fence_queue;
25388 wait_queue_head_t fifo_queue;
25389 atomic_t fence_queue_waiters;
25390 diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
25391 --- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-05-19 00:06:34.000000000 -0400
25392 +++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c 2011-08-05 19:44:36.000000000 -0400
25393 @@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev
25394 while (!vmw_lag_lt(queue, us)) {
25395 spin_lock(&queue->lock);
25396 if (list_empty(&queue->head))
25397 - sequence = atomic_read(&dev_priv->fence_seq);
25398 + sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25399 else {
25400 fence = list_first_entry(&queue->head,
25401 struct vmw_fence, head);
25402 diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
25403 --- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-05-19 00:06:34.000000000 -0400
25404 +++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c 2011-08-05 20:34:06.000000000 -0400
25405 @@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *de
25406 (unsigned int) min,
25407 (unsigned int) fifo->capabilities);
25408
25409 - atomic_set(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25410 + atomic_set_unchecked(&dev_priv->fence_seq, dev_priv->last_read_sequence);
25411 iowrite32(dev_priv->last_read_sequence, fifo_mem + SVGA_FIFO_FENCE);
25412 vmw_fence_queue_init(&fifo->fence_queue);
25413 return vmw_fifo_send_fence(dev_priv, &dummy);
25414 @@ -476,7 +476,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25415
25416 fm = vmw_fifo_reserve(dev_priv, bytes);
25417 if (unlikely(fm == NULL)) {
25418 - *sequence = atomic_read(&dev_priv->fence_seq);
25419 + *sequence = atomic_read_unchecked(&dev_priv->fence_seq);
25420 ret = -ENOMEM;
25421 (void)vmw_fallback_wait(dev_priv, false, true, *sequence,
25422 false, 3*HZ);
25423 @@ -484,7 +484,7 @@ int vmw_fifo_send_fence(struct vmw_priva
25424 }
25425
25426 do {
25427 - *sequence = atomic_add_return(1, &dev_priv->fence_seq);
25428 + *sequence = atomic_add_return_unchecked(1, &dev_priv->fence_seq);
25429 } while (*sequence == 0);
25430
25431 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
25432 diff -urNp linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
25433 --- linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-05-19 00:06:34.000000000 -0400
25434 +++ linux-2.6.39.4/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c 2011-08-05 19:44:36.000000000 -0400
25435 @@ -100,7 +100,7 @@ bool vmw_fence_signaled(struct vmw_priva
25436 * emitted. Then the fence is stale and signaled.
25437 */
25438
25439 - ret = ((atomic_read(&dev_priv->fence_seq) - sequence)
25440 + ret = ((atomic_read_unchecked(&dev_priv->fence_seq) - sequence)
25441 > VMW_FENCE_WRAP);
25442
25443 return ret;
25444 @@ -131,7 +131,7 @@ int vmw_fallback_wait(struct vmw_private
25445
25446 if (fifo_idle)
25447 down_read(&fifo_state->rwsem);
25448 - signal_seq = atomic_read(&dev_priv->fence_seq);
25449 + signal_seq = atomic_read_unchecked(&dev_priv->fence_seq);
25450 ret = 0;
25451
25452 for (;;) {
25453 diff -urNp linux-2.6.39.4/drivers/hid/hid-core.c linux-2.6.39.4/drivers/hid/hid-core.c
25454 --- linux-2.6.39.4/drivers/hid/hid-core.c 2011-05-19 00:06:34.000000000 -0400
25455 +++ linux-2.6.39.4/drivers/hid/hid-core.c 2011-08-05 19:44:36.000000000 -0400
25456 @@ -1888,7 +1888,7 @@ static bool hid_ignore(struct hid_device
25457
25458 int hid_add_device(struct hid_device *hdev)
25459 {
25460 - static atomic_t id = ATOMIC_INIT(0);
25461 + static atomic_unchecked_t id = ATOMIC_INIT(0);
25462 int ret;
25463
25464 if (WARN_ON(hdev->status & HID_STAT_ADDED))
25465 @@ -1903,7 +1903,7 @@ int hid_add_device(struct hid_device *hd
25466 /* XXX hack, any other cleaner solution after the driver core
25467 * is converted to allow more than 20 bytes as the device name? */
25468 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
25469 - hdev->vendor, hdev->product, atomic_inc_return(&id));
25470 + hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
25471
25472 hid_debug_register(hdev, dev_name(&hdev->dev));
25473 ret = device_add(&hdev->dev);
25474 diff -urNp linux-2.6.39.4/drivers/hid/usbhid/hiddev.c linux-2.6.39.4/drivers/hid/usbhid/hiddev.c
25475 --- linux-2.6.39.4/drivers/hid/usbhid/hiddev.c 2011-05-19 00:06:34.000000000 -0400
25476 +++ linux-2.6.39.4/drivers/hid/usbhid/hiddev.c 2011-08-05 19:44:36.000000000 -0400
25477 @@ -613,7 +613,7 @@ static long hiddev_ioctl(struct file *fi
25478 break;
25479
25480 case HIDIOCAPPLICATION:
25481 - if (arg < 0 || arg >= hid->maxapplication)
25482 + if (arg >= hid->maxapplication)
25483 break;
25484
25485 for (i = 0; i < hid->maxcollection; i++)
25486 diff -urNp linux-2.6.39.4/drivers/hwmon/sht15.c linux-2.6.39.4/drivers/hwmon/sht15.c
25487 --- linux-2.6.39.4/drivers/hwmon/sht15.c 2011-05-19 00:06:34.000000000 -0400
25488 +++ linux-2.6.39.4/drivers/hwmon/sht15.c 2011-08-05 19:44:36.000000000 -0400
25489 @@ -113,7 +113,7 @@ struct sht15_data {
25490 int supply_uV;
25491 int supply_uV_valid;
25492 struct work_struct update_supply_work;
25493 - atomic_t interrupt_handled;
25494 + atomic_unchecked_t interrupt_handled;
25495 };
25496
25497 /**
25498 @@ -246,13 +246,13 @@ static inline int sht15_update_single_va
25499 return ret;
25500
25501 gpio_direction_input(data->pdata->gpio_data);
25502 - atomic_set(&data->interrupt_handled, 0);
25503 + atomic_set_unchecked(&data->interrupt_handled, 0);
25504
25505 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25506 if (gpio_get_value(data->pdata->gpio_data) == 0) {
25507 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
25508 /* Only relevant if the interrupt hasn't occurred. */
25509 - if (!atomic_read(&data->interrupt_handled))
25510 + if (!atomic_read_unchecked(&data->interrupt_handled))
25511 schedule_work(&data->read_work);
25512 }
25513 ret = wait_event_timeout(data->wait_queue,
25514 @@ -399,7 +399,7 @@ static irqreturn_t sht15_interrupt_fired
25515 struct sht15_data *data = d;
25516 /* First disable the interrupt */
25517 disable_irq_nosync(irq);
25518 - atomic_inc(&data->interrupt_handled);
25519 + atomic_inc_unchecked(&data->interrupt_handled);
25520 /* Then schedule a reading work struct */
25521 if (data->flag != SHT15_READING_NOTHING)
25522 schedule_work(&data->read_work);
25523 @@ -450,11 +450,11 @@ static void sht15_bh_read_data(struct wo
25524 here as could have gone low in meantime so verify
25525 it hasn't!
25526 */
25527 - atomic_set(&data->interrupt_handled, 0);
25528 + atomic_set_unchecked(&data->interrupt_handled, 0);
25529 enable_irq(gpio_to_irq(data->pdata->gpio_data));
25530 /* If still not occurred or another handler has been scheduled */
25531 if (gpio_get_value(data->pdata->gpio_data)
25532 - || atomic_read(&data->interrupt_handled))
25533 + || atomic_read_unchecked(&data->interrupt_handled))
25534 return;
25535 }
25536 /* Read the data back from the device */
25537 diff -urNp linux-2.6.39.4/drivers/hwmon/w83791d.c linux-2.6.39.4/drivers/hwmon/w83791d.c
25538 --- linux-2.6.39.4/drivers/hwmon/w83791d.c 2011-05-19 00:06:34.000000000 -0400
25539 +++ linux-2.6.39.4/drivers/hwmon/w83791d.c 2011-08-05 19:44:36.000000000 -0400
25540 @@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli
25541 struct i2c_board_info *info);
25542 static int w83791d_remove(struct i2c_client *client);
25543
25544 -static int w83791d_read(struct i2c_client *client, u8 register);
25545 -static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
25546 +static int w83791d_read(struct i2c_client *client, u8 reg);
25547 +static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
25548 static struct w83791d_data *w83791d_update_device(struct device *dev);
25549
25550 #ifdef DEBUG
25551 diff -urNp linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c
25552 --- linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-05-19 00:06:34.000000000 -0400
25553 +++ linux-2.6.39.4/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-05 20:34:06.000000000 -0400
25554 @@ -43,7 +43,7 @@
25555 extern struct i2c_adapter amd756_smbus;
25556
25557 static struct i2c_adapter *s4882_adapter;
25558 -static struct i2c_algorithm *s4882_algo;
25559 +static i2c_algorithm_no_const *s4882_algo;
25560
25561 /* Wrapper access functions for multiplexed SMBus */
25562 static DEFINE_MUTEX(amd756_lock);
25563 diff -urNp linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c
25564 --- linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-05-19 00:06:34.000000000 -0400
25565 +++ linux-2.6.39.4/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-05 20:34:06.000000000 -0400
25566 @@ -41,7 +41,7 @@
25567 extern struct i2c_adapter *nforce2_smbus;
25568
25569 static struct i2c_adapter *s4985_adapter;
25570 -static struct i2c_algorithm *s4985_algo;
25571 +static i2c_algorithm_no_const *s4985_algo;
25572
25573 /* Wrapper access functions for multiplexed SMBus */
25574 static DEFINE_MUTEX(nforce2_lock);
25575 diff -urNp linux-2.6.39.4/drivers/i2c/i2c-mux.c linux-2.6.39.4/drivers/i2c/i2c-mux.c
25576 --- linux-2.6.39.4/drivers/i2c/i2c-mux.c 2011-05-19 00:06:34.000000000 -0400
25577 +++ linux-2.6.39.4/drivers/i2c/i2c-mux.c 2011-08-05 20:34:06.000000000 -0400
25578 @@ -28,7 +28,7 @@
25579 /* multiplexer per channel data */
25580 struct i2c_mux_priv {
25581 struct i2c_adapter adap;
25582 - struct i2c_algorithm algo;
25583 + i2c_algorithm_no_const algo;
25584
25585 struct i2c_adapter *parent;
25586 void *mux_dev; /* the mux chip/device */
25587 diff -urNp linux-2.6.39.4/drivers/ide/ide-cd.c linux-2.6.39.4/drivers/ide/ide-cd.c
25588 --- linux-2.6.39.4/drivers/ide/ide-cd.c 2011-06-03 00:04:14.000000000 -0400
25589 +++ linux-2.6.39.4/drivers/ide/ide-cd.c 2011-08-05 19:44:36.000000000 -0400
25590 @@ -769,7 +769,7 @@ static void cdrom_do_block_pc(ide_drive_
25591 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
25592 if ((unsigned long)buf & alignment
25593 || blk_rq_bytes(rq) & q->dma_pad_mask
25594 - || object_is_on_stack(buf))
25595 + || object_starts_on_stack(buf))
25596 drive->dma = 0;
25597 }
25598 }
25599 diff -urNp linux-2.6.39.4/drivers/ide/ide-floppy.c linux-2.6.39.4/drivers/ide/ide-floppy.c
25600 --- linux-2.6.39.4/drivers/ide/ide-floppy.c 2011-05-19 00:06:34.000000000 -0400
25601 +++ linux-2.6.39.4/drivers/ide/ide-floppy.c 2011-08-05 19:44:36.000000000 -0400
25602 @@ -379,6 +379,8 @@ static int ide_floppy_get_capacity(ide_d
25603 u8 pc_buf[256], header_len, desc_cnt;
25604 int i, rc = 1, blocks, length;
25605
25606 + pax_track_stack();
25607 +
25608 ide_debug_log(IDE_DBG_FUNC, "enter");
25609
25610 drive->bios_cyl = 0;
25611 diff -urNp linux-2.6.39.4/drivers/ide/setup-pci.c linux-2.6.39.4/drivers/ide/setup-pci.c
25612 --- linux-2.6.39.4/drivers/ide/setup-pci.c 2011-05-19 00:06:34.000000000 -0400
25613 +++ linux-2.6.39.4/drivers/ide/setup-pci.c 2011-08-05 19:44:36.000000000 -0400
25614 @@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
25615 int ret, i, n_ports = dev2 ? 4 : 2;
25616 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
25617
25618 + pax_track_stack();
25619 +
25620 for (i = 0; i < n_ports / 2; i++) {
25621 ret = ide_setup_pci_controller(pdev[i], d, !i);
25622 if (ret < 0)
25623 diff -urNp linux-2.6.39.4/drivers/infiniband/core/cm.c linux-2.6.39.4/drivers/infiniband/core/cm.c
25624 --- linux-2.6.39.4/drivers/infiniband/core/cm.c 2011-05-19 00:06:34.000000000 -0400
25625 +++ linux-2.6.39.4/drivers/infiniband/core/cm.c 2011-08-05 19:44:36.000000000 -0400
25626 @@ -113,7 +113,7 @@ static char const counter_group_names[CM
25627
25628 struct cm_counter_group {
25629 struct kobject obj;
25630 - atomic_long_t counter[CM_ATTR_COUNT];
25631 + atomic_long_unchecked_t counter[CM_ATTR_COUNT];
25632 };
25633
25634 struct cm_counter_attribute {
25635 @@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm
25636 struct ib_mad_send_buf *msg = NULL;
25637 int ret;
25638
25639 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25640 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25641 counter[CM_REQ_COUNTER]);
25642
25643 /* Quick state check to discard duplicate REQs. */
25644 @@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm
25645 if (!cm_id_priv)
25646 return;
25647
25648 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25649 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25650 counter[CM_REP_COUNTER]);
25651 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
25652 if (ret)
25653 @@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work
25654 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
25655 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
25656 spin_unlock_irq(&cm_id_priv->lock);
25657 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25658 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25659 counter[CM_RTU_COUNTER]);
25660 goto out;
25661 }
25662 @@ -2115,7 +2115,7 @@ static int cm_dreq_handler(struct cm_wor
25663 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
25664 dreq_msg->local_comm_id);
25665 if (!cm_id_priv) {
25666 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25667 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25668 counter[CM_DREQ_COUNTER]);
25669 cm_issue_drep(work->port, work->mad_recv_wc);
25670 return -EINVAL;
25671 @@ -2140,7 +2140,7 @@ static int cm_dreq_handler(struct cm_wor
25672 case IB_CM_MRA_REP_RCVD:
25673 break;
25674 case IB_CM_TIMEWAIT:
25675 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25676 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25677 counter[CM_DREQ_COUNTER]);
25678 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25679 goto unlock;
25680 @@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_wor
25681 cm_free_msg(msg);
25682 goto deref;
25683 case IB_CM_DREQ_RCVD:
25684 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25685 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25686 counter[CM_DREQ_COUNTER]);
25687 goto unlock;
25688 default:
25689 @@ -2521,7 +2521,7 @@ static int cm_mra_handler(struct cm_work
25690 ib_modify_mad(cm_id_priv->av.port->mad_agent,
25691 cm_id_priv->msg, timeout)) {
25692 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
25693 - atomic_long_inc(&work->port->
25694 + atomic_long_inc_unchecked(&work->port->
25695 counter_group[CM_RECV_DUPLICATES].
25696 counter[CM_MRA_COUNTER]);
25697 goto out;
25698 @@ -2530,7 +2530,7 @@ static int cm_mra_handler(struct cm_work
25699 break;
25700 case IB_CM_MRA_REQ_RCVD:
25701 case IB_CM_MRA_REP_RCVD:
25702 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25703 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25704 counter[CM_MRA_COUNTER]);
25705 /* fall through */
25706 default:
25707 @@ -2692,7 +2692,7 @@ static int cm_lap_handler(struct cm_work
25708 case IB_CM_LAP_IDLE:
25709 break;
25710 case IB_CM_MRA_LAP_SENT:
25711 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25712 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25713 counter[CM_LAP_COUNTER]);
25714 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
25715 goto unlock;
25716 @@ -2708,7 +2708,7 @@ static int cm_lap_handler(struct cm_work
25717 cm_free_msg(msg);
25718 goto deref;
25719 case IB_CM_LAP_RCVD:
25720 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25721 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25722 counter[CM_LAP_COUNTER]);
25723 goto unlock;
25724 default:
25725 @@ -2992,7 +2992,7 @@ static int cm_sidr_req_handler(struct cm
25726 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
25727 if (cur_cm_id_priv) {
25728 spin_unlock_irq(&cm.lock);
25729 - atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
25730 + atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
25731 counter[CM_SIDR_REQ_COUNTER]);
25732 goto out; /* Duplicate message. */
25733 }
25734 @@ -3204,10 +3204,10 @@ static void cm_send_handler(struct ib_ma
25735 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
25736 msg->retries = 1;
25737
25738 - atomic_long_add(1 + msg->retries,
25739 + atomic_long_add_unchecked(1 + msg->retries,
25740 &port->counter_group[CM_XMIT].counter[attr_index]);
25741 if (msg->retries)
25742 - atomic_long_add(msg->retries,
25743 + atomic_long_add_unchecked(msg->retries,
25744 &port->counter_group[CM_XMIT_RETRIES].
25745 counter[attr_index]);
25746
25747 @@ -3417,7 +3417,7 @@ static void cm_recv_handler(struct ib_ma
25748 }
25749
25750 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
25751 - atomic_long_inc(&port->counter_group[CM_RECV].
25752 + atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
25753 counter[attr_id - CM_ATTR_ID_OFFSET]);
25754
25755 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
25756 @@ -3615,7 +3615,7 @@ static ssize_t cm_show_counter(struct ko
25757 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
25758
25759 return sprintf(buf, "%ld\n",
25760 - atomic_long_read(&group->counter[cm_attr->index]));
25761 + atomic_long_read_unchecked(&group->counter[cm_attr->index]));
25762 }
25763
25764 static const struct sysfs_ops cm_counter_ops = {
25765 diff -urNp linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c
25766 --- linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c 2011-05-19 00:06:34.000000000 -0400
25767 +++ linux-2.6.39.4/drivers/infiniband/core/fmr_pool.c 2011-08-05 19:44:36.000000000 -0400
25768 @@ -97,8 +97,8 @@ struct ib_fmr_pool {
25769
25770 struct task_struct *thread;
25771
25772 - atomic_t req_ser;
25773 - atomic_t flush_ser;
25774 + atomic_unchecked_t req_ser;
25775 + atomic_unchecked_t flush_ser;
25776
25777 wait_queue_head_t force_wait;
25778 };
25779 @@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
25780 struct ib_fmr_pool *pool = pool_ptr;
25781
25782 do {
25783 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
25784 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
25785 ib_fmr_batch_release(pool);
25786
25787 - atomic_inc(&pool->flush_ser);
25788 + atomic_inc_unchecked(&pool->flush_ser);
25789 wake_up_interruptible(&pool->force_wait);
25790
25791 if (pool->flush_function)
25792 @@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
25793 }
25794
25795 set_current_state(TASK_INTERRUPTIBLE);
25796 - if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
25797 + if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
25798 !kthread_should_stop())
25799 schedule();
25800 __set_current_state(TASK_RUNNING);
25801 @@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
25802 pool->dirty_watermark = params->dirty_watermark;
25803 pool->dirty_len = 0;
25804 spin_lock_init(&pool->pool_lock);
25805 - atomic_set(&pool->req_ser, 0);
25806 - atomic_set(&pool->flush_ser, 0);
25807 + atomic_set_unchecked(&pool->req_ser, 0);
25808 + atomic_set_unchecked(&pool->flush_ser, 0);
25809 init_waitqueue_head(&pool->force_wait);
25810
25811 pool->thread = kthread_run(ib_fmr_cleanup_thread,
25812 @@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
25813 }
25814 spin_unlock_irq(&pool->pool_lock);
25815
25816 - serial = atomic_inc_return(&pool->req_ser);
25817 + serial = atomic_inc_return_unchecked(&pool->req_ser);
25818 wake_up_process(pool->thread);
25819
25820 if (wait_event_interruptible(pool->force_wait,
25821 - atomic_read(&pool->flush_ser) - serial >= 0))
25822 + atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
25823 return -EINTR;
25824
25825 return 0;
25826 @@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
25827 } else {
25828 list_add_tail(&fmr->list, &pool->dirty_list);
25829 if (++pool->dirty_len >= pool->dirty_watermark) {
25830 - atomic_inc(&pool->req_ser);
25831 + atomic_inc_unchecked(&pool->req_ser);
25832 wake_up_process(pool->thread);
25833 }
25834 }
25835 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c
25836 --- linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c 2011-05-19 00:06:34.000000000 -0400
25837 +++ linux-2.6.39.4/drivers/infiniband/hw/cxgb4/mem.c 2011-08-05 19:44:36.000000000 -0400
25838 @@ -122,7 +122,7 @@ static int write_tpt_entry(struct c4iw_r
25839 int err;
25840 struct fw_ri_tpte tpt;
25841 u32 stag_idx;
25842 - static atomic_t key;
25843 + static atomic_unchecked_t key;
25844
25845 if (c4iw_fatal_error(rdev))
25846 return -EIO;
25847 @@ -135,7 +135,7 @@ static int write_tpt_entry(struct c4iw_r
25848 &rdev->resource.tpt_fifo_lock);
25849 if (!stag_idx)
25850 return -ENOMEM;
25851 - *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
25852 + *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
25853 }
25854 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
25855 __func__, stag_state, type, pdid, stag_idx);
25856 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c
25857 --- linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-19 00:06:34.000000000 -0400
25858 +++ linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_fs.c 2011-08-05 19:44:36.000000000 -0400
25859 @@ -113,6 +113,8 @@ static ssize_t atomic_counters_read(stru
25860 struct infinipath_counters counters;
25861 struct ipath_devdata *dd;
25862
25863 + pax_track_stack();
25864 +
25865 dd = file->f_path.dentry->d_inode->i_private;
25866 dd->ipath_f_read_counters(dd, &counters);
25867
25868 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c
25869 --- linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-05-19 00:06:34.000000000 -0400
25870 +++ linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_rc.c 2011-08-05 19:44:36.000000000 -0400
25871 @@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25872 struct ib_atomic_eth *ateth;
25873 struct ipath_ack_entry *e;
25874 u64 vaddr;
25875 - atomic64_t *maddr;
25876 + atomic64_unchecked_t *maddr;
25877 u64 sdata;
25878 u32 rkey;
25879 u8 next;
25880 @@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *de
25881 IB_ACCESS_REMOTE_ATOMIC)))
25882 goto nack_acc_unlck;
25883 /* Perform atomic OP and save result. */
25884 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25885 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25886 sdata = be64_to_cpu(ateth->swap_data);
25887 e = &qp->s_ack_queue[qp->r_head_ack_queue];
25888 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
25889 - (u64) atomic64_add_return(sdata, maddr) - sdata :
25890 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25891 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25892 be64_to_cpu(ateth->compare_data),
25893 sdata);
25894 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c
25895 --- linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-05-19 00:06:34.000000000 -0400
25896 +++ linux-2.6.39.4/drivers/infiniband/hw/ipath/ipath_ruc.c 2011-08-05 19:44:36.000000000 -0400
25897 @@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ip
25898 unsigned long flags;
25899 struct ib_wc wc;
25900 u64 sdata;
25901 - atomic64_t *maddr;
25902 + atomic64_unchecked_t *maddr;
25903 enum ib_wc_status send_status;
25904
25905 /*
25906 @@ -382,11 +382,11 @@ again:
25907 IB_ACCESS_REMOTE_ATOMIC)))
25908 goto acc_err;
25909 /* Perform atomic OP and save result. */
25910 - maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
25911 + maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
25912 sdata = wqe->wr.wr.atomic.compare_add;
25913 *(u64 *) sqp->s_sge.sge.vaddr =
25914 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
25915 - (u64) atomic64_add_return(sdata, maddr) - sdata :
25916 + (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
25917 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
25918 sdata, wqe->wr.wr.atomic.swap);
25919 goto send_comp;
25920 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c
25921 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c 2011-05-19 00:06:34.000000000 -0400
25922 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes.c 2011-08-05 19:44:36.000000000 -0400
25923 @@ -103,7 +103,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
25924 LIST_HEAD(nes_adapter_list);
25925 static LIST_HEAD(nes_dev_list);
25926
25927 -atomic_t qps_destroyed;
25928 +atomic_unchecked_t qps_destroyed;
25929
25930 static unsigned int ee_flsh_adapter;
25931 static unsigned int sysfs_nonidx_addr;
25932 @@ -275,7 +275,7 @@ static void nes_cqp_rem_ref_callback(str
25933 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
25934 struct nes_adapter *nesadapter = nesdev->nesadapter;
25935
25936 - atomic_inc(&qps_destroyed);
25937 + atomic_inc_unchecked(&qps_destroyed);
25938
25939 /* Free the control structures */
25940
25941 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c
25942 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c 2011-05-19 00:06:34.000000000 -0400
25943 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes_cm.c 2011-08-05 19:44:36.000000000 -0400
25944 @@ -68,14 +68,14 @@ u32 cm_packets_dropped;
25945 u32 cm_packets_retrans;
25946 u32 cm_packets_created;
25947 u32 cm_packets_received;
25948 -atomic_t cm_listens_created;
25949 -atomic_t cm_listens_destroyed;
25950 +atomic_unchecked_t cm_listens_created;
25951 +atomic_unchecked_t cm_listens_destroyed;
25952 u32 cm_backlog_drops;
25953 -atomic_t cm_loopbacks;
25954 -atomic_t cm_nodes_created;
25955 -atomic_t cm_nodes_destroyed;
25956 -atomic_t cm_accel_dropped_pkts;
25957 -atomic_t cm_resets_recvd;
25958 +atomic_unchecked_t cm_loopbacks;
25959 +atomic_unchecked_t cm_nodes_created;
25960 +atomic_unchecked_t cm_nodes_destroyed;
25961 +atomic_unchecked_t cm_accel_dropped_pkts;
25962 +atomic_unchecked_t cm_resets_recvd;
25963
25964 static inline int mini_cm_accelerated(struct nes_cm_core *,
25965 struct nes_cm_node *);
25966 @@ -151,13 +151,13 @@ static struct nes_cm_ops nes_cm_api = {
25967
25968 static struct nes_cm_core *g_cm_core;
25969
25970 -atomic_t cm_connects;
25971 -atomic_t cm_accepts;
25972 -atomic_t cm_disconnects;
25973 -atomic_t cm_closes;
25974 -atomic_t cm_connecteds;
25975 -atomic_t cm_connect_reqs;
25976 -atomic_t cm_rejects;
25977 +atomic_unchecked_t cm_connects;
25978 +atomic_unchecked_t cm_accepts;
25979 +atomic_unchecked_t cm_disconnects;
25980 +atomic_unchecked_t cm_closes;
25981 +atomic_unchecked_t cm_connecteds;
25982 +atomic_unchecked_t cm_connect_reqs;
25983 +atomic_unchecked_t cm_rejects;
25984
25985
25986 /**
25987 @@ -1045,7 +1045,7 @@ static int mini_cm_dec_refcnt_listen(str
25988 kfree(listener);
25989 listener = NULL;
25990 ret = 0;
25991 - atomic_inc(&cm_listens_destroyed);
25992 + atomic_inc_unchecked(&cm_listens_destroyed);
25993 } else {
25994 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
25995 }
25996 @@ -1240,7 +1240,7 @@ static struct nes_cm_node *make_cm_node(
25997 cm_node->rem_mac);
25998
25999 add_hte_node(cm_core, cm_node);
26000 - atomic_inc(&cm_nodes_created);
26001 + atomic_inc_unchecked(&cm_nodes_created);
26002
26003 return cm_node;
26004 }
26005 @@ -1298,7 +1298,7 @@ static int rem_ref_cm_node(struct nes_cm
26006 }
26007
26008 atomic_dec(&cm_core->node_cnt);
26009 - atomic_inc(&cm_nodes_destroyed);
26010 + atomic_inc_unchecked(&cm_nodes_destroyed);
26011 nesqp = cm_node->nesqp;
26012 if (nesqp) {
26013 nesqp->cm_node = NULL;
26014 @@ -1365,7 +1365,7 @@ static int process_options(struct nes_cm
26015
26016 static void drop_packet(struct sk_buff *skb)
26017 {
26018 - atomic_inc(&cm_accel_dropped_pkts);
26019 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
26020 dev_kfree_skb_any(skb);
26021 }
26022
26023 @@ -1428,7 +1428,7 @@ static void handle_rst_pkt(struct nes_cm
26024 {
26025
26026 int reset = 0; /* whether to send reset in case of err.. */
26027 - atomic_inc(&cm_resets_recvd);
26028 + atomic_inc_unchecked(&cm_resets_recvd);
26029 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
26030 " refcnt=%d\n", cm_node, cm_node->state,
26031 atomic_read(&cm_node->ref_count));
26032 @@ -2057,7 +2057,7 @@ static struct nes_cm_node *mini_cm_conne
26033 rem_ref_cm_node(cm_node->cm_core, cm_node);
26034 return NULL;
26035 }
26036 - atomic_inc(&cm_loopbacks);
26037 + atomic_inc_unchecked(&cm_loopbacks);
26038 loopbackremotenode->loopbackpartner = cm_node;
26039 loopbackremotenode->tcp_cntxt.rcv_wscale =
26040 NES_CM_DEFAULT_RCV_WND_SCALE;
26041 @@ -2332,7 +2332,7 @@ static int mini_cm_recv_pkt(struct nes_c
26042 add_ref_cm_node(cm_node);
26043 } else if (cm_node->state == NES_CM_STATE_TSA) {
26044 rem_ref_cm_node(cm_core, cm_node);
26045 - atomic_inc(&cm_accel_dropped_pkts);
26046 + atomic_inc_unchecked(&cm_accel_dropped_pkts);
26047 dev_kfree_skb_any(skb);
26048 break;
26049 }
26050 @@ -2638,7 +2638,7 @@ static int nes_cm_disconn_true(struct ne
26051
26052 if ((cm_id) && (cm_id->event_handler)) {
26053 if (issue_disconn) {
26054 - atomic_inc(&cm_disconnects);
26055 + atomic_inc_unchecked(&cm_disconnects);
26056 cm_event.event = IW_CM_EVENT_DISCONNECT;
26057 cm_event.status = disconn_status;
26058 cm_event.local_addr = cm_id->local_addr;
26059 @@ -2660,7 +2660,7 @@ static int nes_cm_disconn_true(struct ne
26060 }
26061
26062 if (issue_close) {
26063 - atomic_inc(&cm_closes);
26064 + atomic_inc_unchecked(&cm_closes);
26065 nes_disconnect(nesqp, 1);
26066
26067 cm_id->provider_data = nesqp;
26068 @@ -2791,7 +2791,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
26069
26070 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
26071 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
26072 - atomic_inc(&cm_accepts);
26073 + atomic_inc_unchecked(&cm_accepts);
26074
26075 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
26076 netdev_refcnt_read(nesvnic->netdev));
26077 @@ -3001,7 +3001,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
26078
26079 struct nes_cm_core *cm_core;
26080
26081 - atomic_inc(&cm_rejects);
26082 + atomic_inc_unchecked(&cm_rejects);
26083 cm_node = (struct nes_cm_node *) cm_id->provider_data;
26084 loopback = cm_node->loopbackpartner;
26085 cm_core = cm_node->cm_core;
26086 @@ -3067,7 +3067,7 @@ int nes_connect(struct iw_cm_id *cm_id,
26087 ntohl(cm_id->local_addr.sin_addr.s_addr),
26088 ntohs(cm_id->local_addr.sin_port));
26089
26090 - atomic_inc(&cm_connects);
26091 + atomic_inc_unchecked(&cm_connects);
26092 nesqp->active_conn = 1;
26093
26094 /* cache the cm_id in the qp */
26095 @@ -3173,7 +3173,7 @@ int nes_create_listen(struct iw_cm_id *c
26096 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
26097 return err;
26098 }
26099 - atomic_inc(&cm_listens_created);
26100 + atomic_inc_unchecked(&cm_listens_created);
26101 }
26102
26103 cm_id->add_ref(cm_id);
26104 @@ -3278,7 +3278,7 @@ static void cm_event_connected(struct ne
26105 if (nesqp->destroyed) {
26106 return;
26107 }
26108 - atomic_inc(&cm_connecteds);
26109 + atomic_inc_unchecked(&cm_connecteds);
26110 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
26111 " local port 0x%04X. jiffies = %lu.\n",
26112 nesqp->hwqp.qp_id,
26113 @@ -3493,7 +3493,7 @@ static void cm_event_reset(struct nes_cm
26114
26115 cm_id->add_ref(cm_id);
26116 ret = cm_id->event_handler(cm_id, &cm_event);
26117 - atomic_inc(&cm_closes);
26118 + atomic_inc_unchecked(&cm_closes);
26119 cm_event.event = IW_CM_EVENT_CLOSE;
26120 cm_event.status = IW_CM_EVENT_STATUS_OK;
26121 cm_event.provider_data = cm_id->provider_data;
26122 @@ -3529,7 +3529,7 @@ static void cm_event_mpa_req(struct nes_
26123 return;
26124 cm_id = cm_node->cm_id;
26125
26126 - atomic_inc(&cm_connect_reqs);
26127 + atomic_inc_unchecked(&cm_connect_reqs);
26128 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26129 cm_node, cm_id, jiffies);
26130
26131 @@ -3567,7 +3567,7 @@ static void cm_event_mpa_reject(struct n
26132 return;
26133 cm_id = cm_node->cm_id;
26134
26135 - atomic_inc(&cm_connect_reqs);
26136 + atomic_inc_unchecked(&cm_connect_reqs);
26137 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
26138 cm_node, cm_id, jiffies);
26139
26140 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h
26141 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h 2011-05-19 00:06:34.000000000 -0400
26142 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes.h 2011-08-05 19:44:36.000000000 -0400
26143 @@ -175,17 +175,17 @@ extern unsigned int nes_debug_level;
26144 extern unsigned int wqm_quanta;
26145 extern struct list_head nes_adapter_list;
26146
26147 -extern atomic_t cm_connects;
26148 -extern atomic_t cm_accepts;
26149 -extern atomic_t cm_disconnects;
26150 -extern atomic_t cm_closes;
26151 -extern atomic_t cm_connecteds;
26152 -extern atomic_t cm_connect_reqs;
26153 -extern atomic_t cm_rejects;
26154 -extern atomic_t mod_qp_timouts;
26155 -extern atomic_t qps_created;
26156 -extern atomic_t qps_destroyed;
26157 -extern atomic_t sw_qps_destroyed;
26158 +extern atomic_unchecked_t cm_connects;
26159 +extern atomic_unchecked_t cm_accepts;
26160 +extern atomic_unchecked_t cm_disconnects;
26161 +extern atomic_unchecked_t cm_closes;
26162 +extern atomic_unchecked_t cm_connecteds;
26163 +extern atomic_unchecked_t cm_connect_reqs;
26164 +extern atomic_unchecked_t cm_rejects;
26165 +extern atomic_unchecked_t mod_qp_timouts;
26166 +extern atomic_unchecked_t qps_created;
26167 +extern atomic_unchecked_t qps_destroyed;
26168 +extern atomic_unchecked_t sw_qps_destroyed;
26169 extern u32 mh_detected;
26170 extern u32 mh_pauses_sent;
26171 extern u32 cm_packets_sent;
26172 @@ -194,14 +194,14 @@ extern u32 cm_packets_created;
26173 extern u32 cm_packets_received;
26174 extern u32 cm_packets_dropped;
26175 extern u32 cm_packets_retrans;
26176 -extern atomic_t cm_listens_created;
26177 -extern atomic_t cm_listens_destroyed;
26178 +extern atomic_unchecked_t cm_listens_created;
26179 +extern atomic_unchecked_t cm_listens_destroyed;
26180 extern u32 cm_backlog_drops;
26181 -extern atomic_t cm_loopbacks;
26182 -extern atomic_t cm_nodes_created;
26183 -extern atomic_t cm_nodes_destroyed;
26184 -extern atomic_t cm_accel_dropped_pkts;
26185 -extern atomic_t cm_resets_recvd;
26186 +extern atomic_unchecked_t cm_loopbacks;
26187 +extern atomic_unchecked_t cm_nodes_created;
26188 +extern atomic_unchecked_t cm_nodes_destroyed;
26189 +extern atomic_unchecked_t cm_accel_dropped_pkts;
26190 +extern atomic_unchecked_t cm_resets_recvd;
26191
26192 extern u32 int_mod_timer_init;
26193 extern u32 int_mod_cq_depth_256;
26194 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c
26195 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c 2011-05-19 00:06:34.000000000 -0400
26196 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes_nic.c 2011-08-05 19:44:36.000000000 -0400
26197 @@ -1302,31 +1302,31 @@ static void nes_netdev_get_ethtool_stats
26198 target_stat_values[++index] = mh_detected;
26199 target_stat_values[++index] = mh_pauses_sent;
26200 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
26201 - target_stat_values[++index] = atomic_read(&cm_connects);
26202 - target_stat_values[++index] = atomic_read(&cm_accepts);
26203 - target_stat_values[++index] = atomic_read(&cm_disconnects);
26204 - target_stat_values[++index] = atomic_read(&cm_connecteds);
26205 - target_stat_values[++index] = atomic_read(&cm_connect_reqs);
26206 - target_stat_values[++index] = atomic_read(&cm_rejects);
26207 - target_stat_values[++index] = atomic_read(&mod_qp_timouts);
26208 - target_stat_values[++index] = atomic_read(&qps_created);
26209 - target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
26210 - target_stat_values[++index] = atomic_read(&qps_destroyed);
26211 - target_stat_values[++index] = atomic_read(&cm_closes);
26212 + target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
26213 + target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
26214 + target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
26215 + target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
26216 + target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
26217 + target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
26218 + target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
26219 + target_stat_values[++index] = atomic_read_unchecked(&qps_created);
26220 + target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
26221 + target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
26222 + target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
26223 target_stat_values[++index] = cm_packets_sent;
26224 target_stat_values[++index] = cm_packets_bounced;
26225 target_stat_values[++index] = cm_packets_created;
26226 target_stat_values[++index] = cm_packets_received;
26227 target_stat_values[++index] = cm_packets_dropped;
26228 target_stat_values[++index] = cm_packets_retrans;
26229 - target_stat_values[++index] = atomic_read(&cm_listens_created);
26230 - target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
26231 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
26232 + target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
26233 target_stat_values[++index] = cm_backlog_drops;
26234 - target_stat_values[++index] = atomic_read(&cm_loopbacks);
26235 - target_stat_values[++index] = atomic_read(&cm_nodes_created);
26236 - target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
26237 - target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
26238 - target_stat_values[++index] = atomic_read(&cm_resets_recvd);
26239 + target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
26240 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
26241 + target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
26242 + target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
26243 + target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
26244 target_stat_values[++index] = nesadapter->free_4kpbl;
26245 target_stat_values[++index] = nesadapter->free_256pbl;
26246 target_stat_values[++index] = int_mod_timer_init;
26247 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c
26248 --- linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-19 00:06:34.000000000 -0400
26249 +++ linux-2.6.39.4/drivers/infiniband/hw/nes/nes_verbs.c 2011-08-05 19:44:36.000000000 -0400
26250 @@ -46,9 +46,9 @@
26251
26252 #include <rdma/ib_umem.h>
26253
26254 -atomic_t mod_qp_timouts;
26255 -atomic_t qps_created;
26256 -atomic_t sw_qps_destroyed;
26257 +atomic_unchecked_t mod_qp_timouts;
26258 +atomic_unchecked_t qps_created;
26259 +atomic_unchecked_t sw_qps_destroyed;
26260
26261 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
26262
26263 @@ -1141,7 +1141,7 @@ static struct ib_qp *nes_create_qp(struc
26264 if (init_attr->create_flags)
26265 return ERR_PTR(-EINVAL);
26266
26267 - atomic_inc(&qps_created);
26268 + atomic_inc_unchecked(&qps_created);
26269 switch (init_attr->qp_type) {
26270 case IB_QPT_RC:
26271 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
26272 @@ -1470,7 +1470,7 @@ static int nes_destroy_qp(struct ib_qp *
26273 struct iw_cm_event cm_event;
26274 int ret;
26275
26276 - atomic_inc(&sw_qps_destroyed);
26277 + atomic_inc_unchecked(&sw_qps_destroyed);
26278 nesqp->destroyed = 1;
26279
26280 /* Blow away the connection if it exists. */
26281 diff -urNp linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h
26282 --- linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h 2011-05-19 00:06:34.000000000 -0400
26283 +++ linux-2.6.39.4/drivers/infiniband/hw/qib/qib.h 2011-08-05 20:34:06.000000000 -0400
26284 @@ -51,6 +51,7 @@
26285 #include <linux/completion.h>
26286 #include <linux/kref.h>
26287 #include <linux/sched.h>
26288 +#include <linux/slab.h>
26289
26290 #include "qib_common.h"
26291 #include "qib_verbs.h"
26292 diff -urNp linux-2.6.39.4/drivers/input/gameport/gameport.c linux-2.6.39.4/drivers/input/gameport/gameport.c
26293 --- linux-2.6.39.4/drivers/input/gameport/gameport.c 2011-05-19 00:06:34.000000000 -0400
26294 +++ linux-2.6.39.4/drivers/input/gameport/gameport.c 2011-08-05 19:44:37.000000000 -0400
26295 @@ -488,14 +488,14 @@ EXPORT_SYMBOL(gameport_set_phys);
26296 */
26297 static void gameport_init_port(struct gameport *gameport)
26298 {
26299 - static atomic_t gameport_no = ATOMIC_INIT(0);
26300 + static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
26301
26302 __module_get(THIS_MODULE);
26303
26304 mutex_init(&gameport->drv_mutex);
26305 device_initialize(&gameport->dev);
26306 dev_set_name(&gameport->dev, "gameport%lu",
26307 - (unsigned long)atomic_inc_return(&gameport_no) - 1);
26308 + (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
26309 gameport->dev.bus = &gameport_bus;
26310 gameport->dev.release = gameport_release_port;
26311 if (gameport->parent)
26312 diff -urNp linux-2.6.39.4/drivers/input/input.c linux-2.6.39.4/drivers/input/input.c
26313 --- linux-2.6.39.4/drivers/input/input.c 2011-07-09 09:18:51.000000000 -0400
26314 +++ linux-2.6.39.4/drivers/input/input.c 2011-08-05 19:44:37.000000000 -0400
26315 @@ -1815,7 +1815,7 @@ static void input_cleanse_bitmasks(struc
26316 */
26317 int input_register_device(struct input_dev *dev)
26318 {
26319 - static atomic_t input_no = ATOMIC_INIT(0);
26320 + static atomic_unchecked_t input_no = ATOMIC_INIT(0);
26321 struct input_handler *handler;
26322 const char *path;
26323 int error;
26324 @@ -1852,7 +1852,7 @@ int input_register_device(struct input_d
26325 dev->setkeycode = input_default_setkeycode;
26326
26327 dev_set_name(&dev->dev, "input%ld",
26328 - (unsigned long) atomic_inc_return(&input_no) - 1);
26329 + (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
26330
26331 error = device_add(&dev->dev);
26332 if (error)
26333 diff -urNp linux-2.6.39.4/drivers/input/joystick/sidewinder.c linux-2.6.39.4/drivers/input/joystick/sidewinder.c
26334 --- linux-2.6.39.4/drivers/input/joystick/sidewinder.c 2011-05-19 00:06:34.000000000 -0400
26335 +++ linux-2.6.39.4/drivers/input/joystick/sidewinder.c 2011-08-05 19:44:37.000000000 -0400
26336 @@ -30,6 +30,7 @@
26337 #include <linux/kernel.h>
26338 #include <linux/module.h>
26339 #include <linux/slab.h>
26340 +#include <linux/sched.h>
26341 #include <linux/init.h>
26342 #include <linux/input.h>
26343 #include <linux/gameport.h>
26344 @@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
26345 unsigned char buf[SW_LENGTH];
26346 int i;
26347
26348 + pax_track_stack();
26349 +
26350 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
26351
26352 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
26353 diff -urNp linux-2.6.39.4/drivers/input/joystick/xpad.c linux-2.6.39.4/drivers/input/joystick/xpad.c
26354 --- linux-2.6.39.4/drivers/input/joystick/xpad.c 2011-05-19 00:06:34.000000000 -0400
26355 +++ linux-2.6.39.4/drivers/input/joystick/xpad.c 2011-08-05 19:44:37.000000000 -0400
26356 @@ -689,7 +689,7 @@ static void xpad_led_set(struct led_clas
26357
26358 static int xpad_led_probe(struct usb_xpad *xpad)
26359 {
26360 - static atomic_t led_seq = ATOMIC_INIT(0);
26361 + static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
26362 long led_no;
26363 struct xpad_led *led;
26364 struct led_classdev *led_cdev;
26365 @@ -702,7 +702,7 @@ static int xpad_led_probe(struct usb_xpa
26366 if (!led)
26367 return -ENOMEM;
26368
26369 - led_no = (long)atomic_inc_return(&led_seq) - 1;
26370 + led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
26371
26372 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
26373 led->xpad = xpad;
26374 diff -urNp linux-2.6.39.4/drivers/input/mousedev.c linux-2.6.39.4/drivers/input/mousedev.c
26375 --- linux-2.6.39.4/drivers/input/mousedev.c 2011-07-09 09:18:51.000000000 -0400
26376 +++ linux-2.6.39.4/drivers/input/mousedev.c 2011-08-05 19:44:37.000000000 -0400
26377 @@ -764,7 +764,7 @@ static ssize_t mousedev_read(struct file
26378
26379 spin_unlock_irq(&client->packet_lock);
26380
26381 - if (copy_to_user(buffer, data, count))
26382 + if (count > sizeof(data) || copy_to_user(buffer, data, count))
26383 return -EFAULT;
26384
26385 return count;
26386 diff -urNp linux-2.6.39.4/drivers/input/serio/serio.c linux-2.6.39.4/drivers/input/serio/serio.c
26387 --- linux-2.6.39.4/drivers/input/serio/serio.c 2011-05-19 00:06:34.000000000 -0400
26388 +++ linux-2.6.39.4/drivers/input/serio/serio.c 2011-08-05 19:44:37.000000000 -0400
26389 @@ -497,7 +497,7 @@ static void serio_release_port(struct de
26390 */
26391 static void serio_init_port(struct serio *serio)
26392 {
26393 - static atomic_t serio_no = ATOMIC_INIT(0);
26394 + static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
26395
26396 __module_get(THIS_MODULE);
26397
26398 @@ -508,7 +508,7 @@ static void serio_init_port(struct serio
26399 mutex_init(&serio->drv_mutex);
26400 device_initialize(&serio->dev);
26401 dev_set_name(&serio->dev, "serio%ld",
26402 - (long)atomic_inc_return(&serio_no) - 1);
26403 + (long)atomic_inc_return_unchecked(&serio_no) - 1);
26404 serio->dev.bus = &serio_bus;
26405 serio->dev.release = serio_release_port;
26406 serio->dev.groups = serio_device_attr_groups;
26407 diff -urNp linux-2.6.39.4/drivers/isdn/capi/capi.c linux-2.6.39.4/drivers/isdn/capi/capi.c
26408 --- linux-2.6.39.4/drivers/isdn/capi/capi.c 2011-05-19 00:06:34.000000000 -0400
26409 +++ linux-2.6.39.4/drivers/isdn/capi/capi.c 2011-08-05 19:44:37.000000000 -0400
26410 @@ -89,8 +89,8 @@ struct capiminor {
26411
26412 struct capi20_appl *ap;
26413 u32 ncci;
26414 - atomic_t datahandle;
26415 - atomic_t msgid;
26416 + atomic_unchecked_t datahandle;
26417 + atomic_unchecked_t msgid;
26418
26419 struct tty_port port;
26420 int ttyinstop;
26421 @@ -414,7 +414,7 @@ gen_data_b3_resp_for(struct capiminor *m
26422 capimsg_setu16(s, 2, mp->ap->applid);
26423 capimsg_setu8 (s, 4, CAPI_DATA_B3);
26424 capimsg_setu8 (s, 5, CAPI_RESP);
26425 - capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
26426 + capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
26427 capimsg_setu32(s, 8, mp->ncci);
26428 capimsg_setu16(s, 12, datahandle);
26429 }
26430 @@ -547,14 +547,14 @@ static void handle_minor_send(struct cap
26431 mp->outbytes -= len;
26432 spin_unlock_bh(&mp->outlock);
26433
26434 - datahandle = atomic_inc_return(&mp->datahandle);
26435 + datahandle = atomic_inc_return_unchecked(&mp->datahandle);
26436 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
26437 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26438 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
26439 capimsg_setu16(skb->data, 2, mp->ap->applid);
26440 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
26441 capimsg_setu8 (skb->data, 5, CAPI_REQ);
26442 - capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
26443 + capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
26444 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
26445 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
26446 capimsg_setu16(skb->data, 16, len); /* Data length */
26447 diff -urNp linux-2.6.39.4/drivers/isdn/gigaset/common.c linux-2.6.39.4/drivers/isdn/gigaset/common.c
26448 --- linux-2.6.39.4/drivers/isdn/gigaset/common.c 2011-05-19 00:06:34.000000000 -0400
26449 +++ linux-2.6.39.4/drivers/isdn/gigaset/common.c 2011-08-05 19:44:37.000000000 -0400
26450 @@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct
26451 cs->commands_pending = 0;
26452 cs->cur_at_seq = 0;
26453 cs->gotfwver = -1;
26454 - cs->open_count = 0;
26455 + local_set(&cs->open_count, 0);
26456 cs->dev = NULL;
26457 cs->tty = NULL;
26458 cs->tty_dev = NULL;
26459 diff -urNp linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h
26460 --- linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h 2011-05-19 00:06:34.000000000 -0400
26461 +++ linux-2.6.39.4/drivers/isdn/gigaset/gigaset.h 2011-08-05 19:44:37.000000000 -0400
26462 @@ -35,6 +35,7 @@
26463 #include <linux/tty_driver.h>
26464 #include <linux/list.h>
26465 #include <asm/atomic.h>
26466 +#include <asm/local.h>
26467
26468 #define GIG_VERSION {0, 5, 0, 0}
26469 #define GIG_COMPAT {0, 4, 0, 0}
26470 @@ -433,7 +434,7 @@ struct cardstate {
26471 spinlock_t cmdlock;
26472 unsigned curlen, cmdbytes;
26473
26474 - unsigned open_count;
26475 + local_t open_count;
26476 struct tty_struct *tty;
26477 struct tasklet_struct if_wake_tasklet;
26478 unsigned control_state;
26479 diff -urNp linux-2.6.39.4/drivers/isdn/gigaset/interface.c linux-2.6.39.4/drivers/isdn/gigaset/interface.c
26480 --- linux-2.6.39.4/drivers/isdn/gigaset/interface.c 2011-05-19 00:06:34.000000000 -0400
26481 +++ linux-2.6.39.4/drivers/isdn/gigaset/interface.c 2011-08-05 19:44:37.000000000 -0400
26482 @@ -160,9 +160,7 @@ static int if_open(struct tty_struct *tt
26483 return -ERESTARTSYS;
26484 tty->driver_data = cs;
26485
26486 - ++cs->open_count;
26487 -
26488 - if (cs->open_count == 1) {
26489 + if (local_inc_return(&cs->open_count) == 1) {
26490 spin_lock_irqsave(&cs->lock, flags);
26491 cs->tty = tty;
26492 spin_unlock_irqrestore(&cs->lock, flags);
26493 @@ -190,10 +188,10 @@ static void if_close(struct tty_struct *
26494
26495 if (!cs->connected)
26496 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26497 - else if (!cs->open_count)
26498 + else if (!local_read(&cs->open_count))
26499 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26500 else {
26501 - if (!--cs->open_count) {
26502 + if (!local_dec_return(&cs->open_count)) {
26503 spin_lock_irqsave(&cs->lock, flags);
26504 cs->tty = NULL;
26505 spin_unlock_irqrestore(&cs->lock, flags);
26506 @@ -228,7 +226,7 @@ static int if_ioctl(struct tty_struct *t
26507 if (!cs->connected) {
26508 gig_dbg(DEBUG_IF, "not connected");
26509 retval = -ENODEV;
26510 - } else if (!cs->open_count)
26511 + } else if (!local_read(&cs->open_count))
26512 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26513 else {
26514 retval = 0;
26515 @@ -358,7 +356,7 @@ static int if_write(struct tty_struct *t
26516 retval = -ENODEV;
26517 goto done;
26518 }
26519 - if (!cs->open_count) {
26520 + if (!local_read(&cs->open_count)) {
26521 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26522 retval = -ENODEV;
26523 goto done;
26524 @@ -411,7 +409,7 @@ static int if_write_room(struct tty_stru
26525 if (!cs->connected) {
26526 gig_dbg(DEBUG_IF, "not connected");
26527 retval = -ENODEV;
26528 - } else if (!cs->open_count)
26529 + } else if (!local_read(&cs->open_count))
26530 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26531 else if (cs->mstate != MS_LOCKED) {
26532 dev_warn(cs->dev, "can't write to unlocked device\n");
26533 @@ -441,7 +439,7 @@ static int if_chars_in_buffer(struct tty
26534
26535 if (!cs->connected)
26536 gig_dbg(DEBUG_IF, "not connected");
26537 - else if (!cs->open_count)
26538 + else if (!local_read(&cs->open_count))
26539 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26540 else if (cs->mstate != MS_LOCKED)
26541 dev_warn(cs->dev, "can't write to unlocked device\n");
26542 @@ -469,7 +467,7 @@ static void if_throttle(struct tty_struc
26543
26544 if (!cs->connected)
26545 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26546 - else if (!cs->open_count)
26547 + else if (!local_read(&cs->open_count))
26548 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26549 else
26550 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26551 @@ -493,7 +491,7 @@ static void if_unthrottle(struct tty_str
26552
26553 if (!cs->connected)
26554 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
26555 - else if (!cs->open_count)
26556 + else if (!local_read(&cs->open_count))
26557 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26558 else
26559 gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__);
26560 @@ -524,7 +522,7 @@ static void if_set_termios(struct tty_st
26561 goto out;
26562 }
26563
26564 - if (!cs->open_count) {
26565 + if (!local_read(&cs->open_count)) {
26566 dev_warn(cs->dev, "%s: device not opened\n", __func__);
26567 goto out;
26568 }
26569 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c
26570 --- linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c 2011-05-19 00:06:34.000000000 -0400
26571 +++ linux-2.6.39.4/drivers/isdn/hardware/avm/b1.c 2011-08-05 19:44:37.000000000 -0400
26572 @@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo
26573 }
26574 if (left) {
26575 if (t4file->user) {
26576 - if (copy_from_user(buf, dp, left))
26577 + if (left > sizeof buf || copy_from_user(buf, dp, left))
26578 return -EFAULT;
26579 } else {
26580 memcpy(buf, dp, left);
26581 @@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo
26582 }
26583 if (left) {
26584 if (config->user) {
26585 - if (copy_from_user(buf, dp, left))
26586 + if (left > sizeof buf || copy_from_user(buf, dp, left))
26587 return -EFAULT;
26588 } else {
26589 memcpy(buf, dp, left);
26590 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c
26591 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-19 00:06:34.000000000 -0400
26592 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/capidtmf.c 2011-08-05 19:44:37.000000000 -0400
26593 @@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
26594 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
26595 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
26596
26597 + pax_track_stack();
26598
26599 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
26600 {
26601 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c
26602 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c 2011-05-19 00:06:34.000000000 -0400
26603 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/capifunc.c 2011-08-05 19:44:37.000000000 -0400
26604 @@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
26605 IDI_SYNC_REQ req;
26606 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26607
26608 + pax_track_stack();
26609 +
26610 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26611
26612 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26613 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c
26614 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-19 00:06:34.000000000 -0400
26615 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/diddfunc.c 2011-08-05 19:44:37.000000000 -0400
26616 @@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
26617 IDI_SYNC_REQ req;
26618 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26619
26620 + pax_track_stack();
26621 +
26622 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26623
26624 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26625 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c
26626 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-19 00:06:34.000000000 -0400
26627 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/divasfunc.c 2011-08-05 19:44:37.000000000 -0400
26628 @@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
26629 IDI_SYNC_REQ req;
26630 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26631
26632 + pax_track_stack();
26633 +
26634 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26635
26636 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26637 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h
26638 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h 2011-05-19 00:06:34.000000000 -0400
26639 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/divasync.h 2011-08-05 20:34:06.000000000 -0400
26640 @@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
26641 } diva_didd_add_adapter_t;
26642 typedef struct _diva_didd_remove_adapter {
26643 IDI_CALL p_request;
26644 -} diva_didd_remove_adapter_t;
26645 +} __no_const diva_didd_remove_adapter_t;
26646 typedef struct _diva_didd_read_adapter_array {
26647 void * buffer;
26648 dword length;
26649 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c
26650 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c 2011-05-19 00:06:34.000000000 -0400
26651 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/idifunc.c 2011-08-05 19:44:37.000000000 -0400
26652 @@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
26653 IDI_SYNC_REQ req;
26654 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26655
26656 + pax_track_stack();
26657 +
26658 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26659
26660 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26661 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c
26662 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c 2011-05-19 00:06:34.000000000 -0400
26663 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/message.c 2011-08-05 19:44:37.000000000 -0400
26664 @@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
26665 dword d;
26666 word w;
26667
26668 + pax_track_stack();
26669 +
26670 a = plci->adapter;
26671 Id = ((word)plci->Id<<8)|a->Id;
26672 PUT_WORD(&SS_Ind[4],0x0000);
26673 @@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
26674 word j, n, w;
26675 dword d;
26676
26677 + pax_track_stack();
26678 +
26679
26680 for(i=0;i<8;i++) bp_parms[i].length = 0;
26681 for(i=0;i<2;i++) global_config[i].length = 0;
26682 @@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
26683 const byte llc3[] = {4,3,2,2,6,6,0};
26684 const byte header[] = {0,2,3,3,0,0,0};
26685
26686 + pax_track_stack();
26687 +
26688 for(i=0;i<8;i++) bp_parms[i].length = 0;
26689 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
26690 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
26691 @@ -14760,6 +14766,8 @@ static void group_optimization(DIVA_CAPI
26692 word appl_number_group_type[MAX_APPL];
26693 PLCI *auxplci;
26694
26695 + pax_track_stack();
26696 +
26697 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
26698
26699 if(!a->group_optimization_enabled)
26700 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c
26701 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-19 00:06:34.000000000 -0400
26702 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/mntfunc.c 2011-08-05 19:44:37.000000000 -0400
26703 @@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
26704 IDI_SYNC_REQ req;
26705 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
26706
26707 + pax_track_stack();
26708 +
26709 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
26710
26711 for (x = 0; x < MAX_DESCRIPTORS; x++) {
26712 diff -urNp linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h
26713 --- linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-05-19 00:06:34.000000000 -0400
26714 +++ linux-2.6.39.4/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-05 20:34:06.000000000 -0400
26715 @@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
26716 typedef struct _diva_os_idi_adapter_interface {
26717 diva_init_card_proc_t cleanup_adapter_proc;
26718 diva_cmd_card_proc_t cmd_proc;
26719 -} diva_os_idi_adapter_interface_t;
26720 +} __no_const diva_os_idi_adapter_interface_t;
26721
26722 typedef struct _diva_os_xdi_adapter {
26723 struct list_head link;
26724 diff -urNp linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c
26725 --- linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c 2011-05-19 00:06:34.000000000 -0400
26726 +++ linux-2.6.39.4/drivers/isdn/i4l/isdn_common.c 2011-08-05 19:44:37.000000000 -0400
26727 @@ -1292,6 +1292,8 @@ isdn_ioctl(struct file *file, uint cmd,
26728 } iocpar;
26729 void __user *argp = (void __user *)arg;
26730
26731 + pax_track_stack();
26732 +
26733 #define name iocpar.name
26734 #define bname iocpar.bname
26735 #define iocts iocpar.iocts
26736 diff -urNp linux-2.6.39.4/drivers/isdn/icn/icn.c linux-2.6.39.4/drivers/isdn/icn/icn.c
26737 --- linux-2.6.39.4/drivers/isdn/icn/icn.c 2011-05-19 00:06:34.000000000 -0400
26738 +++ linux-2.6.39.4/drivers/isdn/icn/icn.c 2011-08-05 19:44:37.000000000 -0400
26739 @@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len
26740 if (count > len)
26741 count = len;
26742 if (user) {
26743 - if (copy_from_user(msg, buf, count))
26744 + if (count > sizeof msg || copy_from_user(msg, buf, count))
26745 return -EFAULT;
26746 } else
26747 memcpy(msg, buf, count);
26748 diff -urNp linux-2.6.39.4/drivers/lguest/core.c linux-2.6.39.4/drivers/lguest/core.c
26749 --- linux-2.6.39.4/drivers/lguest/core.c 2011-05-19 00:06:34.000000000 -0400
26750 +++ linux-2.6.39.4/drivers/lguest/core.c 2011-08-05 19:44:37.000000000 -0400
26751 @@ -92,9 +92,17 @@ static __init int map_switcher(void)
26752 * it's worked so far. The end address needs +1 because __get_vm_area
26753 * allocates an extra guard page, so we need space for that.
26754 */
26755 +
26756 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
26757 + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26758 + VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
26759 + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26760 +#else
26761 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
26762 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
26763 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
26764 +#endif
26765 +
26766 if (!switcher_vma) {
26767 err = -ENOMEM;
26768 printk("lguest: could not map switcher pages high\n");
26769 @@ -119,7 +127,7 @@ static __init int map_switcher(void)
26770 * Now the Switcher is mapped at the right address, we can't fail!
26771 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
26772 */
26773 - memcpy(switcher_vma->addr, start_switcher_text,
26774 + memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
26775 end_switcher_text - start_switcher_text);
26776
26777 printk(KERN_INFO "lguest: mapped switcher at %p\n",
26778 diff -urNp linux-2.6.39.4/drivers/lguest/x86/core.c linux-2.6.39.4/drivers/lguest/x86/core.c
26779 --- linux-2.6.39.4/drivers/lguest/x86/core.c 2011-05-19 00:06:34.000000000 -0400
26780 +++ linux-2.6.39.4/drivers/lguest/x86/core.c 2011-08-05 19:44:37.000000000 -0400
26781 @@ -59,7 +59,7 @@ static struct {
26782 /* Offset from where switcher.S was compiled to where we've copied it */
26783 static unsigned long switcher_offset(void)
26784 {
26785 - return SWITCHER_ADDR - (unsigned long)start_switcher_text;
26786 + return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
26787 }
26788
26789 /* This cpu's struct lguest_pages. */
26790 @@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
26791 * These copies are pretty cheap, so we do them unconditionally: */
26792 /* Save the current Host top-level page directory.
26793 */
26794 +
26795 +#ifdef CONFIG_PAX_PER_CPU_PGD
26796 + pages->state.host_cr3 = read_cr3();
26797 +#else
26798 pages->state.host_cr3 = __pa(current->mm->pgd);
26799 +#endif
26800 +
26801 /*
26802 * Set up the Guest's page tables to see this CPU's pages (and no
26803 * other CPU's pages).
26804 @@ -547,7 +553,7 @@ void __init lguest_arch_host_init(void)
26805 * compiled-in switcher code and the high-mapped copy we just made.
26806 */
26807 for (i = 0; i < IDT_ENTRIES; i++)
26808 - default_idt_entries[i] += switcher_offset();
26809 + default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
26810
26811 /*
26812 * Set up the Switcher's per-cpu areas.
26813 @@ -630,7 +636,7 @@ void __init lguest_arch_host_init(void)
26814 * it will be undisturbed when we switch. To change %cs and jump we
26815 * need this structure to feed to Intel's "lcall" instruction.
26816 */
26817 - lguest_entry.offset = (long)switch_to_guest + switcher_offset();
26818 + lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
26819 lguest_entry.segment = LGUEST_CS;
26820
26821 /*
26822 diff -urNp linux-2.6.39.4/drivers/lguest/x86/switcher_32.S linux-2.6.39.4/drivers/lguest/x86/switcher_32.S
26823 --- linux-2.6.39.4/drivers/lguest/x86/switcher_32.S 2011-05-19 00:06:34.000000000 -0400
26824 +++ linux-2.6.39.4/drivers/lguest/x86/switcher_32.S 2011-08-05 19:44:37.000000000 -0400
26825 @@ -87,6 +87,7 @@
26826 #include <asm/page.h>
26827 #include <asm/segment.h>
26828 #include <asm/lguest.h>
26829 +#include <asm/processor-flags.h>
26830
26831 // We mark the start of the code to copy
26832 // It's placed in .text tho it's never run here
26833 @@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
26834 // Changes type when we load it: damn Intel!
26835 // For after we switch over our page tables
26836 // That entry will be read-only: we'd crash.
26837 +
26838 +#ifdef CONFIG_PAX_KERNEXEC
26839 + mov %cr0, %edx
26840 + xor $X86_CR0_WP, %edx
26841 + mov %edx, %cr0
26842 +#endif
26843 +
26844 movl $(GDT_ENTRY_TSS*8), %edx
26845 ltr %dx
26846
26847 @@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
26848 // Let's clear it again for our return.
26849 // The GDT descriptor of the Host
26850 // Points to the table after two "size" bytes
26851 - movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
26852 + movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
26853 // Clear "used" from type field (byte 5, bit 2)
26854 - andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
26855 + andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
26856 +
26857 +#ifdef CONFIG_PAX_KERNEXEC
26858 + mov %cr0, %eax
26859 + xor $X86_CR0_WP, %eax
26860 + mov %eax, %cr0
26861 +#endif
26862
26863 // Once our page table's switched, the Guest is live!
26864 // The Host fades as we run this final step.
26865 @@ -295,13 +309,12 @@ deliver_to_host:
26866 // I consulted gcc, and it gave
26867 // These instructions, which I gladly credit:
26868 leal (%edx,%ebx,8), %eax
26869 - movzwl (%eax),%edx
26870 - movl 4(%eax), %eax
26871 - xorw %ax, %ax
26872 - orl %eax, %edx
26873 + movl 4(%eax), %edx
26874 + movw (%eax), %dx
26875 // Now the address of the handler's in %edx
26876 // We call it now: its "iret" drops us home.
26877 - jmp *%edx
26878 + ljmp $__KERNEL_CS, $1f
26879 +1: jmp *%edx
26880
26881 // Every interrupt can come to us here
26882 // But we must truly tell each apart.
26883 diff -urNp linux-2.6.39.4/drivers/md/dm.c linux-2.6.39.4/drivers/md/dm.c
26884 --- linux-2.6.39.4/drivers/md/dm.c 2011-05-19 00:06:34.000000000 -0400
26885 +++ linux-2.6.39.4/drivers/md/dm.c 2011-08-05 19:44:37.000000000 -0400
26886 @@ -162,9 +162,9 @@ struct mapped_device {
26887 /*
26888 * Event handling.
26889 */
26890 - atomic_t event_nr;
26891 + atomic_unchecked_t event_nr;
26892 wait_queue_head_t eventq;
26893 - atomic_t uevent_seq;
26894 + atomic_unchecked_t uevent_seq;
26895 struct list_head uevent_list;
26896 spinlock_t uevent_lock; /* Protect access to uevent_list */
26897
26898 @@ -1836,8 +1836,8 @@ static struct mapped_device *alloc_dev(i
26899 rwlock_init(&md->map_lock);
26900 atomic_set(&md->holders, 1);
26901 atomic_set(&md->open_count, 0);
26902 - atomic_set(&md->event_nr, 0);
26903 - atomic_set(&md->uevent_seq, 0);
26904 + atomic_set_unchecked(&md->event_nr, 0);
26905 + atomic_set_unchecked(&md->uevent_seq, 0);
26906 INIT_LIST_HEAD(&md->uevent_list);
26907 spin_lock_init(&md->uevent_lock);
26908
26909 @@ -1971,7 +1971,7 @@ static void event_callback(void *context
26910
26911 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
26912
26913 - atomic_inc(&md->event_nr);
26914 + atomic_inc_unchecked(&md->event_nr);
26915 wake_up(&md->eventq);
26916 }
26917
26918 @@ -2547,18 +2547,18 @@ int dm_kobject_uevent(struct mapped_devi
26919
26920 uint32_t dm_next_uevent_seq(struct mapped_device *md)
26921 {
26922 - return atomic_add_return(1, &md->uevent_seq);
26923 + return atomic_add_return_unchecked(1, &md->uevent_seq);
26924 }
26925
26926 uint32_t dm_get_event_nr(struct mapped_device *md)
26927 {
26928 - return atomic_read(&md->event_nr);
26929 + return atomic_read_unchecked(&md->event_nr);
26930 }
26931
26932 int dm_wait_event(struct mapped_device *md, int event_nr)
26933 {
26934 return wait_event_interruptible(md->eventq,
26935 - (event_nr != atomic_read(&md->event_nr)));
26936 + (event_nr != atomic_read_unchecked(&md->event_nr)));
26937 }
26938
26939 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
26940 diff -urNp linux-2.6.39.4/drivers/md/dm-ioctl.c linux-2.6.39.4/drivers/md/dm-ioctl.c
26941 --- linux-2.6.39.4/drivers/md/dm-ioctl.c 2011-05-19 00:06:34.000000000 -0400
26942 +++ linux-2.6.39.4/drivers/md/dm-ioctl.c 2011-08-05 19:44:37.000000000 -0400
26943 @@ -1551,7 +1551,7 @@ static int validate_params(uint cmd, str
26944 cmd == DM_LIST_VERSIONS_CMD)
26945 return 0;
26946
26947 - if ((cmd == DM_DEV_CREATE_CMD)) {
26948 + if (cmd == DM_DEV_CREATE_CMD) {
26949 if (!*param->name) {
26950 DMWARN("name not supplied when creating device");
26951 return -EINVAL;
26952 diff -urNp linux-2.6.39.4/drivers/md/dm-raid1.c linux-2.6.39.4/drivers/md/dm-raid1.c
26953 --- linux-2.6.39.4/drivers/md/dm-raid1.c 2011-05-19 00:06:34.000000000 -0400
26954 +++ linux-2.6.39.4/drivers/md/dm-raid1.c 2011-08-05 19:44:37.000000000 -0400
26955 @@ -42,7 +42,7 @@ enum dm_raid1_error {
26956
26957 struct mirror {
26958 struct mirror_set *ms;
26959 - atomic_t error_count;
26960 + atomic_unchecked_t error_count;
26961 unsigned long error_type;
26962 struct dm_dev *dev;
26963 sector_t offset;
26964 @@ -187,7 +187,7 @@ static struct mirror *get_valid_mirror(s
26965 struct mirror *m;
26966
26967 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
26968 - if (!atomic_read(&m->error_count))
26969 + if (!atomic_read_unchecked(&m->error_count))
26970 return m;
26971
26972 return NULL;
26973 @@ -219,7 +219,7 @@ static void fail_mirror(struct mirror *m
26974 * simple way to tell if a device has encountered
26975 * errors.
26976 */
26977 - atomic_inc(&m->error_count);
26978 + atomic_inc_unchecked(&m->error_count);
26979
26980 if (test_and_set_bit(error_type, &m->error_type))
26981 return;
26982 @@ -410,7 +410,7 @@ static struct mirror *choose_mirror(stru
26983 struct mirror *m = get_default_mirror(ms);
26984
26985 do {
26986 - if (likely(!atomic_read(&m->error_count)))
26987 + if (likely(!atomic_read_unchecked(&m->error_count)))
26988 return m;
26989
26990 if (m-- == ms->mirror)
26991 @@ -424,7 +424,7 @@ static int default_ok(struct mirror *m)
26992 {
26993 struct mirror *default_mirror = get_default_mirror(m->ms);
26994
26995 - return !atomic_read(&default_mirror->error_count);
26996 + return !atomic_read_unchecked(&default_mirror->error_count);
26997 }
26998
26999 static int mirror_available(struct mirror_set *ms, struct bio *bio)
27000 @@ -561,7 +561,7 @@ static void do_reads(struct mirror_set *
27001 */
27002 if (likely(region_in_sync(ms, region, 1)))
27003 m = choose_mirror(ms, bio->bi_sector);
27004 - else if (m && atomic_read(&m->error_count))
27005 + else if (m && atomic_read_unchecked(&m->error_count))
27006 m = NULL;
27007
27008 if (likely(m))
27009 @@ -939,7 +939,7 @@ static int get_mirror(struct mirror_set
27010 }
27011
27012 ms->mirror[mirror].ms = ms;
27013 - atomic_set(&(ms->mirror[mirror].error_count), 0);
27014 + atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
27015 ms->mirror[mirror].error_type = 0;
27016 ms->mirror[mirror].offset = offset;
27017
27018 @@ -1347,7 +1347,7 @@ static void mirror_resume(struct dm_targ
27019 */
27020 static char device_status_char(struct mirror *m)
27021 {
27022 - if (!atomic_read(&(m->error_count)))
27023 + if (!atomic_read_unchecked(&(m->error_count)))
27024 return 'A';
27025
27026 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
27027 diff -urNp linux-2.6.39.4/drivers/md/dm-stripe.c linux-2.6.39.4/drivers/md/dm-stripe.c
27028 --- linux-2.6.39.4/drivers/md/dm-stripe.c 2011-05-19 00:06:34.000000000 -0400
27029 +++ linux-2.6.39.4/drivers/md/dm-stripe.c 2011-08-05 19:44:37.000000000 -0400
27030 @@ -20,7 +20,7 @@ struct stripe {
27031 struct dm_dev *dev;
27032 sector_t physical_start;
27033
27034 - atomic_t error_count;
27035 + atomic_unchecked_t error_count;
27036 };
27037
27038 struct stripe_c {
27039 @@ -192,7 +192,7 @@ static int stripe_ctr(struct dm_target *
27040 kfree(sc);
27041 return r;
27042 }
27043 - atomic_set(&(sc->stripe[i].error_count), 0);
27044 + atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
27045 }
27046
27047 ti->private = sc;
27048 @@ -314,7 +314,7 @@ static int stripe_status(struct dm_targe
27049 DMEMIT("%d ", sc->stripes);
27050 for (i = 0; i < sc->stripes; i++) {
27051 DMEMIT("%s ", sc->stripe[i].dev->name);
27052 - buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
27053 + buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
27054 'D' : 'A';
27055 }
27056 buffer[i] = '\0';
27057 @@ -361,8 +361,8 @@ static int stripe_end_io(struct dm_targe
27058 */
27059 for (i = 0; i < sc->stripes; i++)
27060 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
27061 - atomic_inc(&(sc->stripe[i].error_count));
27062 - if (atomic_read(&(sc->stripe[i].error_count)) <
27063 + atomic_inc_unchecked(&(sc->stripe[i].error_count));
27064 + if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
27065 DM_IO_ERROR_THRESHOLD)
27066 schedule_work(&sc->trigger_event);
27067 }
27068 diff -urNp linux-2.6.39.4/drivers/md/dm-table.c linux-2.6.39.4/drivers/md/dm-table.c
27069 --- linux-2.6.39.4/drivers/md/dm-table.c 2011-06-03 00:04:14.000000000 -0400
27070 +++ linux-2.6.39.4/drivers/md/dm-table.c 2011-08-05 19:44:37.000000000 -0400
27071 @@ -390,7 +390,7 @@ static int device_area_is_invalid(struct
27072 if (!dev_size)
27073 return 0;
27074
27075 - if ((start >= dev_size) || (start + len > dev_size)) {
27076 + if ((start >= dev_size) || (len > dev_size - start)) {
27077 DMWARN("%s: %s too small for target: "
27078 "start=%llu, len=%llu, dev_size=%llu",
27079 dm_device_name(ti->table->md), bdevname(bdev, b),
27080 diff -urNp linux-2.6.39.4/drivers/md/md.c linux-2.6.39.4/drivers/md/md.c
27081 --- linux-2.6.39.4/drivers/md/md.c 2011-07-09 09:18:51.000000000 -0400
27082 +++ linux-2.6.39.4/drivers/md/md.c 2011-08-05 19:44:37.000000000 -0400
27083 @@ -226,10 +226,10 @@ EXPORT_SYMBOL_GPL(bio_clone_mddev);
27084 * start build, activate spare
27085 */
27086 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
27087 -static atomic_t md_event_count;
27088 +static atomic_unchecked_t md_event_count;
27089 void md_new_event(mddev_t *mddev)
27090 {
27091 - atomic_inc(&md_event_count);
27092 + atomic_inc_unchecked(&md_event_count);
27093 wake_up(&md_event_waiters);
27094 }
27095 EXPORT_SYMBOL_GPL(md_new_event);
27096 @@ -239,7 +239,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
27097 */
27098 static void md_new_event_inintr(mddev_t *mddev)
27099 {
27100 - atomic_inc(&md_event_count);
27101 + atomic_inc_unchecked(&md_event_count);
27102 wake_up(&md_event_waiters);
27103 }
27104
27105 @@ -1454,7 +1454,7 @@ static int super_1_load(mdk_rdev_t *rdev
27106
27107 rdev->preferred_minor = 0xffff;
27108 rdev->data_offset = le64_to_cpu(sb->data_offset);
27109 - atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27110 + atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
27111
27112 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
27113 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
27114 @@ -1632,7 +1632,7 @@ static void super_1_sync(mddev_t *mddev,
27115 else
27116 sb->resync_offset = cpu_to_le64(0);
27117
27118 - sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
27119 + sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
27120
27121 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
27122 sb->size = cpu_to_le64(mddev->dev_sectors);
27123 @@ -2414,7 +2414,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
27124 static ssize_t
27125 errors_show(mdk_rdev_t *rdev, char *page)
27126 {
27127 - return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
27128 + return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
27129 }
27130
27131 static ssize_t
27132 @@ -2423,7 +2423,7 @@ errors_store(mdk_rdev_t *rdev, const cha
27133 char *e;
27134 unsigned long n = simple_strtoul(buf, &e, 10);
27135 if (*buf && (*e == 0 || *e == '\n')) {
27136 - atomic_set(&rdev->corrected_errors, n);
27137 + atomic_set_unchecked(&rdev->corrected_errors, n);
27138 return len;
27139 }
27140 return -EINVAL;
27141 @@ -2779,8 +2779,8 @@ void md_rdev_init(mdk_rdev_t *rdev)
27142 rdev->last_read_error.tv_sec = 0;
27143 rdev->last_read_error.tv_nsec = 0;
27144 atomic_set(&rdev->nr_pending, 0);
27145 - atomic_set(&rdev->read_errors, 0);
27146 - atomic_set(&rdev->corrected_errors, 0);
27147 + atomic_set_unchecked(&rdev->read_errors, 0);
27148 + atomic_set_unchecked(&rdev->corrected_errors, 0);
27149
27150 INIT_LIST_HEAD(&rdev->same_set);
27151 init_waitqueue_head(&rdev->blocked_wait);
27152 @@ -6388,7 +6388,7 @@ static int md_seq_show(struct seq_file *
27153
27154 spin_unlock(&pers_lock);
27155 seq_printf(seq, "\n");
27156 - mi->event = atomic_read(&md_event_count);
27157 + mi->event = atomic_read_unchecked(&md_event_count);
27158 return 0;
27159 }
27160 if (v == (void*)2) {
27161 @@ -6477,7 +6477,7 @@ static int md_seq_show(struct seq_file *
27162 chunk_kb ? "KB" : "B");
27163 if (bitmap->file) {
27164 seq_printf(seq, ", file: ");
27165 - seq_path(seq, &bitmap->file->f_path, " \t\n");
27166 + seq_path(seq, &bitmap->file->f_path, " \t\n\\");
27167 }
27168
27169 seq_printf(seq, "\n");
27170 @@ -6511,7 +6511,7 @@ static int md_seq_open(struct inode *ino
27171 else {
27172 struct seq_file *p = file->private_data;
27173 p->private = mi;
27174 - mi->event = atomic_read(&md_event_count);
27175 + mi->event = atomic_read_unchecked(&md_event_count);
27176 }
27177 return error;
27178 }
27179 @@ -6527,7 +6527,7 @@ static unsigned int mdstat_poll(struct f
27180 /* always allow read */
27181 mask = POLLIN | POLLRDNORM;
27182
27183 - if (mi->event != atomic_read(&md_event_count))
27184 + if (mi->event != atomic_read_unchecked(&md_event_count))
27185 mask |= POLLERR | POLLPRI;
27186 return mask;
27187 }
27188 @@ -6571,7 +6571,7 @@ static int is_mddev_idle(mddev_t *mddev,
27189 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
27190 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
27191 (int)part_stat_read(&disk->part0, sectors[1]) -
27192 - atomic_read(&disk->sync_io);
27193 + atomic_read_unchecked(&disk->sync_io);
27194 /* sync IO will cause sync_io to increase before the disk_stats
27195 * as sync_io is counted when a request starts, and
27196 * disk_stats is counted when it completes.
27197 diff -urNp linux-2.6.39.4/drivers/md/md.h linux-2.6.39.4/drivers/md/md.h
27198 --- linux-2.6.39.4/drivers/md/md.h 2011-05-19 00:06:34.000000000 -0400
27199 +++ linux-2.6.39.4/drivers/md/md.h 2011-08-05 19:44:37.000000000 -0400
27200 @@ -97,13 +97,13 @@ struct mdk_rdev_s
27201 * only maintained for arrays that
27202 * support hot removal
27203 */
27204 - atomic_t read_errors; /* number of consecutive read errors that
27205 + atomic_unchecked_t read_errors; /* number of consecutive read errors that
27206 * we have tried to ignore.
27207 */
27208 struct timespec last_read_error; /* monotonic time since our
27209 * last read error
27210 */
27211 - atomic_t corrected_errors; /* number of corrected read errors,
27212 + atomic_unchecked_t corrected_errors; /* number of corrected read errors,
27213 * for reporting to userspace and storing
27214 * in superblock.
27215 */
27216 @@ -342,7 +342,7 @@ static inline void rdev_dec_pending(mdk_
27217
27218 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
27219 {
27220 - atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27221 + atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
27222 }
27223
27224 struct mdk_personality
27225 diff -urNp linux-2.6.39.4/drivers/md/raid10.c linux-2.6.39.4/drivers/md/raid10.c
27226 --- linux-2.6.39.4/drivers/md/raid10.c 2011-05-19 00:06:34.000000000 -0400
27227 +++ linux-2.6.39.4/drivers/md/raid10.c 2011-08-05 19:44:37.000000000 -0400
27228 @@ -1209,7 +1209,7 @@ static void end_sync_read(struct bio *bi
27229 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
27230 set_bit(R10BIO_Uptodate, &r10_bio->state);
27231 else {
27232 - atomic_add(r10_bio->sectors,
27233 + atomic_add_unchecked(r10_bio->sectors,
27234 &conf->mirrors[d].rdev->corrected_errors);
27235 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
27236 md_error(r10_bio->mddev,
27237 @@ -1417,7 +1417,7 @@ static void check_decay_read_errors(mdde
27238 {
27239 struct timespec cur_time_mon;
27240 unsigned long hours_since_last;
27241 - unsigned int read_errors = atomic_read(&rdev->read_errors);
27242 + unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
27243
27244 ktime_get_ts(&cur_time_mon);
27245
27246 @@ -1439,9 +1439,9 @@ static void check_decay_read_errors(mdde
27247 * overflowing the shift of read_errors by hours_since_last.
27248 */
27249 if (hours_since_last >= 8 * sizeof(read_errors))
27250 - atomic_set(&rdev->read_errors, 0);
27251 + atomic_set_unchecked(&rdev->read_errors, 0);
27252 else
27253 - atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
27254 + atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
27255 }
27256
27257 /*
27258 @@ -1476,8 +1476,8 @@ static void fix_read_error(conf_t *conf,
27259 }
27260
27261 check_decay_read_errors(mddev, rdev);
27262 - atomic_inc(&rdev->read_errors);
27263 - cur_read_error_count = atomic_read(&rdev->read_errors);
27264 + atomic_inc_unchecked(&rdev->read_errors);
27265 + cur_read_error_count = atomic_read_unchecked(&rdev->read_errors);
27266 if (cur_read_error_count > max_read_errors) {
27267 rcu_read_unlock();
27268 printk(KERN_NOTICE
27269 @@ -1550,7 +1550,7 @@ static void fix_read_error(conf_t *conf,
27270 test_bit(In_sync, &rdev->flags)) {
27271 atomic_inc(&rdev->nr_pending);
27272 rcu_read_unlock();
27273 - atomic_add(s, &rdev->corrected_errors);
27274 + atomic_add_unchecked(s, &rdev->corrected_errors);
27275 if (sync_page_io(rdev,
27276 r10_bio->devs[sl].addr +
27277 sect,
27278 diff -urNp linux-2.6.39.4/drivers/md/raid1.c linux-2.6.39.4/drivers/md/raid1.c
27279 --- linux-2.6.39.4/drivers/md/raid1.c 2011-05-19 00:06:34.000000000 -0400
27280 +++ linux-2.6.39.4/drivers/md/raid1.c 2011-08-05 19:44:37.000000000 -0400
27281 @@ -1342,7 +1342,7 @@ static void sync_request_write(mddev_t *
27282 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
27283 continue;
27284 rdev = conf->mirrors[d].rdev;
27285 - atomic_add(s, &rdev->corrected_errors);
27286 + atomic_add_unchecked(s, &rdev->corrected_errors);
27287 if (sync_page_io(rdev,
27288 sect,
27289 s<<9,
27290 @@ -1488,7 +1488,7 @@ static void fix_read_error(conf_t *conf,
27291 /* Well, this device is dead */
27292 md_error(mddev, rdev);
27293 else {
27294 - atomic_add(s, &rdev->corrected_errors);
27295 + atomic_add_unchecked(s, &rdev->corrected_errors);
27296 printk(KERN_INFO
27297 "md/raid1:%s: read error corrected "
27298 "(%d sectors at %llu on %s)\n",
27299 diff -urNp linux-2.6.39.4/drivers/md/raid5.c linux-2.6.39.4/drivers/md/raid5.c
27300 --- linux-2.6.39.4/drivers/md/raid5.c 2011-06-25 12:55:22.000000000 -0400
27301 +++ linux-2.6.39.4/drivers/md/raid5.c 2011-08-05 19:44:37.000000000 -0400
27302 @@ -550,7 +550,7 @@ static void ops_run_io(struct stripe_hea
27303 bi->bi_next = NULL;
27304 if ((rw & WRITE) &&
27305 test_bit(R5_ReWrite, &sh->dev[i].flags))
27306 - atomic_add(STRIPE_SECTORS,
27307 + atomic_add_unchecked(STRIPE_SECTORS,
27308 &rdev->corrected_errors);
27309 generic_make_request(bi);
27310 } else {
27311 @@ -1596,15 +1596,15 @@ static void raid5_end_read_request(struc
27312 clear_bit(R5_ReadError, &sh->dev[i].flags);
27313 clear_bit(R5_ReWrite, &sh->dev[i].flags);
27314 }
27315 - if (atomic_read(&conf->disks[i].rdev->read_errors))
27316 - atomic_set(&conf->disks[i].rdev->read_errors, 0);
27317 + if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
27318 + atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
27319 } else {
27320 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
27321 int retry = 0;
27322 rdev = conf->disks[i].rdev;
27323
27324 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
27325 - atomic_inc(&rdev->read_errors);
27326 + atomic_inc_unchecked(&rdev->read_errors);
27327 if (conf->mddev->degraded >= conf->max_degraded)
27328 printk_rl(KERN_WARNING
27329 "md/raid:%s: read error not correctable "
27330 @@ -1622,7 +1622,7 @@ static void raid5_end_read_request(struc
27331 (unsigned long long)(sh->sector
27332 + rdev->data_offset),
27333 bdn);
27334 - else if (atomic_read(&rdev->read_errors)
27335 + else if (atomic_read_unchecked(&rdev->read_errors)
27336 > conf->max_nr_stripes)
27337 printk(KERN_WARNING
27338 "md/raid:%s: Too many read errors, failing device %s.\n",
27339 @@ -1947,6 +1947,7 @@ static sector_t compute_blocknr(struct s
27340 sector_t r_sector;
27341 struct stripe_head sh2;
27342
27343 + pax_track_stack();
27344
27345 chunk_offset = sector_div(new_sector, sectors_per_chunk);
27346 stripe = new_sector;
27347 diff -urNp linux-2.6.39.4/drivers/media/common/saa7146_hlp.c linux-2.6.39.4/drivers/media/common/saa7146_hlp.c
27348 --- linux-2.6.39.4/drivers/media/common/saa7146_hlp.c 2011-05-19 00:06:34.000000000 -0400
27349 +++ linux-2.6.39.4/drivers/media/common/saa7146_hlp.c 2011-08-05 19:44:37.000000000 -0400
27350 @@ -353,6 +353,8 @@ static void calculate_clipping_registers
27351
27352 int x[32], y[32], w[32], h[32];
27353
27354 + pax_track_stack();
27355 +
27356 /* clear out memory */
27357 memset(&line_list[0], 0x00, sizeof(u32)*32);
27358 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
27359 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
27360 --- linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-19 00:06:34.000000000 -0400
27361 +++ linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-08-05 19:44:37.000000000 -0400
27362 @@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
27363 u8 buf[HOST_LINK_BUF_SIZE];
27364 int i;
27365
27366 + pax_track_stack();
27367 +
27368 dprintk("%s\n", __func__);
27369
27370 /* check if we have space for a link buf in the rx_buffer */
27371 @@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
27372 unsigned long timeout;
27373 int written;
27374
27375 + pax_track_stack();
27376 +
27377 dprintk("%s\n", __func__);
27378
27379 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
27380 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h
27381 --- linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-05-19 00:06:34.000000000 -0400
27382 +++ linux-2.6.39.4/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-05 20:34:06.000000000 -0400
27383 @@ -73,7 +73,7 @@ struct dvb_demux_feed {
27384 union {
27385 dmx_ts_cb ts;
27386 dmx_section_cb sec;
27387 - } cb;
27388 + } __no_const cb;
27389
27390 struct dvb_demux *demux;
27391 void *priv;
27392 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c
27393 --- linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-05-19 00:06:34.000000000 -0400
27394 +++ linux-2.6.39.4/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-05 20:34:06.000000000 -0400
27395 @@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapt
27396 const struct dvb_device *template, void *priv, int type)
27397 {
27398 struct dvb_device *dvbdev;
27399 - struct file_operations *dvbdevfops;
27400 + file_operations_no_const *dvbdevfops;
27401 struct device *clsdev;
27402 int minor;
27403 int id;
27404 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c
27405 --- linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-05-19 00:06:34.000000000 -0400
27406 +++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-05 20:34:06.000000000 -0400
27407 @@ -1059,7 +1059,7 @@ static struct dib0070_config dib7070p_di
27408 struct dib0700_adapter_state {
27409 int (*set_param_save) (struct dvb_frontend *,
27410 struct dvb_frontend_parameters *);
27411 -};
27412 +} __no_const;
27413
27414 static int dib7070_set_param_override(struct dvb_frontend *fe,
27415 struct dvb_frontend_parameters *fep)
27416 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c
27417 --- linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-19 00:06:34.000000000 -0400
27418 +++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-08-05 19:44:37.000000000 -0400
27419 @@ -391,6 +391,8 @@ int dib0700_download_firmware(struct usb
27420
27421 u8 buf[260];
27422
27423 + pax_track_stack();
27424 +
27425 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
27426 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",
27427 hx.addr, hx.len, hx.chk);
27428 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c
27429 --- linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-05-19 00:06:34.000000000 -0400
27430 +++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/dw2102.c 2011-08-05 20:34:06.000000000 -0400
27431 @@ -95,7 +95,7 @@ struct su3000_state {
27432
27433 struct s6x0_state {
27434 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
27435 -};
27436 +} __no_const;
27437
27438 /* debug */
27439 static int dvb_usb_dw2102_debug;
27440 diff -urNp linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c
27441 --- linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-05-19 00:06:34.000000000 -0400
27442 +++ linux-2.6.39.4/drivers/media/dvb/dvb-usb/lmedm04.c 2011-08-05 19:44:37.000000000 -0400
27443 @@ -663,6 +663,7 @@ static int lme2510_download_firmware(str
27444 packet_size = 0x31;
27445 len_in = 1;
27446
27447 + pax_track_stack();
27448
27449 info("FRM Starting Firmware Download");
27450
27451 @@ -715,6 +716,8 @@ static void lme_coldreset(struct usb_dev
27452 int ret = 0, len_in;
27453 u8 data[512] = {0};
27454
27455 + pax_track_stack();
27456 +
27457 data[0] = 0x0a;
27458 len_in = 1;
27459 info("FRM Firmware Cold Reset");
27460 diff -urNp linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h
27461 --- linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h 2011-05-19 00:06:34.000000000 -0400
27462 +++ linux-2.6.39.4/drivers/media/dvb/frontends/dib3000.h 2011-08-05 20:34:06.000000000 -0400
27463 @@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
27464 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
27465 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
27466 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
27467 -};
27468 +} __no_const;
27469
27470 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
27471 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
27472 diff -urNp linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c
27473 --- linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c 2011-05-19 00:06:34.000000000 -0400
27474 +++ linux-2.6.39.4/drivers/media/dvb/frontends/mb86a16.c 2011-08-05 19:44:37.000000000 -0400
27475 @@ -1060,6 +1060,8 @@ static int mb86a16_set_fe(struct mb86a16
27476 int ret = -1;
27477 int sync;
27478
27479 + pax_track_stack();
27480 +
27481 dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);
27482
27483 fcp = 3000;
27484 diff -urNp linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c
27485 --- linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c 2011-05-19 00:06:34.000000000 -0400
27486 +++ linux-2.6.39.4/drivers/media/dvb/frontends/or51211.c 2011-08-05 19:44:37.000000000 -0400
27487 @@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
27488 u8 tudata[585];
27489 int i;
27490
27491 + pax_track_stack();
27492 +
27493 dprintk("Firmware is %zd bytes\n",fw->size);
27494
27495 /* Get eprom data */
27496 diff -urNp linux-2.6.39.4/drivers/media/radio/radio-cadet.c linux-2.6.39.4/drivers/media/radio/radio-cadet.c
27497 --- linux-2.6.39.4/drivers/media/radio/radio-cadet.c 2011-05-19 00:06:34.000000000 -0400
27498 +++ linux-2.6.39.4/drivers/media/radio/radio-cadet.c 2011-08-05 19:44:37.000000000 -0400
27499 @@ -349,7 +349,7 @@ static ssize_t cadet_read(struct file *f
27500 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
27501 mutex_unlock(&dev->lock);
27502
27503 - if (copy_to_user(data, readbuf, i))
27504 + if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
27505 return -EFAULT;
27506 return i;
27507 }
27508 diff -urNp linux-2.6.39.4/drivers/media/rc/rc-main.c linux-2.6.39.4/drivers/media/rc/rc-main.c
27509 --- linux-2.6.39.4/drivers/media/rc/rc-main.c 2011-05-19 00:06:34.000000000 -0400
27510 +++ linux-2.6.39.4/drivers/media/rc/rc-main.c 2011-08-05 19:44:37.000000000 -0400
27511 @@ -996,7 +996,7 @@ EXPORT_SYMBOL_GPL(rc_free_device);
27512
27513 int rc_register_device(struct rc_dev *dev)
27514 {
27515 - static atomic_t devno = ATOMIC_INIT(0);
27516 + static atomic_unchecked_t devno = ATOMIC_INIT(0);
27517 struct rc_map *rc_map;
27518 const char *path;
27519 int rc;
27520 @@ -1019,7 +1019,7 @@ int rc_register_device(struct rc_dev *de
27521 if (dev->close)
27522 dev->input_dev->close = ir_close;
27523
27524 - dev->devno = (unsigned long)(atomic_inc_return(&devno) - 1);
27525 + dev->devno = (unsigned long)(atomic_inc_return_unchecked(&devno) - 1);
27526 dev_set_name(&dev->dev, "rc%ld", dev->devno);
27527 dev_set_drvdata(&dev->dev, dev);
27528 rc = device_add(&dev->dev);
27529 diff -urNp linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c
27530 --- linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c 2011-05-19 00:06:34.000000000 -0400
27531 +++ linux-2.6.39.4/drivers/media/video/cx18/cx18-driver.c 2011-08-05 19:44:37.000000000 -0400
27532 @@ -61,7 +61,7 @@ static struct pci_device_id cx18_pci_tbl
27533
27534 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
27535
27536 -static atomic_t cx18_instance = ATOMIC_INIT(0);
27537 +static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
27538
27539 /* Parameter declarations */
27540 static int cardtype[CX18_MAX_CARDS];
27541 @@ -327,6 +327,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
27542 struct i2c_client c;
27543 u8 eedata[256];
27544
27545 + pax_track_stack();
27546 +
27547 memset(&c, 0, sizeof(c));
27548 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
27549 c.adapter = &cx->i2c_adap[0];
27550 @@ -892,7 +894,7 @@ static int __devinit cx18_probe(struct p
27551 struct cx18 *cx;
27552
27553 /* FIXME - module parameter arrays constrain max instances */
27554 - i = atomic_inc_return(&cx18_instance) - 1;
27555 + i = atomic_inc_return_unchecked(&cx18_instance) - 1;
27556 if (i >= CX18_MAX_CARDS) {
27557 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
27558 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
27559 diff -urNp linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c
27560 --- linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c 2011-05-19 00:06:34.000000000 -0400
27561 +++ linux-2.6.39.4/drivers/media/video/cx23885/cx23885-input.c 2011-08-05 19:44:37.000000000 -0400
27562 @@ -53,6 +53,8 @@ static void cx23885_input_process_measur
27563 bool handle = false;
27564 struct ir_raw_event ir_core_event[64];
27565
27566 + pax_track_stack();
27567 +
27568 do {
27569 num = 0;
27570 v4l2_subdev_call(dev->sd_ir, ir, rx_read, (u8 *) ir_core_event,
27571 diff -urNp linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c
27572 --- linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c 2011-05-19 00:06:34.000000000 -0400
27573 +++ linux-2.6.39.4/drivers/media/video/ivtv/ivtv-driver.c 2011-08-05 19:44:37.000000000 -0400
27574 @@ -80,7 +80,7 @@ static struct pci_device_id ivtv_pci_tbl
27575 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
27576
27577 /* ivtv instance counter */
27578 -static atomic_t ivtv_instance = ATOMIC_INIT(0);
27579 +static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
27580
27581 /* Parameter declarations */
27582 static int cardtype[IVTV_MAX_CARDS];
27583 diff -urNp linux-2.6.39.4/drivers/media/video/omap24xxcam.c linux-2.6.39.4/drivers/media/video/omap24xxcam.c
27584 --- linux-2.6.39.4/drivers/media/video/omap24xxcam.c 2011-05-19 00:06:34.000000000 -0400
27585 +++ linux-2.6.39.4/drivers/media/video/omap24xxcam.c 2011-08-05 19:44:37.000000000 -0400
27586 @@ -403,7 +403,7 @@ static void omap24xxcam_vbq_complete(str
27587 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
27588
27589 do_gettimeofday(&vb->ts);
27590 - vb->field_count = atomic_add_return(2, &fh->field_count);
27591 + vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
27592 if (csr & csr_error) {
27593 vb->state = VIDEOBUF_ERROR;
27594 if (!atomic_read(&fh->cam->in_reset)) {
27595 diff -urNp linux-2.6.39.4/drivers/media/video/omap24xxcam.h linux-2.6.39.4/drivers/media/video/omap24xxcam.h
27596 --- linux-2.6.39.4/drivers/media/video/omap24xxcam.h 2011-05-19 00:06:34.000000000 -0400
27597 +++ linux-2.6.39.4/drivers/media/video/omap24xxcam.h 2011-08-05 19:44:37.000000000 -0400
27598 @@ -533,7 +533,7 @@ struct omap24xxcam_fh {
27599 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
27600 struct videobuf_queue vbq;
27601 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
27602 - atomic_t field_count; /* field counter for videobuf_buffer */
27603 + atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
27604 /* accessing cam here doesn't need serialisation: it's constant */
27605 struct omap24xxcam_device *cam;
27606 };
27607 diff -urNp linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
27608 --- linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-19 00:06:34.000000000 -0400
27609 +++ linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-08-05 19:44:37.000000000 -0400
27610 @@ -120,6 +120,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
27611 u8 *eeprom;
27612 struct tveeprom tvdata;
27613
27614 + pax_track_stack();
27615 +
27616 memset(&tvdata,0,sizeof(tvdata));
27617
27618 eeprom = pvr2_eeprom_fetch(hdw);
27619 diff -urNp linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
27620 --- linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-05-19 00:06:34.000000000 -0400
27621 +++ linux-2.6.39.4/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h 2011-08-05 20:34:06.000000000 -0400
27622 @@ -196,7 +196,7 @@ struct pvr2_hdw {
27623
27624 /* I2C stuff */
27625 struct i2c_adapter i2c_adap;
27626 - struct i2c_algorithm i2c_algo;
27627 + i2c_algorithm_no_const i2c_algo;
27628 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
27629 int i2c_cx25840_hack_state;
27630 int i2c_linked;
27631 diff -urNp linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c
27632 --- linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c 2011-05-19 00:06:34.000000000 -0400
27633 +++ linux-2.6.39.4/drivers/media/video/saa7134/saa6752hs.c 2011-08-05 19:44:37.000000000 -0400
27634 @@ -682,6 +682,8 @@ static int saa6752hs_init(struct v4l2_su
27635 unsigned char localPAT[256];
27636 unsigned char localPMT[256];
27637
27638 + pax_track_stack();
27639 +
27640 /* Set video format - must be done first as it resets other settings */
27641 set_reg8(client, 0x41, h->video_format);
27642
27643 diff -urNp linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c
27644 --- linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-19 00:06:34.000000000 -0400
27645 +++ linux-2.6.39.4/drivers/media/video/saa7164/saa7164-cmd.c 2011-08-05 19:44:37.000000000 -0400
27646 @@ -88,6 +88,8 @@ int saa7164_irq_dequeue(struct saa7164_d
27647 u8 tmp[512];
27648 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27649
27650 + pax_track_stack();
27651 +
27652 /* While any outstand message on the bus exists... */
27653 do {
27654
27655 @@ -141,6 +143,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
27656 u8 tmp[512];
27657 dprintk(DBGLVL_CMD, "%s()\n", __func__);
27658
27659 + pax_track_stack();
27660 +
27661 while (loop) {
27662
27663 struct tmComResInfo tRsp = { 0, 0, 0, 0, 0, 0 };
27664 diff -urNp linux-2.6.39.4/drivers/media/video/timblogiw.c linux-2.6.39.4/drivers/media/video/timblogiw.c
27665 --- linux-2.6.39.4/drivers/media/video/timblogiw.c 2011-05-19 00:06:34.000000000 -0400
27666 +++ linux-2.6.39.4/drivers/media/video/timblogiw.c 2011-08-05 20:34:06.000000000 -0400
27667 @@ -746,7 +746,7 @@ static int timblogiw_mmap(struct file *f
27668
27669 /* Platform device functions */
27670
27671 -static __devinitconst struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
27672 +static __devinitdata struct v4l2_ioctl_ops timblogiw_ioctl_ops = {
27673 .vidioc_querycap = timblogiw_querycap,
27674 .vidioc_enum_fmt_vid_cap = timblogiw_enum_fmt,
27675 .vidioc_g_fmt_vid_cap = timblogiw_g_fmt,
27676 @@ -768,7 +768,7 @@ static __devinitconst struct v4l2_ioctl_
27677 .vidioc_enum_framesizes = timblogiw_enum_framesizes,
27678 };
27679
27680 -static __devinitconst struct v4l2_file_operations timblogiw_fops = {
27681 +static __devinitdata struct v4l2_file_operations timblogiw_fops = {
27682 .owner = THIS_MODULE,
27683 .open = timblogiw_open,
27684 .release = timblogiw_close,
27685 diff -urNp linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c
27686 --- linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c 2011-05-19 00:06:34.000000000 -0400
27687 +++ linux-2.6.39.4/drivers/media/video/usbvision/usbvision-core.c 2011-08-05 19:44:37.000000000 -0400
27688 @@ -799,6 +799,8 @@ static enum parse_state usbvision_parse_
27689 unsigned char rv, gv, bv;
27690 static unsigned char *Y, *U, *V;
27691
27692 + pax_track_stack();
27693 +
27694 frame = usbvision->cur_frame;
27695 image_size = frame->frmwidth * frame->frmheight;
27696 if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
27697 diff -urNp linux-2.6.39.4/drivers/media/video/v4l2-device.c linux-2.6.39.4/drivers/media/video/v4l2-device.c
27698 --- linux-2.6.39.4/drivers/media/video/v4l2-device.c 2011-05-19 00:06:34.000000000 -0400
27699 +++ linux-2.6.39.4/drivers/media/video/v4l2-device.c 2011-08-05 19:44:37.000000000 -0400
27700 @@ -71,9 +71,9 @@ int v4l2_device_put(struct v4l2_device *
27701 EXPORT_SYMBOL_GPL(v4l2_device_put);
27702
27703 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
27704 - atomic_t *instance)
27705 + atomic_unchecked_t *instance)
27706 {
27707 - int num = atomic_inc_return(instance) - 1;
27708 + int num = atomic_inc_return_unchecked(instance) - 1;
27709 int len = strlen(basename);
27710
27711 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
27712 diff -urNp linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c
27713 --- linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c 2011-05-19 00:06:34.000000000 -0400
27714 +++ linux-2.6.39.4/drivers/media/video/videobuf-dma-sg.c 2011-08-05 19:44:37.000000000 -0400
27715 @@ -606,6 +606,8 @@ void *videobuf_sg_alloc(size_t size)
27716 {
27717 struct videobuf_queue q;
27718
27719 + pax_track_stack();
27720 +
27721 /* Required to make generic handler to call __videobuf_alloc */
27722 q.int_ops = &sg_ops;
27723
27724 diff -urNp linux-2.6.39.4/drivers/message/fusion/mptbase.c linux-2.6.39.4/drivers/message/fusion/mptbase.c
27725 --- linux-2.6.39.4/drivers/message/fusion/mptbase.c 2011-05-19 00:06:34.000000000 -0400
27726 +++ linux-2.6.39.4/drivers/message/fusion/mptbase.c 2011-08-05 20:34:06.000000000 -0400
27727 @@ -6683,8 +6683,13 @@ static int mpt_iocinfo_proc_show(struct
27728 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
27729 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
27730
27731 +#ifdef CONFIG_GRKERNSEC_HIDESYM
27732 + seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
27733 +#else
27734 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
27735 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
27736 +#endif
27737 +
27738 /*
27739 * Rounding UP to nearest 4-kB boundary here...
27740 */
27741 diff -urNp linux-2.6.39.4/drivers/message/fusion/mptsas.c linux-2.6.39.4/drivers/message/fusion/mptsas.c
27742 --- linux-2.6.39.4/drivers/message/fusion/mptsas.c 2011-05-19 00:06:34.000000000 -0400
27743 +++ linux-2.6.39.4/drivers/message/fusion/mptsas.c 2011-08-05 19:44:37.000000000 -0400
27744 @@ -439,6 +439,23 @@ mptsas_is_end_device(struct mptsas_devin
27745 return 0;
27746 }
27747
27748 +static inline void
27749 +mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27750 +{
27751 + if (phy_info->port_details) {
27752 + phy_info->port_details->rphy = rphy;
27753 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27754 + ioc->name, rphy));
27755 + }
27756 +
27757 + if (rphy) {
27758 + dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27759 + &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27760 + dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27761 + ioc->name, rphy, rphy->dev.release));
27762 + }
27763 +}
27764 +
27765 /* no mutex */
27766 static void
27767 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
27768 @@ -477,23 +494,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
27769 return NULL;
27770 }
27771
27772 -static inline void
27773 -mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
27774 -{
27775 - if (phy_info->port_details) {
27776 - phy_info->port_details->rphy = rphy;
27777 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
27778 - ioc->name, rphy));
27779 - }
27780 -
27781 - if (rphy) {
27782 - dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
27783 - &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
27784 - dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
27785 - ioc->name, rphy, rphy->dev.release));
27786 - }
27787 -}
27788 -
27789 static inline struct sas_port *
27790 mptsas_get_port(struct mptsas_phyinfo *phy_info)
27791 {
27792 diff -urNp linux-2.6.39.4/drivers/message/fusion/mptscsih.c linux-2.6.39.4/drivers/message/fusion/mptscsih.c
27793 --- linux-2.6.39.4/drivers/message/fusion/mptscsih.c 2011-05-19 00:06:34.000000000 -0400
27794 +++ linux-2.6.39.4/drivers/message/fusion/mptscsih.c 2011-08-05 19:44:37.000000000 -0400
27795 @@ -1268,15 +1268,16 @@ mptscsih_info(struct Scsi_Host *SChost)
27796
27797 h = shost_priv(SChost);
27798
27799 - if (h) {
27800 - if (h->info_kbuf == NULL)
27801 - if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27802 - return h->info_kbuf;
27803 - h->info_kbuf[0] = '\0';
27804 + if (!h)
27805 + return NULL;
27806
27807 - mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27808 - h->info_kbuf[size-1] = '\0';
27809 - }
27810 + if (h->info_kbuf == NULL)
27811 + if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
27812 + return h->info_kbuf;
27813 + h->info_kbuf[0] = '\0';
27814 +
27815 + mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
27816 + h->info_kbuf[size-1] = '\0';
27817
27818 return h->info_kbuf;
27819 }
27820 diff -urNp linux-2.6.39.4/drivers/message/i2o/i2o_config.c linux-2.6.39.4/drivers/message/i2o/i2o_config.c
27821 --- linux-2.6.39.4/drivers/message/i2o/i2o_config.c 2011-05-19 00:06:34.000000000 -0400
27822 +++ linux-2.6.39.4/drivers/message/i2o/i2o_config.c 2011-08-05 19:44:37.000000000 -0400
27823 @@ -781,6 +781,8 @@ static int i2o_cfg_passthru(unsigned lon
27824 struct i2o_message *msg;
27825 unsigned int iop;
27826
27827 + pax_track_stack();
27828 +
27829 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
27830 return -EFAULT;
27831
27832 diff -urNp linux-2.6.39.4/drivers/message/i2o/i2o_proc.c linux-2.6.39.4/drivers/message/i2o/i2o_proc.c
27833 --- linux-2.6.39.4/drivers/message/i2o/i2o_proc.c 2011-05-19 00:06:34.000000000 -0400
27834 +++ linux-2.6.39.4/drivers/message/i2o/i2o_proc.c 2011-08-05 19:44:37.000000000 -0400
27835 @@ -255,13 +255,6 @@ static char *scsi_devices[] = {
27836 "Array Controller Device"
27837 };
27838
27839 -static char *chtostr(u8 * chars, int n)
27840 -{
27841 - char tmp[256];
27842 - tmp[0] = 0;
27843 - return strncat(tmp, (char *)chars, n);
27844 -}
27845 -
27846 static int i2o_report_query_status(struct seq_file *seq, int block_status,
27847 char *group)
27848 {
27849 @@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct
27850
27851 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
27852 seq_printf(seq, "%-#8x", ddm_table.module_id);
27853 - seq_printf(seq, "%-29s",
27854 - chtostr(ddm_table.module_name_version, 28));
27855 + seq_printf(seq, "%-.28s", ddm_table.module_name_version);
27856 seq_printf(seq, "%9d ", ddm_table.data_size);
27857 seq_printf(seq, "%8d", ddm_table.code_size);
27858
27859 @@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s
27860
27861 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
27862 seq_printf(seq, "%-#8x", dst->module_id);
27863 - seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
27864 - seq_printf(seq, "%-9s", chtostr(dst->date, 8));
27865 + seq_printf(seq, "%-.28s", dst->module_name_version);
27866 + seq_printf(seq, "%-.8s", dst->date);
27867 seq_printf(seq, "%8d ", dst->module_size);
27868 seq_printf(seq, "%8d ", dst->mpb_size);
27869 seq_printf(seq, "0x%04x", dst->module_flags);
27870 @@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str
27871 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
27872 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
27873 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
27874 - seq_printf(seq, "Vendor info : %s\n",
27875 - chtostr((u8 *) (work32 + 2), 16));
27876 - seq_printf(seq, "Product info : %s\n",
27877 - chtostr((u8 *) (work32 + 6), 16));
27878 - seq_printf(seq, "Description : %s\n",
27879 - chtostr((u8 *) (work32 + 10), 16));
27880 - seq_printf(seq, "Product rev. : %s\n",
27881 - chtostr((u8 *) (work32 + 14), 8));
27882 + seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
27883 + seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
27884 + seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
27885 + seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
27886
27887 seq_printf(seq, "Serial number : ");
27888 print_serial_number(seq, (u8 *) (work32 + 16),
27889 @@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str
27890 }
27891
27892 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
27893 - seq_printf(seq, "Module name : %s\n",
27894 - chtostr(result.module_name, 24));
27895 - seq_printf(seq, "Module revision : %s\n",
27896 - chtostr(result.module_rev, 8));
27897 + seq_printf(seq, "Module name : %.24s\n", result.module_name);
27898 + seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
27899
27900 seq_printf(seq, "Serial number : ");
27901 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
27902 @@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq
27903 return 0;
27904 }
27905
27906 - seq_printf(seq, "Device name : %s\n",
27907 - chtostr(result.device_name, 64));
27908 - seq_printf(seq, "Service name : %s\n",
27909 - chtostr(result.service_name, 64));
27910 - seq_printf(seq, "Physical name : %s\n",
27911 - chtostr(result.physical_location, 64));
27912 - seq_printf(seq, "Instance number : %s\n",
27913 - chtostr(result.instance_number, 4));
27914 + seq_printf(seq, "Device name : %.64s\n", result.device_name);
27915 + seq_printf(seq, "Service name : %.64s\n", result.service_name);
27916 + seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
27917 + seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
27918
27919 return 0;
27920 }
27921 diff -urNp linux-2.6.39.4/drivers/message/i2o/iop.c linux-2.6.39.4/drivers/message/i2o/iop.c
27922 --- linux-2.6.39.4/drivers/message/i2o/iop.c 2011-05-19 00:06:34.000000000 -0400
27923 +++ linux-2.6.39.4/drivers/message/i2o/iop.c 2011-08-05 19:44:37.000000000 -0400
27924 @@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
27925
27926 spin_lock_irqsave(&c->context_list_lock, flags);
27927
27928 - if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
27929 - atomic_inc(&c->context_list_counter);
27930 + if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
27931 + atomic_inc_unchecked(&c->context_list_counter);
27932
27933 - entry->context = atomic_read(&c->context_list_counter);
27934 + entry->context = atomic_read_unchecked(&c->context_list_counter);
27935
27936 list_add(&entry->list, &c->context_list);
27937
27938 @@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(voi
27939
27940 #if BITS_PER_LONG == 64
27941 spin_lock_init(&c->context_list_lock);
27942 - atomic_set(&c->context_list_counter, 0);
27943 + atomic_set_unchecked(&c->context_list_counter, 0);
27944 INIT_LIST_HEAD(&c->context_list);
27945 #endif
27946
27947 diff -urNp linux-2.6.39.4/drivers/mfd/abx500-core.c linux-2.6.39.4/drivers/mfd/abx500-core.c
27948 --- linux-2.6.39.4/drivers/mfd/abx500-core.c 2011-05-19 00:06:34.000000000 -0400
27949 +++ linux-2.6.39.4/drivers/mfd/abx500-core.c 2011-08-05 20:34:06.000000000 -0400
27950 @@ -14,7 +14,7 @@ static LIST_HEAD(abx500_list);
27951
27952 struct abx500_device_entry {
27953 struct list_head list;
27954 - struct abx500_ops ops;
27955 + abx500_ops_no_const ops;
27956 struct device *dev;
27957 };
27958
27959 diff -urNp linux-2.6.39.4/drivers/mfd/janz-cmodio.c linux-2.6.39.4/drivers/mfd/janz-cmodio.c
27960 --- linux-2.6.39.4/drivers/mfd/janz-cmodio.c 2011-05-19 00:06:34.000000000 -0400
27961 +++ linux-2.6.39.4/drivers/mfd/janz-cmodio.c 2011-08-05 19:44:37.000000000 -0400
27962 @@ -13,6 +13,7 @@
27963
27964 #include <linux/kernel.h>
27965 #include <linux/module.h>
27966 +#include <linux/slab.h>
27967 #include <linux/init.h>
27968 #include <linux/pci.h>
27969 #include <linux/interrupt.h>
27970 diff -urNp linux-2.6.39.4/drivers/mfd/wm8350-i2c.c linux-2.6.39.4/drivers/mfd/wm8350-i2c.c
27971 --- linux-2.6.39.4/drivers/mfd/wm8350-i2c.c 2011-05-19 00:06:34.000000000 -0400
27972 +++ linux-2.6.39.4/drivers/mfd/wm8350-i2c.c 2011-08-05 19:44:37.000000000 -0400
27973 @@ -44,6 +44,8 @@ static int wm8350_i2c_write_device(struc
27974 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
27975 int ret;
27976
27977 + pax_track_stack();
27978 +
27979 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
27980 return -EINVAL;
27981
27982 diff -urNp linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c
27983 --- linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-05-19 00:06:34.000000000 -0400
27984 +++ linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.c 2011-08-05 19:44:37.000000000 -0400
27985 @@ -435,7 +435,7 @@ static irqreturn_t lis302dl_interrupt(in
27986 * the lid is closed. This leads to interrupts as soon as a little move
27987 * is done.
27988 */
27989 - atomic_inc(&lis3_dev.count);
27990 + atomic_inc_unchecked(&lis3_dev.count);
27991
27992 wake_up_interruptible(&lis3_dev.misc_wait);
27993 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
27994 @@ -518,7 +518,7 @@ static int lis3lv02d_misc_open(struct in
27995 if (lis3_dev.pm_dev)
27996 pm_runtime_get_sync(lis3_dev.pm_dev);
27997
27998 - atomic_set(&lis3_dev.count, 0);
27999 + atomic_set_unchecked(&lis3_dev.count, 0);
28000 return 0;
28001 }
28002
28003 @@ -545,7 +545,7 @@ static ssize_t lis3lv02d_misc_read(struc
28004 add_wait_queue(&lis3_dev.misc_wait, &wait);
28005 while (true) {
28006 set_current_state(TASK_INTERRUPTIBLE);
28007 - data = atomic_xchg(&lis3_dev.count, 0);
28008 + data = atomic_xchg_unchecked(&lis3_dev.count, 0);
28009 if (data)
28010 break;
28011
28012 @@ -583,7 +583,7 @@ out:
28013 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
28014 {
28015 poll_wait(file, &lis3_dev.misc_wait, wait);
28016 - if (atomic_read(&lis3_dev.count))
28017 + if (atomic_read_unchecked(&lis3_dev.count))
28018 return POLLIN | POLLRDNORM;
28019 return 0;
28020 }
28021 diff -urNp linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h
28022 --- linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-05-19 00:06:34.000000000 -0400
28023 +++ linux-2.6.39.4/drivers/misc/lis3lv02d/lis3lv02d.h 2011-08-05 19:44:37.000000000 -0400
28024 @@ -265,7 +265,7 @@ struct lis3lv02d {
28025 struct input_polled_dev *idev; /* input device */
28026 struct platform_device *pdev; /* platform device */
28027 struct regulator_bulk_data regulators[2];
28028 - atomic_t count; /* interrupt count after last read */
28029 + atomic_unchecked_t count; /* interrupt count after last read */
28030 union axis_conversion ac; /* hw -> logical axis */
28031 int mapped_btns[3];
28032
28033 diff -urNp linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c
28034 --- linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c 2011-05-19 00:06:34.000000000 -0400
28035 +++ linux-2.6.39.4/drivers/misc/sgi-gru/gruhandles.c 2011-08-05 19:44:37.000000000 -0400
28036 @@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op
28037 unsigned long nsec;
28038
28039 nsec = CLKS2NSEC(clks);
28040 - atomic_long_inc(&mcs_op_statistics[op].count);
28041 - atomic_long_add(nsec, &mcs_op_statistics[op].total);
28042 + atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
28043 + atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
28044 if (mcs_op_statistics[op].max < nsec)
28045 mcs_op_statistics[op].max = nsec;
28046 }
28047 diff -urNp linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c
28048 --- linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c 2011-05-19 00:06:34.000000000 -0400
28049 +++ linux-2.6.39.4/drivers/misc/sgi-gru/gruprocfs.c 2011-08-05 19:44:37.000000000 -0400
28050 @@ -32,9 +32,9 @@
28051
28052 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
28053
28054 -static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
28055 +static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
28056 {
28057 - unsigned long val = atomic_long_read(v);
28058 + unsigned long val = atomic_long_read_unchecked(v);
28059
28060 seq_printf(s, "%16lu %s\n", val, id);
28061 }
28062 @@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se
28063
28064 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
28065 for (op = 0; op < mcsop_last; op++) {
28066 - count = atomic_long_read(&mcs_op_statistics[op].count);
28067 - total = atomic_long_read(&mcs_op_statistics[op].total);
28068 + count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
28069 + total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
28070 max = mcs_op_statistics[op].max;
28071 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
28072 count ? total / count : 0, max);
28073 diff -urNp linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h
28074 --- linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h 2011-05-19 00:06:34.000000000 -0400
28075 +++ linux-2.6.39.4/drivers/misc/sgi-gru/grutables.h 2011-08-05 19:44:37.000000000 -0400
28076 @@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
28077 * GRU statistics.
28078 */
28079 struct gru_stats_s {
28080 - atomic_long_t vdata_alloc;
28081 - atomic_long_t vdata_free;
28082 - atomic_long_t gts_alloc;
28083 - atomic_long_t gts_free;
28084 - atomic_long_t gms_alloc;
28085 - atomic_long_t gms_free;
28086 - atomic_long_t gts_double_allocate;
28087 - atomic_long_t assign_context;
28088 - atomic_long_t assign_context_failed;
28089 - atomic_long_t free_context;
28090 - atomic_long_t load_user_context;
28091 - atomic_long_t load_kernel_context;
28092 - atomic_long_t lock_kernel_context;
28093 - atomic_long_t unlock_kernel_context;
28094 - atomic_long_t steal_user_context;
28095 - atomic_long_t steal_kernel_context;
28096 - atomic_long_t steal_context_failed;
28097 - atomic_long_t nopfn;
28098 - atomic_long_t asid_new;
28099 - atomic_long_t asid_next;
28100 - atomic_long_t asid_wrap;
28101 - atomic_long_t asid_reuse;
28102 - atomic_long_t intr;
28103 - atomic_long_t intr_cbr;
28104 - atomic_long_t intr_tfh;
28105 - atomic_long_t intr_spurious;
28106 - atomic_long_t intr_mm_lock_failed;
28107 - atomic_long_t call_os;
28108 - atomic_long_t call_os_wait_queue;
28109 - atomic_long_t user_flush_tlb;
28110 - atomic_long_t user_unload_context;
28111 - atomic_long_t user_exception;
28112 - atomic_long_t set_context_option;
28113 - atomic_long_t check_context_retarget_intr;
28114 - atomic_long_t check_context_unload;
28115 - atomic_long_t tlb_dropin;
28116 - atomic_long_t tlb_preload_page;
28117 - atomic_long_t tlb_dropin_fail_no_asid;
28118 - atomic_long_t tlb_dropin_fail_upm;
28119 - atomic_long_t tlb_dropin_fail_invalid;
28120 - atomic_long_t tlb_dropin_fail_range_active;
28121 - atomic_long_t tlb_dropin_fail_idle;
28122 - atomic_long_t tlb_dropin_fail_fmm;
28123 - atomic_long_t tlb_dropin_fail_no_exception;
28124 - atomic_long_t tfh_stale_on_fault;
28125 - atomic_long_t mmu_invalidate_range;
28126 - atomic_long_t mmu_invalidate_page;
28127 - atomic_long_t flush_tlb;
28128 - atomic_long_t flush_tlb_gru;
28129 - atomic_long_t flush_tlb_gru_tgh;
28130 - atomic_long_t flush_tlb_gru_zero_asid;
28131 -
28132 - atomic_long_t copy_gpa;
28133 - atomic_long_t read_gpa;
28134 -
28135 - atomic_long_t mesq_receive;
28136 - atomic_long_t mesq_receive_none;
28137 - atomic_long_t mesq_send;
28138 - atomic_long_t mesq_send_failed;
28139 - atomic_long_t mesq_noop;
28140 - atomic_long_t mesq_send_unexpected_error;
28141 - atomic_long_t mesq_send_lb_overflow;
28142 - atomic_long_t mesq_send_qlimit_reached;
28143 - atomic_long_t mesq_send_amo_nacked;
28144 - atomic_long_t mesq_send_put_nacked;
28145 - atomic_long_t mesq_page_overflow;
28146 - atomic_long_t mesq_qf_locked;
28147 - atomic_long_t mesq_qf_noop_not_full;
28148 - atomic_long_t mesq_qf_switch_head_failed;
28149 - atomic_long_t mesq_qf_unexpected_error;
28150 - atomic_long_t mesq_noop_unexpected_error;
28151 - atomic_long_t mesq_noop_lb_overflow;
28152 - atomic_long_t mesq_noop_qlimit_reached;
28153 - atomic_long_t mesq_noop_amo_nacked;
28154 - atomic_long_t mesq_noop_put_nacked;
28155 - atomic_long_t mesq_noop_page_overflow;
28156 + atomic_long_unchecked_t vdata_alloc;
28157 + atomic_long_unchecked_t vdata_free;
28158 + atomic_long_unchecked_t gts_alloc;
28159 + atomic_long_unchecked_t gts_free;
28160 + atomic_long_unchecked_t gms_alloc;
28161 + atomic_long_unchecked_t gms_free;
28162 + atomic_long_unchecked_t gts_double_allocate;
28163 + atomic_long_unchecked_t assign_context;
28164 + atomic_long_unchecked_t assign_context_failed;
28165 + atomic_long_unchecked_t free_context;
28166 + atomic_long_unchecked_t load_user_context;
28167 + atomic_long_unchecked_t load_kernel_context;
28168 + atomic_long_unchecked_t lock_kernel_context;
28169 + atomic_long_unchecked_t unlock_kernel_context;
28170 + atomic_long_unchecked_t steal_user_context;
28171 + atomic_long_unchecked_t steal_kernel_context;
28172 + atomic_long_unchecked_t steal_context_failed;
28173 + atomic_long_unchecked_t nopfn;
28174 + atomic_long_unchecked_t asid_new;
28175 + atomic_long_unchecked_t asid_next;
28176 + atomic_long_unchecked_t asid_wrap;
28177 + atomic_long_unchecked_t asid_reuse;
28178 + atomic_long_unchecked_t intr;
28179 + atomic_long_unchecked_t intr_cbr;
28180 + atomic_long_unchecked_t intr_tfh;
28181 + atomic_long_unchecked_t intr_spurious;
28182 + atomic_long_unchecked_t intr_mm_lock_failed;
28183 + atomic_long_unchecked_t call_os;
28184 + atomic_long_unchecked_t call_os_wait_queue;
28185 + atomic_long_unchecked_t user_flush_tlb;
28186 + atomic_long_unchecked_t user_unload_context;
28187 + atomic_long_unchecked_t user_exception;
28188 + atomic_long_unchecked_t set_context_option;
28189 + atomic_long_unchecked_t check_context_retarget_intr;
28190 + atomic_long_unchecked_t check_context_unload;
28191 + atomic_long_unchecked_t tlb_dropin;
28192 + atomic_long_unchecked_t tlb_preload_page;
28193 + atomic_long_unchecked_t tlb_dropin_fail_no_asid;
28194 + atomic_long_unchecked_t tlb_dropin_fail_upm;
28195 + atomic_long_unchecked_t tlb_dropin_fail_invalid;
28196 + atomic_long_unchecked_t tlb_dropin_fail_range_active;
28197 + atomic_long_unchecked_t tlb_dropin_fail_idle;
28198 + atomic_long_unchecked_t tlb_dropin_fail_fmm;
28199 + atomic_long_unchecked_t tlb_dropin_fail_no_exception;
28200 + atomic_long_unchecked_t tfh_stale_on_fault;
28201 + atomic_long_unchecked_t mmu_invalidate_range;
28202 + atomic_long_unchecked_t mmu_invalidate_page;
28203 + atomic_long_unchecked_t flush_tlb;
28204 + atomic_long_unchecked_t flush_tlb_gru;
28205 + atomic_long_unchecked_t flush_tlb_gru_tgh;
28206 + atomic_long_unchecked_t flush_tlb_gru_zero_asid;
28207 +
28208 + atomic_long_unchecked_t copy_gpa;
28209 + atomic_long_unchecked_t read_gpa;
28210 +
28211 + atomic_long_unchecked_t mesq_receive;
28212 + atomic_long_unchecked_t mesq_receive_none;
28213 + atomic_long_unchecked_t mesq_send;
28214 + atomic_long_unchecked_t mesq_send_failed;
28215 + atomic_long_unchecked_t mesq_noop;
28216 + atomic_long_unchecked_t mesq_send_unexpected_error;
28217 + atomic_long_unchecked_t mesq_send_lb_overflow;
28218 + atomic_long_unchecked_t mesq_send_qlimit_reached;
28219 + atomic_long_unchecked_t mesq_send_amo_nacked;
28220 + atomic_long_unchecked_t mesq_send_put_nacked;
28221 + atomic_long_unchecked_t mesq_page_overflow;
28222 + atomic_long_unchecked_t mesq_qf_locked;
28223 + atomic_long_unchecked_t mesq_qf_noop_not_full;
28224 + atomic_long_unchecked_t mesq_qf_switch_head_failed;
28225 + atomic_long_unchecked_t mesq_qf_unexpected_error;
28226 + atomic_long_unchecked_t mesq_noop_unexpected_error;
28227 + atomic_long_unchecked_t mesq_noop_lb_overflow;
28228 + atomic_long_unchecked_t mesq_noop_qlimit_reached;
28229 + atomic_long_unchecked_t mesq_noop_amo_nacked;
28230 + atomic_long_unchecked_t mesq_noop_put_nacked;
28231 + atomic_long_unchecked_t mesq_noop_page_overflow;
28232
28233 };
28234
28235 @@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start
28236 tghop_invalidate, mcsop_last};
28237
28238 struct mcs_op_statistic {
28239 - atomic_long_t count;
28240 - atomic_long_t total;
28241 + atomic_long_unchecked_t count;
28242 + atomic_long_unchecked_t total;
28243 unsigned long max;
28244 };
28245
28246 @@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st
28247
28248 #define STAT(id) do { \
28249 if (gru_options & OPT_STATS) \
28250 - atomic_long_inc(&gru_stats.id); \
28251 + atomic_long_inc_unchecked(&gru_stats.id); \
28252 } while (0)
28253
28254 #ifdef CONFIG_SGI_GRU_DEBUG
28255 diff -urNp linux-2.6.39.4/drivers/misc/sgi-xp/xp.h linux-2.6.39.4/drivers/misc/sgi-xp/xp.h
28256 --- linux-2.6.39.4/drivers/misc/sgi-xp/xp.h 2011-05-19 00:06:34.000000000 -0400
28257 +++ linux-2.6.39.4/drivers/misc/sgi-xp/xp.h 2011-08-05 20:34:06.000000000 -0400
28258 @@ -289,7 +289,7 @@ struct xpc_interface {
28259 xpc_notify_func, void *);
28260 void (*received) (short, int, void *);
28261 enum xp_retval (*partid_to_nasids) (short, void *);
28262 -};
28263 +} __no_const;
28264
28265 extern struct xpc_interface xpc_interface;
28266
28267 diff -urNp linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c
28268 --- linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-19 00:06:34.000000000 -0400
28269 +++ linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0001.c 2011-08-05 19:44:37.000000000 -0400
28270 @@ -757,6 +757,8 @@ static int chip_ready (struct map_info *
28271 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
28272 unsigned long timeo = jiffies + HZ;
28273
28274 + pax_track_stack();
28275 +
28276 /* Prevent setting state FL_SYNCING for chip in suspended state. */
28277 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
28278 goto sleep;
28279 @@ -1657,6 +1659,8 @@ static int __xipram do_write_buffer(stru
28280 unsigned long initial_adr;
28281 int initial_len = len;
28282
28283 + pax_track_stack();
28284 +
28285 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
28286 adr += chip->start;
28287 initial_adr = adr;
28288 @@ -1875,6 +1879,8 @@ static int __xipram do_erase_oneblock(st
28289 int retries = 3;
28290 int ret;
28291
28292 + pax_track_stack();
28293 +
28294 adr += chip->start;
28295
28296 retry:
28297 diff -urNp linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c
28298 --- linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-19 00:06:34.000000000 -0400
28299 +++ linux-2.6.39.4/drivers/mtd/chips/cfi_cmdset_0020.c 2011-08-05 19:44:37.000000000 -0400
28300 @@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
28301 unsigned long cmd_addr;
28302 struct cfi_private *cfi = map->fldrv_priv;
28303
28304 + pax_track_stack();
28305 +
28306 adr += chip->start;
28307
28308 /* Ensure cmd read/writes are aligned. */
28309 @@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
28310 DECLARE_WAITQUEUE(wait, current);
28311 int wbufsize, z;
28312
28313 + pax_track_stack();
28314 +
28315 /* M58LW064A requires bus alignment for buffer wriets -- saw */
28316 if (adr & (map_bankwidth(map)-1))
28317 return -EINVAL;
28318 @@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
28319 DECLARE_WAITQUEUE(wait, current);
28320 int ret = 0;
28321
28322 + pax_track_stack();
28323 +
28324 adr += chip->start;
28325
28326 /* Let's determine this according to the interleave only once */
28327 @@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
28328 unsigned long timeo = jiffies + HZ;
28329 DECLARE_WAITQUEUE(wait, current);
28330
28331 + pax_track_stack();
28332 +
28333 adr += chip->start;
28334
28335 /* Let's determine this according to the interleave only once */
28336 @@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
28337 unsigned long timeo = jiffies + HZ;
28338 DECLARE_WAITQUEUE(wait, current);
28339
28340 + pax_track_stack();
28341 +
28342 adr += chip->start;
28343
28344 /* Let's determine this according to the interleave only once */
28345 diff -urNp linux-2.6.39.4/drivers/mtd/devices/doc2000.c linux-2.6.39.4/drivers/mtd/devices/doc2000.c
28346 --- linux-2.6.39.4/drivers/mtd/devices/doc2000.c 2011-05-19 00:06:34.000000000 -0400
28347 +++ linux-2.6.39.4/drivers/mtd/devices/doc2000.c 2011-08-05 19:44:37.000000000 -0400
28348 @@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
28349
28350 /* The ECC will not be calculated correctly if less than 512 is written */
28351 /* DBB-
28352 - if (len != 0x200 && eccbuf)
28353 + if (len != 0x200)
28354 printk(KERN_WARNING
28355 "ECC needs a full sector write (adr: %lx size %lx)\n",
28356 (long) to, (long) len);
28357 diff -urNp linux-2.6.39.4/drivers/mtd/devices/doc2001.c linux-2.6.39.4/drivers/mtd/devices/doc2001.c
28358 --- linux-2.6.39.4/drivers/mtd/devices/doc2001.c 2011-05-19 00:06:34.000000000 -0400
28359 +++ linux-2.6.39.4/drivers/mtd/devices/doc2001.c 2011-08-05 19:44:37.000000000 -0400
28360 @@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
28361 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
28362
28363 /* Don't allow read past end of device */
28364 - if (from >= this->totlen)
28365 + if (from >= this->totlen || !len)
28366 return -EINVAL;
28367
28368 /* Don't allow a single read to cross a 512-byte block boundary */
28369 diff -urNp linux-2.6.39.4/drivers/mtd/ftl.c linux-2.6.39.4/drivers/mtd/ftl.c
28370 --- linux-2.6.39.4/drivers/mtd/ftl.c 2011-05-19 00:06:34.000000000 -0400
28371 +++ linux-2.6.39.4/drivers/mtd/ftl.c 2011-08-05 19:44:37.000000000 -0400
28372 @@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
28373 loff_t offset;
28374 uint16_t srcunitswap = cpu_to_le16(srcunit);
28375
28376 + pax_track_stack();
28377 +
28378 eun = &part->EUNInfo[srcunit];
28379 xfer = &part->XferInfo[xferunit];
28380 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
28381 diff -urNp linux-2.6.39.4/drivers/mtd/inftlcore.c linux-2.6.39.4/drivers/mtd/inftlcore.c
28382 --- linux-2.6.39.4/drivers/mtd/inftlcore.c 2011-05-19 00:06:34.000000000 -0400
28383 +++ linux-2.6.39.4/drivers/mtd/inftlcore.c 2011-08-05 19:44:37.000000000 -0400
28384 @@ -259,6 +259,8 @@ static u16 INFTL_foldchain(struct INFTLr
28385 struct inftl_oob oob;
28386 size_t retlen;
28387
28388 + pax_track_stack();
28389 +
28390 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
28391 "pending=%d)\n", inftl, thisVUC, pendingblock);
28392
28393 diff -urNp linux-2.6.39.4/drivers/mtd/inftlmount.c linux-2.6.39.4/drivers/mtd/inftlmount.c
28394 --- linux-2.6.39.4/drivers/mtd/inftlmount.c 2011-05-19 00:06:34.000000000 -0400
28395 +++ linux-2.6.39.4/drivers/mtd/inftlmount.c 2011-08-05 19:44:37.000000000 -0400
28396 @@ -53,6 +53,8 @@ static int find_boot_record(struct INFTL
28397 struct INFTLPartition *ip;
28398 size_t retlen;
28399
28400 + pax_track_stack();
28401 +
28402 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
28403
28404 /*
28405 diff -urNp linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c
28406 --- linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c 2011-05-19 00:06:34.000000000 -0400
28407 +++ linux-2.6.39.4/drivers/mtd/lpddr/qinfo_probe.c 2011-08-05 19:44:37.000000000 -0400
28408 @@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
28409 {
28410 map_word pfow_val[4];
28411
28412 + pax_track_stack();
28413 +
28414 /* Check identification string */
28415 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
28416 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
28417 diff -urNp linux-2.6.39.4/drivers/mtd/mtdchar.c linux-2.6.39.4/drivers/mtd/mtdchar.c
28418 --- linux-2.6.39.4/drivers/mtd/mtdchar.c 2011-05-19 00:06:34.000000000 -0400
28419 +++ linux-2.6.39.4/drivers/mtd/mtdchar.c 2011-08-05 19:44:37.000000000 -0400
28420 @@ -560,6 +560,8 @@ static int mtd_ioctl(struct file *file,
28421 u_long size;
28422 struct mtd_info_user info;
28423
28424 + pax_track_stack();
28425 +
28426 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
28427
28428 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
28429 diff -urNp linux-2.6.39.4/drivers/mtd/nand/denali.c linux-2.6.39.4/drivers/mtd/nand/denali.c
28430 --- linux-2.6.39.4/drivers/mtd/nand/denali.c 2011-05-19 00:06:34.000000000 -0400
28431 +++ linux-2.6.39.4/drivers/mtd/nand/denali.c 2011-08-05 19:44:37.000000000 -0400
28432 @@ -25,6 +25,7 @@
28433 #include <linux/pci.h>
28434 #include <linux/mtd/mtd.h>
28435 #include <linux/module.h>
28436 +#include <linux/slab.h>
28437
28438 #include "denali.h"
28439
28440 diff -urNp linux-2.6.39.4/drivers/mtd/nftlcore.c linux-2.6.39.4/drivers/mtd/nftlcore.c
28441 --- linux-2.6.39.4/drivers/mtd/nftlcore.c 2011-05-19 00:06:34.000000000 -0400
28442 +++ linux-2.6.39.4/drivers/mtd/nftlcore.c 2011-08-05 19:44:37.000000000 -0400
28443 @@ -264,6 +264,8 @@ static u16 NFTL_foldchain (struct NFTLre
28444 int inplace = 1;
28445 size_t retlen;
28446
28447 + pax_track_stack();
28448 +
28449 memset(BlockMap, 0xff, sizeof(BlockMap));
28450 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
28451
28452 diff -urNp linux-2.6.39.4/drivers/mtd/nftlmount.c linux-2.6.39.4/drivers/mtd/nftlmount.c
28453 --- linux-2.6.39.4/drivers/mtd/nftlmount.c 2011-05-19 00:06:34.000000000 -0400
28454 +++ linux-2.6.39.4/drivers/mtd/nftlmount.c 2011-08-05 19:44:37.000000000 -0400
28455 @@ -24,6 +24,7 @@
28456 #include <asm/errno.h>
28457 #include <linux/delay.h>
28458 #include <linux/slab.h>
28459 +#include <linux/sched.h>
28460 #include <linux/mtd/mtd.h>
28461 #include <linux/mtd/nand.h>
28462 #include <linux/mtd/nftl.h>
28463 @@ -45,6 +46,8 @@ static int find_boot_record(struct NFTLr
28464 struct mtd_info *mtd = nftl->mbd.mtd;
28465 unsigned int i;
28466
28467 + pax_track_stack();
28468 +
28469 /* Assume logical EraseSize == physical erasesize for starting the scan.
28470 We'll sort it out later if we find a MediaHeader which says otherwise */
28471 /* Actually, we won't. The new DiskOnChip driver has already scanned
28472 diff -urNp linux-2.6.39.4/drivers/mtd/ubi/build.c linux-2.6.39.4/drivers/mtd/ubi/build.c
28473 --- linux-2.6.39.4/drivers/mtd/ubi/build.c 2011-05-19 00:06:34.000000000 -0400
28474 +++ linux-2.6.39.4/drivers/mtd/ubi/build.c 2011-08-05 19:44:37.000000000 -0400
28475 @@ -1287,7 +1287,7 @@ module_exit(ubi_exit);
28476 static int __init bytes_str_to_int(const char *str)
28477 {
28478 char *endp;
28479 - unsigned long result;
28480 + unsigned long result, scale = 1;
28481
28482 result = simple_strtoul(str, &endp, 0);
28483 if (str == endp || result >= INT_MAX) {
28484 @@ -1298,11 +1298,11 @@ static int __init bytes_str_to_int(const
28485
28486 switch (*endp) {
28487 case 'G':
28488 - result *= 1024;
28489 + scale *= 1024;
28490 case 'M':
28491 - result *= 1024;
28492 + scale *= 1024;
28493 case 'K':
28494 - result *= 1024;
28495 + scale *= 1024;
28496 if (endp[1] == 'i' && endp[2] == 'B')
28497 endp += 2;
28498 case '\0':
28499 @@ -1313,7 +1313,13 @@ static int __init bytes_str_to_int(const
28500 return -EINVAL;
28501 }
28502
28503 - return result;
28504 + if ((intoverflow_t)result*scale >= INT_MAX) {
28505 + printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
28506 + str);
28507 + return -EINVAL;
28508 + }
28509 +
28510 + return result*scale;
28511 }
28512
28513 /**
28514 diff -urNp linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c
28515 --- linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c 2011-05-19 00:06:34.000000000 -0400
28516 +++ linux-2.6.39.4/drivers/net/bna/bfa_ioc_ct.c 2011-08-05 20:34:06.000000000 -0400
28517 @@ -48,7 +48,21 @@ static void bfa_ioc_ct_sync_ack(struct b
28518 static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
28519 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
28520
28521 -static struct bfa_ioc_hwif nw_hwif_ct;
28522 +static struct bfa_ioc_hwif nw_hwif_ct = {
28523 + .ioc_pll_init = bfa_ioc_ct_pll_init,
28524 + .ioc_firmware_lock = bfa_ioc_ct_firmware_lock,
28525 + .ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock,
28526 + .ioc_reg_init = bfa_ioc_ct_reg_init,
28527 + .ioc_map_port = bfa_ioc_ct_map_port,
28528 + .ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set,
28529 + .ioc_notify_fail = bfa_ioc_ct_notify_fail,
28530 + .ioc_ownership_reset = bfa_ioc_ct_ownership_reset,
28531 + .ioc_sync_start = bfa_ioc_ct_sync_start,
28532 + .ioc_sync_join = bfa_ioc_ct_sync_join,
28533 + .ioc_sync_leave = bfa_ioc_ct_sync_leave,
28534 + .ioc_sync_ack = bfa_ioc_ct_sync_ack,
28535 + .ioc_sync_complete = bfa_ioc_ct_sync_complete
28536 +};
28537
28538 /**
28539 * Called from bfa_ioc_attach() to map asic specific calls.
28540 @@ -56,20 +70,6 @@ static struct bfa_ioc_hwif nw_hwif_ct;
28541 void
28542 bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
28543 {
28544 - nw_hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
28545 - nw_hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
28546 - nw_hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
28547 - nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
28548 - nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
28549 - nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
28550 - nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
28551 - nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
28552 - nw_hwif_ct.ioc_sync_start = bfa_ioc_ct_sync_start;
28553 - nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
28554 - nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
28555 - nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
28556 - nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
28557 -
28558 ioc->ioc_hwif = &nw_hwif_ct;
28559 }
28560
28561 diff -urNp linux-2.6.39.4/drivers/net/bna/bnad.c linux-2.6.39.4/drivers/net/bna/bnad.c
28562 --- linux-2.6.39.4/drivers/net/bna/bnad.c 2011-05-19 00:06:34.000000000 -0400
28563 +++ linux-2.6.39.4/drivers/net/bna/bnad.c 2011-08-05 20:34:06.000000000 -0400
28564 @@ -1681,7 +1681,14 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28565 struct bna_intr_info *intr_info =
28566 &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
28567 struct bna_tx_config *tx_config = &bnad->tx_config[tx_id];
28568 - struct bna_tx_event_cbfn tx_cbfn;
28569 + static struct bna_tx_event_cbfn tx_cbfn = {
28570 + /* Initialize the tx event handlers */
28571 + .tcb_setup_cbfn = bnad_cb_tcb_setup,
28572 + .tcb_destroy_cbfn = bnad_cb_tcb_destroy,
28573 + .tx_stall_cbfn = bnad_cb_tx_stall,
28574 + .tx_resume_cbfn = bnad_cb_tx_resume,
28575 + .tx_cleanup_cbfn = bnad_cb_tx_cleanup
28576 + };
28577 struct bna_tx *tx;
28578 unsigned long flags;
28579
28580 @@ -1690,13 +1697,6 @@ bnad_setup_tx(struct bnad *bnad, uint tx
28581 tx_config->txq_depth = bnad->txq_depth;
28582 tx_config->tx_type = BNA_TX_T_REGULAR;
28583
28584 - /* Initialize the tx event handlers */
28585 - tx_cbfn.tcb_setup_cbfn = bnad_cb_tcb_setup;
28586 - tx_cbfn.tcb_destroy_cbfn = bnad_cb_tcb_destroy;
28587 - tx_cbfn.tx_stall_cbfn = bnad_cb_tx_stall;
28588 - tx_cbfn.tx_resume_cbfn = bnad_cb_tx_resume;
28589 - tx_cbfn.tx_cleanup_cbfn = bnad_cb_tx_cleanup;
28590 -
28591 /* Get BNA's resource requirement for one tx object */
28592 spin_lock_irqsave(&bnad->bna_lock, flags);
28593 bna_tx_res_req(bnad->num_txq_per_tx,
28594 @@ -1827,21 +1827,21 @@ bnad_setup_rx(struct bnad *bnad, uint rx
28595 struct bna_intr_info *intr_info =
28596 &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
28597 struct bna_rx_config *rx_config = &bnad->rx_config[rx_id];
28598 - struct bna_rx_event_cbfn rx_cbfn;
28599 + static struct bna_rx_event_cbfn rx_cbfn = {
28600 + /* Initialize the Rx event handlers */
28601 + .rcb_setup_cbfn = bnad_cb_rcb_setup,
28602 + .rcb_destroy_cbfn = bnad_cb_rcb_destroy,
28603 + .ccb_setup_cbfn = bnad_cb_ccb_setup,
28604 + .ccb_destroy_cbfn = bnad_cb_ccb_destroy,
28605 + .rx_cleanup_cbfn = bnad_cb_rx_cleanup,
28606 + .rx_post_cbfn = bnad_cb_rx_post
28607 + };
28608 struct bna_rx *rx;
28609 unsigned long flags;
28610
28611 /* Initialize the Rx object configuration */
28612 bnad_init_rx_config(bnad, rx_config);
28613
28614 - /* Initialize the Rx event handlers */
28615 - rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
28616 - rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
28617 - rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
28618 - rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
28619 - rx_cbfn.rx_cleanup_cbfn = bnad_cb_rx_cleanup;
28620 - rx_cbfn.rx_post_cbfn = bnad_cb_rx_post;
28621 -
28622 /* Get BNA's resource requirement for one Rx object */
28623 spin_lock_irqsave(&bnad->bna_lock, flags);
28624 bna_rx_res_req(rx_config, res_info);
28625 diff -urNp linux-2.6.39.4/drivers/net/bnx2.c linux-2.6.39.4/drivers/net/bnx2.c
28626 --- linux-2.6.39.4/drivers/net/bnx2.c 2011-05-19 00:06:34.000000000 -0400
28627 +++ linux-2.6.39.4/drivers/net/bnx2.c 2011-08-05 19:44:37.000000000 -0400
28628 @@ -5828,6 +5828,8 @@ bnx2_test_nvram(struct bnx2 *bp)
28629 int rc = 0;
28630 u32 magic, csum;
28631
28632 + pax_track_stack();
28633 +
28634 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
28635 goto test_nvram_done;
28636
28637 diff -urNp linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c
28638 --- linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-05-19 00:06:34.000000000 -0400
28639 +++ linux-2.6.39.4/drivers/net/bnx2x/bnx2x_ethtool.c 2011-08-05 19:44:37.000000000 -0400
28640 @@ -1788,6 +1788,8 @@ static int bnx2x_test_nvram(struct bnx2x
28641 int i, rc;
28642 u32 magic, crc;
28643
28644 + pax_track_stack();
28645 +
28646 if (BP_NOMCP(bp))
28647 return 0;
28648
28649 diff -urNp linux-2.6.39.4/drivers/net/cxgb3/l2t.h linux-2.6.39.4/drivers/net/cxgb3/l2t.h
28650 --- linux-2.6.39.4/drivers/net/cxgb3/l2t.h 2011-05-19 00:06:34.000000000 -0400
28651 +++ linux-2.6.39.4/drivers/net/cxgb3/l2t.h 2011-08-05 20:34:06.000000000 -0400
28652 @@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
28653 */
28654 struct l2t_skb_cb {
28655 arp_failure_handler_func arp_failure_handler;
28656 -};
28657 +} __no_const;
28658
28659 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
28660
28661 diff -urNp linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c
28662 --- linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c 2011-05-19 00:06:34.000000000 -0400
28663 +++ linux-2.6.39.4/drivers/net/cxgb4/cxgb4_main.c 2011-08-05 19:44:37.000000000 -0400
28664 @@ -3428,6 +3428,8 @@ static int __devinit enable_msix(struct
28665 unsigned int nchan = adap->params.nports;
28666 struct msix_entry entries[MAX_INGQ + 1];
28667
28668 + pax_track_stack();
28669 +
28670 for (i = 0; i < ARRAY_SIZE(entries); ++i)
28671 entries[i].entry = i;
28672
28673 diff -urNp linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c
28674 --- linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c 2011-05-19 00:06:34.000000000 -0400
28675 +++ linux-2.6.39.4/drivers/net/cxgb4/t4_hw.c 2011-08-05 19:44:37.000000000 -0400
28676 @@ -362,6 +362,8 @@ static int get_vpd_params(struct adapter
28677 u8 vpd[VPD_LEN], csum;
28678 unsigned int vpdr_len, kw_offset, id_len;
28679
28680 + pax_track_stack();
28681 +
28682 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
28683 if (ret < 0)
28684 return ret;
28685 diff -urNp linux-2.6.39.4/drivers/net/e1000e/82571.c linux-2.6.39.4/drivers/net/e1000e/82571.c
28686 --- linux-2.6.39.4/drivers/net/e1000e/82571.c 2011-05-19 00:06:34.000000000 -0400
28687 +++ linux-2.6.39.4/drivers/net/e1000e/82571.c 2011-08-05 20:34:06.000000000 -0400
28688 @@ -239,7 +239,7 @@ static s32 e1000_init_mac_params_82571(s
28689 {
28690 struct e1000_hw *hw = &adapter->hw;
28691 struct e1000_mac_info *mac = &hw->mac;
28692 - struct e1000_mac_operations *func = &mac->ops;
28693 + e1000_mac_operations_no_const *func = &mac->ops;
28694 u32 swsm = 0;
28695 u32 swsm2 = 0;
28696 bool force_clear_smbi = false;
28697 diff -urNp linux-2.6.39.4/drivers/net/e1000e/es2lan.c linux-2.6.39.4/drivers/net/e1000e/es2lan.c
28698 --- linux-2.6.39.4/drivers/net/e1000e/es2lan.c 2011-05-19 00:06:34.000000000 -0400
28699 +++ linux-2.6.39.4/drivers/net/e1000e/es2lan.c 2011-08-05 20:34:06.000000000 -0400
28700 @@ -205,7 +205,7 @@ static s32 e1000_init_mac_params_80003es
28701 {
28702 struct e1000_hw *hw = &adapter->hw;
28703 struct e1000_mac_info *mac = &hw->mac;
28704 - struct e1000_mac_operations *func = &mac->ops;
28705 + e1000_mac_operations_no_const *func = &mac->ops;
28706
28707 /* Set media type */
28708 switch (adapter->pdev->device) {
28709 diff -urNp linux-2.6.39.4/drivers/net/e1000e/hw.h linux-2.6.39.4/drivers/net/e1000e/hw.h
28710 --- linux-2.6.39.4/drivers/net/e1000e/hw.h 2011-05-19 00:06:34.000000000 -0400
28711 +++ linux-2.6.39.4/drivers/net/e1000e/hw.h 2011-08-05 20:34:06.000000000 -0400
28712 @@ -775,6 +775,7 @@ struct e1000_mac_operations {
28713 void (*write_vfta)(struct e1000_hw *, u32, u32);
28714 s32 (*read_mac_addr)(struct e1000_hw *);
28715 };
28716 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28717
28718 /* Function pointers for the PHY. */
28719 struct e1000_phy_operations {
28720 @@ -798,6 +799,7 @@ struct e1000_phy_operations {
28721 void (*power_up)(struct e1000_hw *);
28722 void (*power_down)(struct e1000_hw *);
28723 };
28724 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28725
28726 /* Function pointers for the NVM. */
28727 struct e1000_nvm_operations {
28728 @@ -809,9 +811,10 @@ struct e1000_nvm_operations {
28729 s32 (*validate)(struct e1000_hw *);
28730 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
28731 };
28732 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28733
28734 struct e1000_mac_info {
28735 - struct e1000_mac_operations ops;
28736 + e1000_mac_operations_no_const ops;
28737 u8 addr[ETH_ALEN];
28738 u8 perm_addr[ETH_ALEN];
28739
28740 @@ -852,7 +855,7 @@ struct e1000_mac_info {
28741 };
28742
28743 struct e1000_phy_info {
28744 - struct e1000_phy_operations ops;
28745 + e1000_phy_operations_no_const ops;
28746
28747 enum e1000_phy_type type;
28748
28749 @@ -886,7 +889,7 @@ struct e1000_phy_info {
28750 };
28751
28752 struct e1000_nvm_info {
28753 - struct e1000_nvm_operations ops;
28754 + e1000_nvm_operations_no_const ops;
28755
28756 enum e1000_nvm_type type;
28757 enum e1000_nvm_override override;
28758 diff -urNp linux-2.6.39.4/drivers/net/hamradio/6pack.c linux-2.6.39.4/drivers/net/hamradio/6pack.c
28759 --- linux-2.6.39.4/drivers/net/hamradio/6pack.c 2011-07-09 09:18:51.000000000 -0400
28760 +++ linux-2.6.39.4/drivers/net/hamradio/6pack.c 2011-08-05 19:44:37.000000000 -0400
28761 @@ -463,6 +463,8 @@ static void sixpack_receive_buf(struct t
28762 unsigned char buf[512];
28763 int count1;
28764
28765 + pax_track_stack();
28766 +
28767 if (!count)
28768 return;
28769
28770 diff -urNp linux-2.6.39.4/drivers/net/igb/e1000_hw.h linux-2.6.39.4/drivers/net/igb/e1000_hw.h
28771 --- linux-2.6.39.4/drivers/net/igb/e1000_hw.h 2011-05-19 00:06:34.000000000 -0400
28772 +++ linux-2.6.39.4/drivers/net/igb/e1000_hw.h 2011-08-05 20:34:06.000000000 -0400
28773 @@ -314,6 +314,7 @@ struct e1000_mac_operations {
28774 s32 (*read_mac_addr)(struct e1000_hw *);
28775 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
28776 };
28777 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28778
28779 struct e1000_phy_operations {
28780 s32 (*acquire)(struct e1000_hw *);
28781 @@ -330,6 +331,7 @@ struct e1000_phy_operations {
28782 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
28783 s32 (*write_reg)(struct e1000_hw *, u32, u16);
28784 };
28785 +typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
28786
28787 struct e1000_nvm_operations {
28788 s32 (*acquire)(struct e1000_hw *);
28789 @@ -339,6 +341,7 @@ struct e1000_nvm_operations {
28790 s32 (*update)(struct e1000_hw *);
28791 s32 (*validate)(struct e1000_hw *);
28792 };
28793 +typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
28794
28795 struct e1000_info {
28796 s32 (*get_invariants)(struct e1000_hw *);
28797 @@ -350,7 +353,7 @@ struct e1000_info {
28798 extern const struct e1000_info e1000_82575_info;
28799
28800 struct e1000_mac_info {
28801 - struct e1000_mac_operations ops;
28802 + e1000_mac_operations_no_const ops;
28803
28804 u8 addr[6];
28805 u8 perm_addr[6];
28806 @@ -388,7 +391,7 @@ struct e1000_mac_info {
28807 };
28808
28809 struct e1000_phy_info {
28810 - struct e1000_phy_operations ops;
28811 + e1000_phy_operations_no_const ops;
28812
28813 enum e1000_phy_type type;
28814
28815 @@ -423,7 +426,7 @@ struct e1000_phy_info {
28816 };
28817
28818 struct e1000_nvm_info {
28819 - struct e1000_nvm_operations ops;
28820 + e1000_nvm_operations_no_const ops;
28821 enum e1000_nvm_type type;
28822 enum e1000_nvm_override override;
28823
28824 @@ -468,6 +471,7 @@ struct e1000_mbx_operations {
28825 s32 (*check_for_ack)(struct e1000_hw *, u16);
28826 s32 (*check_for_rst)(struct e1000_hw *, u16);
28827 };
28828 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28829
28830 struct e1000_mbx_stats {
28831 u32 msgs_tx;
28832 @@ -479,7 +483,7 @@ struct e1000_mbx_stats {
28833 };
28834
28835 struct e1000_mbx_info {
28836 - struct e1000_mbx_operations ops;
28837 + e1000_mbx_operations_no_const ops;
28838 struct e1000_mbx_stats stats;
28839 u32 timeout;
28840 u32 usec_delay;
28841 diff -urNp linux-2.6.39.4/drivers/net/igbvf/vf.h linux-2.6.39.4/drivers/net/igbvf/vf.h
28842 --- linux-2.6.39.4/drivers/net/igbvf/vf.h 2011-05-19 00:06:34.000000000 -0400
28843 +++ linux-2.6.39.4/drivers/net/igbvf/vf.h 2011-08-05 20:34:06.000000000 -0400
28844 @@ -189,9 +189,10 @@ struct e1000_mac_operations {
28845 s32 (*read_mac_addr)(struct e1000_hw *);
28846 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
28847 };
28848 +typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
28849
28850 struct e1000_mac_info {
28851 - struct e1000_mac_operations ops;
28852 + e1000_mac_operations_no_const ops;
28853 u8 addr[6];
28854 u8 perm_addr[6];
28855
28856 @@ -213,6 +214,7 @@ struct e1000_mbx_operations {
28857 s32 (*check_for_ack)(struct e1000_hw *);
28858 s32 (*check_for_rst)(struct e1000_hw *);
28859 };
28860 +typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
28861
28862 struct e1000_mbx_stats {
28863 u32 msgs_tx;
28864 @@ -224,7 +226,7 @@ struct e1000_mbx_stats {
28865 };
28866
28867 struct e1000_mbx_info {
28868 - struct e1000_mbx_operations ops;
28869 + e1000_mbx_operations_no_const ops;
28870 struct e1000_mbx_stats stats;
28871 u32 timeout;
28872 u32 usec_delay;
28873 diff -urNp linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c
28874 --- linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c 2011-05-19 00:06:34.000000000 -0400
28875 +++ linux-2.6.39.4/drivers/net/ixgb/ixgb_main.c 2011-08-05 19:44:37.000000000 -0400
28876 @@ -1069,6 +1069,8 @@ ixgb_set_multi(struct net_device *netdev
28877 u32 rctl;
28878 int i;
28879
28880 + pax_track_stack();
28881 +
28882 /* Check for Promiscuous and All Multicast modes */
28883
28884 rctl = IXGB_READ_REG(hw, RCTL);
28885 diff -urNp linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c
28886 --- linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c 2011-05-19 00:06:34.000000000 -0400
28887 +++ linux-2.6.39.4/drivers/net/ixgb/ixgb_param.c 2011-08-05 19:44:37.000000000 -0400
28888 @@ -261,6 +261,9 @@ void __devinit
28889 ixgb_check_options(struct ixgb_adapter *adapter)
28890 {
28891 int bd = adapter->bd_number;
28892 +
28893 + pax_track_stack();
28894 +
28895 if (bd >= IXGB_MAX_NIC) {
28896 pr_notice("Warning: no configuration for board #%i\n", bd);
28897 pr_notice("Using defaults for all values\n");
28898 diff -urNp linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h
28899 --- linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h 2011-05-19 00:06:34.000000000 -0400
28900 +++ linux-2.6.39.4/drivers/net/ixgbe/ixgbe_type.h 2011-08-05 20:34:06.000000000 -0400
28901 @@ -2496,6 +2496,7 @@ struct ixgbe_eeprom_operations {
28902 s32 (*update_checksum)(struct ixgbe_hw *);
28903 u16 (*calc_checksum)(struct ixgbe_hw *);
28904 };
28905 +typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
28906
28907 struct ixgbe_mac_operations {
28908 s32 (*init_hw)(struct ixgbe_hw *);
28909 @@ -2551,6 +2552,7 @@ struct ixgbe_mac_operations {
28910 /* Flow Control */
28911 s32 (*fc_enable)(struct ixgbe_hw *, s32);
28912 };
28913 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28914
28915 struct ixgbe_phy_operations {
28916 s32 (*identify)(struct ixgbe_hw *);
28917 @@ -2570,9 +2572,10 @@ struct ixgbe_phy_operations {
28918 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
28919 s32 (*check_overtemp)(struct ixgbe_hw *);
28920 };
28921 +typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
28922
28923 struct ixgbe_eeprom_info {
28924 - struct ixgbe_eeprom_operations ops;
28925 + ixgbe_eeprom_operations_no_const ops;
28926 enum ixgbe_eeprom_type type;
28927 u32 semaphore_delay;
28928 u16 word_size;
28929 @@ -2581,7 +2584,7 @@ struct ixgbe_eeprom_info {
28930
28931 #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01
28932 struct ixgbe_mac_info {
28933 - struct ixgbe_mac_operations ops;
28934 + ixgbe_mac_operations_no_const ops;
28935 enum ixgbe_mac_type type;
28936 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28937 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
28938 @@ -2608,7 +2611,7 @@ struct ixgbe_mac_info {
28939 };
28940
28941 struct ixgbe_phy_info {
28942 - struct ixgbe_phy_operations ops;
28943 + ixgbe_phy_operations_no_const ops;
28944 struct mdio_if_info mdio;
28945 enum ixgbe_phy_type type;
28946 u32 id;
28947 @@ -2636,6 +2639,7 @@ struct ixgbe_mbx_operations {
28948 s32 (*check_for_ack)(struct ixgbe_hw *, u16);
28949 s32 (*check_for_rst)(struct ixgbe_hw *, u16);
28950 };
28951 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28952
28953 struct ixgbe_mbx_stats {
28954 u32 msgs_tx;
28955 @@ -2647,7 +2651,7 @@ struct ixgbe_mbx_stats {
28956 };
28957
28958 struct ixgbe_mbx_info {
28959 - struct ixgbe_mbx_operations ops;
28960 + ixgbe_mbx_operations_no_const ops;
28961 struct ixgbe_mbx_stats stats;
28962 u32 timeout;
28963 u32 usec_delay;
28964 diff -urNp linux-2.6.39.4/drivers/net/ixgbevf/vf.h linux-2.6.39.4/drivers/net/ixgbevf/vf.h
28965 --- linux-2.6.39.4/drivers/net/ixgbevf/vf.h 2011-05-19 00:06:34.000000000 -0400
28966 +++ linux-2.6.39.4/drivers/net/ixgbevf/vf.h 2011-08-05 20:34:06.000000000 -0400
28967 @@ -69,6 +69,7 @@ struct ixgbe_mac_operations {
28968 s32 (*clear_vfta)(struct ixgbe_hw *);
28969 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
28970 };
28971 +typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
28972
28973 enum ixgbe_mac_type {
28974 ixgbe_mac_unknown = 0,
28975 @@ -78,7 +79,7 @@ enum ixgbe_mac_type {
28976 };
28977
28978 struct ixgbe_mac_info {
28979 - struct ixgbe_mac_operations ops;
28980 + ixgbe_mac_operations_no_const ops;
28981 u8 addr[6];
28982 u8 perm_addr[6];
28983
28984 @@ -102,6 +103,7 @@ struct ixgbe_mbx_operations {
28985 s32 (*check_for_ack)(struct ixgbe_hw *);
28986 s32 (*check_for_rst)(struct ixgbe_hw *);
28987 };
28988 +typedef struct ixgbe_mbx_operations __no_const ixgbe_mbx_operations_no_const;
28989
28990 struct ixgbe_mbx_stats {
28991 u32 msgs_tx;
28992 @@ -113,7 +115,7 @@ struct ixgbe_mbx_stats {
28993 };
28994
28995 struct ixgbe_mbx_info {
28996 - struct ixgbe_mbx_operations ops;
28997 + ixgbe_mbx_operations_no_const ops;
28998 struct ixgbe_mbx_stats stats;
28999 u32 timeout;
29000 u32 udelay;
29001 diff -urNp linux-2.6.39.4/drivers/net/ksz884x.c linux-2.6.39.4/drivers/net/ksz884x.c
29002 --- linux-2.6.39.4/drivers/net/ksz884x.c 2011-05-19 00:06:34.000000000 -0400
29003 +++ linux-2.6.39.4/drivers/net/ksz884x.c 2011-08-05 20:34:06.000000000 -0400
29004 @@ -6536,6 +6536,8 @@ static void netdev_get_ethtool_stats(str
29005 int rc;
29006 u64 counter[TOTAL_PORT_COUNTER_NUM];
29007
29008 + pax_track_stack();
29009 +
29010 mutex_lock(&hw_priv->lock);
29011 n = SWITCH_PORT_NUM;
29012 for (i = 0, p = port->first_port; i < port->mib_port_cnt; i++, p++) {
29013 diff -urNp linux-2.6.39.4/drivers/net/mlx4/main.c linux-2.6.39.4/drivers/net/mlx4/main.c
29014 --- linux-2.6.39.4/drivers/net/mlx4/main.c 2011-05-19 00:06:34.000000000 -0400
29015 +++ linux-2.6.39.4/drivers/net/mlx4/main.c 2011-08-05 19:44:37.000000000 -0400
29016 @@ -40,6 +40,7 @@
29017 #include <linux/dma-mapping.h>
29018 #include <linux/slab.h>
29019 #include <linux/io-mapping.h>
29020 +#include <linux/sched.h>
29021
29022 #include <linux/mlx4/device.h>
29023 #include <linux/mlx4/doorbell.h>
29024 @@ -764,6 +765,8 @@ static int mlx4_init_hca(struct mlx4_dev
29025 u64 icm_size;
29026 int err;
29027
29028 + pax_track_stack();
29029 +
29030 err = mlx4_QUERY_FW(dev);
29031 if (err) {
29032 if (err == -EACCES)
29033 diff -urNp linux-2.6.39.4/drivers/net/niu.c linux-2.6.39.4/drivers/net/niu.c
29034 --- linux-2.6.39.4/drivers/net/niu.c 2011-05-19 00:06:34.000000000 -0400
29035 +++ linux-2.6.39.4/drivers/net/niu.c 2011-08-05 19:44:37.000000000 -0400
29036 @@ -9067,6 +9067,8 @@ static void __devinit niu_try_msix(struc
29037 int i, num_irqs, err;
29038 u8 first_ldg;
29039
29040 + pax_track_stack();
29041 +
29042 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
29043 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
29044 ldg_num_map[i] = first_ldg + i;
29045 diff -urNp linux-2.6.39.4/drivers/net/pcnet32.c linux-2.6.39.4/drivers/net/pcnet32.c
29046 --- linux-2.6.39.4/drivers/net/pcnet32.c 2011-05-19 00:06:34.000000000 -0400
29047 +++ linux-2.6.39.4/drivers/net/pcnet32.c 2011-08-05 20:34:06.000000000 -0400
29048 @@ -82,7 +82,7 @@ static int cards_found;
29049 /*
29050 * VLB I/O addresses
29051 */
29052 -static unsigned int pcnet32_portlist[] __initdata =
29053 +static unsigned int pcnet32_portlist[] __devinitdata =
29054 { 0x300, 0x320, 0x340, 0x360, 0 };
29055
29056 static int pcnet32_debug;
29057 @@ -270,7 +270,7 @@ struct pcnet32_private {
29058 struct sk_buff **rx_skbuff;
29059 dma_addr_t *tx_dma_addr;
29060 dma_addr_t *rx_dma_addr;
29061 - struct pcnet32_access a;
29062 + struct pcnet32_access *a;
29063 spinlock_t lock; /* Guard lock */
29064 unsigned int cur_rx, cur_tx; /* The next free ring entry */
29065 unsigned int rx_ring_size; /* current rx ring size */
29066 @@ -460,9 +460,9 @@ static void pcnet32_netif_start(struct n
29067 u16 val;
29068
29069 netif_wake_queue(dev);
29070 - val = lp->a.read_csr(ioaddr, CSR3);
29071 + val = lp->a->read_csr(ioaddr, CSR3);
29072 val &= 0x00ff;
29073 - lp->a.write_csr(ioaddr, CSR3, val);
29074 + lp->a->write_csr(ioaddr, CSR3, val);
29075 napi_enable(&lp->napi);
29076 }
29077
29078 @@ -730,7 +730,7 @@ static u32 pcnet32_get_link(struct net_d
29079 r = mii_link_ok(&lp->mii_if);
29080 } else if (lp->chip_version >= PCNET32_79C970A) {
29081 ulong ioaddr = dev->base_addr; /* card base I/O address */
29082 - r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29083 + r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29084 } else { /* can not detect link on really old chips */
29085 r = 1;
29086 }
29087 @@ -792,7 +792,7 @@ static int pcnet32_set_ringparam(struct
29088 pcnet32_netif_stop(dev);
29089
29090 spin_lock_irqsave(&lp->lock, flags);
29091 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29092 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29093
29094 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
29095
29096 @@ -868,7 +868,7 @@ static void pcnet32_ethtool_test(struct
29097 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
29098 {
29099 struct pcnet32_private *lp = netdev_priv(dev);
29100 - struct pcnet32_access *a = &lp->a; /* access to registers */
29101 + struct pcnet32_access *a = lp->a; /* access to registers */
29102 ulong ioaddr = dev->base_addr; /* card base I/O address */
29103 struct sk_buff *skb; /* sk buff */
29104 int x, i; /* counters */
29105 @@ -888,21 +888,21 @@ static int pcnet32_loopback_test(struct
29106 pcnet32_netif_stop(dev);
29107
29108 spin_lock_irqsave(&lp->lock, flags);
29109 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29110 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
29111
29112 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
29113
29114 /* Reset the PCNET32 */
29115 - lp->a.reset(ioaddr);
29116 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29117 + lp->a->reset(ioaddr);
29118 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29119
29120 /* switch pcnet32 to 32bit mode */
29121 - lp->a.write_bcr(ioaddr, 20, 2);
29122 + lp->a->write_bcr(ioaddr, 20, 2);
29123
29124 /* purge & init rings but don't actually restart */
29125 pcnet32_restart(dev, 0x0000);
29126
29127 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29128 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29129
29130 /* Initialize Transmit buffers. */
29131 size = data_len + 15;
29132 @@ -947,10 +947,10 @@ static int pcnet32_loopback_test(struct
29133
29134 /* set int loopback in CSR15 */
29135 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
29136 - lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
29137 + lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
29138
29139 teststatus = cpu_to_le16(0x8000);
29140 - lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
29141 + lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
29142
29143 /* Check status of descriptors */
29144 for (x = 0; x < numbuffs; x++) {
29145 @@ -969,7 +969,7 @@ static int pcnet32_loopback_test(struct
29146 }
29147 }
29148
29149 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29150 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
29151 wmb();
29152 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
29153 netdev_printk(KERN_DEBUG, dev, "RX loopback packets:\n");
29154 @@ -1015,7 +1015,7 @@ clean_up:
29155 pcnet32_restart(dev, CSR0_NORMAL);
29156 } else {
29157 pcnet32_purge_rx_ring(dev);
29158 - lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
29159 + lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
29160 }
29161 spin_unlock_irqrestore(&lp->lock, flags);
29162
29163 @@ -1025,7 +1025,7 @@ clean_up:
29164 static void pcnet32_led_blink_callback(struct net_device *dev)
29165 {
29166 struct pcnet32_private *lp = netdev_priv(dev);
29167 - struct pcnet32_access *a = &lp->a;
29168 + struct pcnet32_access *a = lp->a;
29169 ulong ioaddr = dev->base_addr;
29170 unsigned long flags;
29171 int i;
29172 @@ -1041,7 +1041,7 @@ static void pcnet32_led_blink_callback(s
29173 static int pcnet32_phys_id(struct net_device *dev, u32 data)
29174 {
29175 struct pcnet32_private *lp = netdev_priv(dev);
29176 - struct pcnet32_access *a = &lp->a;
29177 + struct pcnet32_access *a = lp->a;
29178 ulong ioaddr = dev->base_addr;
29179 unsigned long flags;
29180 int i, regs[4];
29181 @@ -1085,7 +1085,7 @@ static int pcnet32_suspend(struct net_de
29182 {
29183 int csr5;
29184 struct pcnet32_private *lp = netdev_priv(dev);
29185 - struct pcnet32_access *a = &lp->a;
29186 + struct pcnet32_access *a = lp->a;
29187 ulong ioaddr = dev->base_addr;
29188 int ticks;
29189
29190 @@ -1342,8 +1342,8 @@ static int pcnet32_poll(struct napi_stru
29191 spin_lock_irqsave(&lp->lock, flags);
29192 if (pcnet32_tx(dev)) {
29193 /* reset the chip to clear the error condition, then restart */
29194 - lp->a.reset(ioaddr);
29195 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29196 + lp->a->reset(ioaddr);
29197 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29198 pcnet32_restart(dev, CSR0_START);
29199 netif_wake_queue(dev);
29200 }
29201 @@ -1355,12 +1355,12 @@ static int pcnet32_poll(struct napi_stru
29202 __napi_complete(napi);
29203
29204 /* clear interrupt masks */
29205 - val = lp->a.read_csr(ioaddr, CSR3);
29206 + val = lp->a->read_csr(ioaddr, CSR3);
29207 val &= 0x00ff;
29208 - lp->a.write_csr(ioaddr, CSR3, val);
29209 + lp->a->write_csr(ioaddr, CSR3, val);
29210
29211 /* Set interrupt enable. */
29212 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
29213 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
29214
29215 spin_unlock_irqrestore(&lp->lock, flags);
29216 }
29217 @@ -1383,7 +1383,7 @@ static void pcnet32_get_regs(struct net_
29218 int i, csr0;
29219 u16 *buff = ptr;
29220 struct pcnet32_private *lp = netdev_priv(dev);
29221 - struct pcnet32_access *a = &lp->a;
29222 + struct pcnet32_access *a = lp->a;
29223 ulong ioaddr = dev->base_addr;
29224 unsigned long flags;
29225
29226 @@ -1419,9 +1419,9 @@ static void pcnet32_get_regs(struct net_
29227 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
29228 if (lp->phymask & (1 << j)) {
29229 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
29230 - lp->a.write_bcr(ioaddr, 33,
29231 + lp->a->write_bcr(ioaddr, 33,
29232 (j << 5) | i);
29233 - *buff++ = lp->a.read_bcr(ioaddr, 34);
29234 + *buff++ = lp->a->read_bcr(ioaddr, 34);
29235 }
29236 }
29237 }
29238 @@ -1803,7 +1803,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29239 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
29240 lp->options |= PCNET32_PORT_FD;
29241
29242 - lp->a = *a;
29243 + lp->a = a;
29244
29245 /* prior to register_netdev, dev->name is not yet correct */
29246 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
29247 @@ -1862,7 +1862,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29248 if (lp->mii) {
29249 /* lp->phycount and lp->phymask are set to 0 by memset above */
29250
29251 - lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29252 + lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
29253 /* scan for PHYs */
29254 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29255 unsigned short id1, id2;
29256 @@ -1882,7 +1882,7 @@ pcnet32_probe1(unsigned long ioaddr, int
29257 pr_info("Found PHY %04x:%04x at address %d\n",
29258 id1, id2, i);
29259 }
29260 - lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29261 + lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
29262 if (lp->phycount > 1)
29263 lp->options |= PCNET32_PORT_MII;
29264 }
29265 @@ -2038,10 +2038,10 @@ static int pcnet32_open(struct net_devic
29266 }
29267
29268 /* Reset the PCNET32 */
29269 - lp->a.reset(ioaddr);
29270 + lp->a->reset(ioaddr);
29271
29272 /* switch pcnet32 to 32bit mode */
29273 - lp->a.write_bcr(ioaddr, 20, 2);
29274 + lp->a->write_bcr(ioaddr, 20, 2);
29275
29276 netif_printk(lp, ifup, KERN_DEBUG, dev,
29277 "%s() irq %d tx/rx rings %#x/%#x init %#x\n",
29278 @@ -2050,14 +2050,14 @@ static int pcnet32_open(struct net_devic
29279 (u32) (lp->init_dma_addr));
29280
29281 /* set/reset autoselect bit */
29282 - val = lp->a.read_bcr(ioaddr, 2) & ~2;
29283 + val = lp->a->read_bcr(ioaddr, 2) & ~2;
29284 if (lp->options & PCNET32_PORT_ASEL)
29285 val |= 2;
29286 - lp->a.write_bcr(ioaddr, 2, val);
29287 + lp->a->write_bcr(ioaddr, 2, val);
29288
29289 /* handle full duplex setting */
29290 if (lp->mii_if.full_duplex) {
29291 - val = lp->a.read_bcr(ioaddr, 9) & ~3;
29292 + val = lp->a->read_bcr(ioaddr, 9) & ~3;
29293 if (lp->options & PCNET32_PORT_FD) {
29294 val |= 1;
29295 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
29296 @@ -2067,14 +2067,14 @@ static int pcnet32_open(struct net_devic
29297 if (lp->chip_version == 0x2627)
29298 val |= 3;
29299 }
29300 - lp->a.write_bcr(ioaddr, 9, val);
29301 + lp->a->write_bcr(ioaddr, 9, val);
29302 }
29303
29304 /* set/reset GPSI bit in test register */
29305 - val = lp->a.read_csr(ioaddr, 124) & ~0x10;
29306 + val = lp->a->read_csr(ioaddr, 124) & ~0x10;
29307 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
29308 val |= 0x10;
29309 - lp->a.write_csr(ioaddr, 124, val);
29310 + lp->a->write_csr(ioaddr, 124, val);
29311
29312 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
29313 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
29314 @@ -2093,24 +2093,24 @@ static int pcnet32_open(struct net_devic
29315 * duplex, and/or enable auto negotiation, and clear DANAS
29316 */
29317 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
29318 - lp->a.write_bcr(ioaddr, 32,
29319 - lp->a.read_bcr(ioaddr, 32) | 0x0080);
29320 + lp->a->write_bcr(ioaddr, 32,
29321 + lp->a->read_bcr(ioaddr, 32) | 0x0080);
29322 /* disable Auto Negotiation, set 10Mpbs, HD */
29323 - val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
29324 + val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
29325 if (lp->options & PCNET32_PORT_FD)
29326 val |= 0x10;
29327 if (lp->options & PCNET32_PORT_100)
29328 val |= 0x08;
29329 - lp->a.write_bcr(ioaddr, 32, val);
29330 + lp->a->write_bcr(ioaddr, 32, val);
29331 } else {
29332 if (lp->options & PCNET32_PORT_ASEL) {
29333 - lp->a.write_bcr(ioaddr, 32,
29334 - lp->a.read_bcr(ioaddr,
29335 + lp->a->write_bcr(ioaddr, 32,
29336 + lp->a->read_bcr(ioaddr,
29337 32) | 0x0080);
29338 /* enable auto negotiate, setup, disable fd */
29339 - val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
29340 + val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
29341 val |= 0x20;
29342 - lp->a.write_bcr(ioaddr, 32, val);
29343 + lp->a->write_bcr(ioaddr, 32, val);
29344 }
29345 }
29346 } else {
29347 @@ -2123,10 +2123,10 @@ static int pcnet32_open(struct net_devic
29348 * There is really no good other way to handle multiple PHYs
29349 * other than turning off all automatics
29350 */
29351 - val = lp->a.read_bcr(ioaddr, 2);
29352 - lp->a.write_bcr(ioaddr, 2, val & ~2);
29353 - val = lp->a.read_bcr(ioaddr, 32);
29354 - lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29355 + val = lp->a->read_bcr(ioaddr, 2);
29356 + lp->a->write_bcr(ioaddr, 2, val & ~2);
29357 + val = lp->a->read_bcr(ioaddr, 32);
29358 + lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
29359
29360 if (!(lp->options & PCNET32_PORT_ASEL)) {
29361 /* setup ecmd */
29362 @@ -2136,7 +2136,7 @@ static int pcnet32_open(struct net_devic
29363 ecmd.speed =
29364 lp->
29365 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
29366 - bcr9 = lp->a.read_bcr(ioaddr, 9);
29367 + bcr9 = lp->a->read_bcr(ioaddr, 9);
29368
29369 if (lp->options & PCNET32_PORT_FD) {
29370 ecmd.duplex = DUPLEX_FULL;
29371 @@ -2145,7 +2145,7 @@ static int pcnet32_open(struct net_devic
29372 ecmd.duplex = DUPLEX_HALF;
29373 bcr9 |= ~(1 << 0);
29374 }
29375 - lp->a.write_bcr(ioaddr, 9, bcr9);
29376 + lp->a->write_bcr(ioaddr, 9, bcr9);
29377 }
29378
29379 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
29380 @@ -2176,9 +2176,9 @@ static int pcnet32_open(struct net_devic
29381
29382 #ifdef DO_DXSUFLO
29383 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
29384 - val = lp->a.read_csr(ioaddr, CSR3);
29385 + val = lp->a->read_csr(ioaddr, CSR3);
29386 val |= 0x40;
29387 - lp->a.write_csr(ioaddr, CSR3, val);
29388 + lp->a->write_csr(ioaddr, CSR3, val);
29389 }
29390 #endif
29391
29392 @@ -2194,11 +2194,11 @@ static int pcnet32_open(struct net_devic
29393 napi_enable(&lp->napi);
29394
29395 /* Re-initialize the PCNET32, and start it when done. */
29396 - lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29397 - lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29398 + lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
29399 + lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
29400
29401 - lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29402 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29403 + lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
29404 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29405
29406 netif_start_queue(dev);
29407
29408 @@ -2210,19 +2210,19 @@ static int pcnet32_open(struct net_devic
29409
29410 i = 0;
29411 while (i++ < 100)
29412 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29413 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29414 break;
29415 /*
29416 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
29417 * reports that doing so triggers a bug in the '974.
29418 */
29419 - lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
29420 + lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
29421
29422 netif_printk(lp, ifup, KERN_DEBUG, dev,
29423 "pcnet32 open after %d ticks, init block %#x csr0 %4.4x\n",
29424 i,
29425 (u32) (lp->init_dma_addr),
29426 - lp->a.read_csr(ioaddr, CSR0));
29427 + lp->a->read_csr(ioaddr, CSR0));
29428
29429 spin_unlock_irqrestore(&lp->lock, flags);
29430
29431 @@ -2236,7 +2236,7 @@ err_free_ring:
29432 * Switch back to 16bit mode to avoid problems with dumb
29433 * DOS packet driver after a warm reboot
29434 */
29435 - lp->a.write_bcr(ioaddr, 20, 4);
29436 + lp->a->write_bcr(ioaddr, 20, 4);
29437
29438 err_free_irq:
29439 spin_unlock_irqrestore(&lp->lock, flags);
29440 @@ -2341,7 +2341,7 @@ static void pcnet32_restart(struct net_d
29441
29442 /* wait for stop */
29443 for (i = 0; i < 100; i++)
29444 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
29445 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
29446 break;
29447
29448 if (i >= 100)
29449 @@ -2353,13 +2353,13 @@ static void pcnet32_restart(struct net_d
29450 return;
29451
29452 /* ReInit Ring */
29453 - lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
29454 + lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
29455 i = 0;
29456 while (i++ < 1000)
29457 - if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
29458 + if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
29459 break;
29460
29461 - lp->a.write_csr(ioaddr, CSR0, csr0_bits);
29462 + lp->a->write_csr(ioaddr, CSR0, csr0_bits);
29463 }
29464
29465 static void pcnet32_tx_timeout(struct net_device *dev)
29466 @@ -2371,8 +2371,8 @@ static void pcnet32_tx_timeout(struct ne
29467 /* Transmitter timeout, serious problems. */
29468 if (pcnet32_debug & NETIF_MSG_DRV)
29469 pr_err("%s: transmit timed out, status %4.4x, resetting\n",
29470 - dev->name, lp->a.read_csr(ioaddr, CSR0));
29471 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29472 + dev->name, lp->a->read_csr(ioaddr, CSR0));
29473 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29474 dev->stats.tx_errors++;
29475 if (netif_msg_tx_err(lp)) {
29476 int i;
29477 @@ -2415,7 +2415,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29478
29479 netif_printk(lp, tx_queued, KERN_DEBUG, dev,
29480 "%s() called, csr0 %4.4x\n",
29481 - __func__, lp->a.read_csr(ioaddr, CSR0));
29482 + __func__, lp->a->read_csr(ioaddr, CSR0));
29483
29484 /* Default status -- will not enable Successful-TxDone
29485 * interrupt when that option is available to us.
29486 @@ -2445,7 +2445,7 @@ static netdev_tx_t pcnet32_start_xmit(st
29487 dev->stats.tx_bytes += skb->len;
29488
29489 /* Trigger an immediate send poll. */
29490 - lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29491 + lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
29492
29493 if (lp->tx_ring[(entry + 1) & lp->tx_mod_mask].base != 0) {
29494 lp->tx_full = 1;
29495 @@ -2470,16 +2470,16 @@ pcnet32_interrupt(int irq, void *dev_id)
29496
29497 spin_lock(&lp->lock);
29498
29499 - csr0 = lp->a.read_csr(ioaddr, CSR0);
29500 + csr0 = lp->a->read_csr(ioaddr, CSR0);
29501 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
29502 if (csr0 == 0xffff)
29503 break; /* PCMCIA remove happened */
29504 /* Acknowledge all of the current interrupt sources ASAP. */
29505 - lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29506 + lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
29507
29508 netif_printk(lp, intr, KERN_DEBUG, dev,
29509 "interrupt csr0=%#2.2x new csr=%#2.2x\n",
29510 - csr0, lp->a.read_csr(ioaddr, CSR0));
29511 + csr0, lp->a->read_csr(ioaddr, CSR0));
29512
29513 /* Log misc errors. */
29514 if (csr0 & 0x4000)
29515 @@ -2506,19 +2506,19 @@ pcnet32_interrupt(int irq, void *dev_id)
29516 if (napi_schedule_prep(&lp->napi)) {
29517 u16 val;
29518 /* set interrupt masks */
29519 - val = lp->a.read_csr(ioaddr, CSR3);
29520 + val = lp->a->read_csr(ioaddr, CSR3);
29521 val |= 0x5f00;
29522 - lp->a.write_csr(ioaddr, CSR3, val);
29523 + lp->a->write_csr(ioaddr, CSR3, val);
29524
29525 __napi_schedule(&lp->napi);
29526 break;
29527 }
29528 - csr0 = lp->a.read_csr(ioaddr, CSR0);
29529 + csr0 = lp->a->read_csr(ioaddr, CSR0);
29530 }
29531
29532 netif_printk(lp, intr, KERN_DEBUG, dev,
29533 "exiting interrupt, csr0=%#4.4x\n",
29534 - lp->a.read_csr(ioaddr, CSR0));
29535 + lp->a->read_csr(ioaddr, CSR0));
29536
29537 spin_unlock(&lp->lock);
29538
29539 @@ -2538,20 +2538,20 @@ static int pcnet32_close(struct net_devi
29540
29541 spin_lock_irqsave(&lp->lock, flags);
29542
29543 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29544 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29545
29546 netif_printk(lp, ifdown, KERN_DEBUG, dev,
29547 "Shutting down ethercard, status was %2.2x\n",
29548 - lp->a.read_csr(ioaddr, CSR0));
29549 + lp->a->read_csr(ioaddr, CSR0));
29550
29551 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
29552 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29553 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29554
29555 /*
29556 * Switch back to 16bit mode to avoid problems with dumb
29557 * DOS packet driver after a warm reboot
29558 */
29559 - lp->a.write_bcr(ioaddr, 20, 4);
29560 + lp->a->write_bcr(ioaddr, 20, 4);
29561
29562 spin_unlock_irqrestore(&lp->lock, flags);
29563
29564 @@ -2574,7 +2574,7 @@ static struct net_device_stats *pcnet32_
29565 unsigned long flags;
29566
29567 spin_lock_irqsave(&lp->lock, flags);
29568 - dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
29569 + dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
29570 spin_unlock_irqrestore(&lp->lock, flags);
29571
29572 return &dev->stats;
29573 @@ -2596,10 +2596,10 @@ static void pcnet32_load_multicast(struc
29574 if (dev->flags & IFF_ALLMULTI) {
29575 ib->filter[0] = cpu_to_le32(~0U);
29576 ib->filter[1] = cpu_to_le32(~0U);
29577 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29578 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29579 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29580 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29581 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
29582 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
29583 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
29584 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
29585 return;
29586 }
29587 /* clear the multicast filter */
29588 @@ -2619,7 +2619,7 @@ static void pcnet32_load_multicast(struc
29589 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
29590 }
29591 for (i = 0; i < 4; i++)
29592 - lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
29593 + lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
29594 le16_to_cpu(mcast_table[i]));
29595 }
29596
29597 @@ -2634,28 +2634,28 @@ static void pcnet32_set_multicast_list(s
29598
29599 spin_lock_irqsave(&lp->lock, flags);
29600 suspended = pcnet32_suspend(dev, &flags, 0);
29601 - csr15 = lp->a.read_csr(ioaddr, CSR15);
29602 + csr15 = lp->a->read_csr(ioaddr, CSR15);
29603 if (dev->flags & IFF_PROMISC) {
29604 /* Log any net taps. */
29605 netif_info(lp, hw, dev, "Promiscuous mode enabled\n");
29606 lp->init_block->mode =
29607 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
29608 7);
29609 - lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
29610 + lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
29611 } else {
29612 lp->init_block->mode =
29613 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
29614 - lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29615 + lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
29616 pcnet32_load_multicast(dev);
29617 }
29618
29619 if (suspended) {
29620 int csr5;
29621 /* clear SUSPEND (SPND) - CSR5 bit 0 */
29622 - csr5 = lp->a.read_csr(ioaddr, CSR5);
29623 - lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29624 + csr5 = lp->a->read_csr(ioaddr, CSR5);
29625 + lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
29626 } else {
29627 - lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
29628 + lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
29629 pcnet32_restart(dev, CSR0_NORMAL);
29630 netif_wake_queue(dev);
29631 }
29632 @@ -2673,8 +2673,8 @@ static int mdio_read(struct net_device *
29633 if (!lp->mii)
29634 return 0;
29635
29636 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29637 - val_out = lp->a.read_bcr(ioaddr, 34);
29638 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29639 + val_out = lp->a->read_bcr(ioaddr, 34);
29640
29641 return val_out;
29642 }
29643 @@ -2688,8 +2688,8 @@ static void mdio_write(struct net_device
29644 if (!lp->mii)
29645 return;
29646
29647 - lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29648 - lp->a.write_bcr(ioaddr, 34, val);
29649 + lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
29650 + lp->a->write_bcr(ioaddr, 34, val);
29651 }
29652
29653 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
29654 @@ -2766,7 +2766,7 @@ static void pcnet32_check_media(struct n
29655 curr_link = mii_link_ok(&lp->mii_if);
29656 } else {
29657 ulong ioaddr = dev->base_addr; /* card base I/O address */
29658 - curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
29659 + curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
29660 }
29661 if (!curr_link) {
29662 if (prev_link || verbose) {
29663 @@ -2789,13 +2789,13 @@ static void pcnet32_check_media(struct n
29664 (ecmd.duplex == DUPLEX_FULL)
29665 ? "full" : "half");
29666 }
29667 - bcr9 = lp->a.read_bcr(dev->base_addr, 9);
29668 + bcr9 = lp->a->read_bcr(dev->base_addr, 9);
29669 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
29670 if (lp->mii_if.full_duplex)
29671 bcr9 |= (1 << 0);
29672 else
29673 bcr9 &= ~(1 << 0);
29674 - lp->a.write_bcr(dev->base_addr, 9, bcr9);
29675 + lp->a->write_bcr(dev->base_addr, 9, bcr9);
29676 }
29677 } else {
29678 netif_info(lp, link, dev, "link up\n");
29679 diff -urNp linux-2.6.39.4/drivers/net/ppp_generic.c linux-2.6.39.4/drivers/net/ppp_generic.c
29680 --- linux-2.6.39.4/drivers/net/ppp_generic.c 2011-05-19 00:06:34.000000000 -0400
29681 +++ linux-2.6.39.4/drivers/net/ppp_generic.c 2011-08-05 19:44:37.000000000 -0400
29682 @@ -987,7 +987,6 @@ ppp_net_ioctl(struct net_device *dev, st
29683 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
29684 struct ppp_stats stats;
29685 struct ppp_comp_stats cstats;
29686 - char *vers;
29687
29688 switch (cmd) {
29689 case SIOCGPPPSTATS:
29690 @@ -1009,8 +1008,7 @@ ppp_net_ioctl(struct net_device *dev, st
29691 break;
29692
29693 case SIOCGPPPVER:
29694 - vers = PPP_VERSION;
29695 - if (copy_to_user(addr, vers, strlen(vers) + 1))
29696 + if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
29697 break;
29698 err = 0;
29699 break;
29700 diff -urNp linux-2.6.39.4/drivers/net/r8169.c linux-2.6.39.4/drivers/net/r8169.c
29701 --- linux-2.6.39.4/drivers/net/r8169.c 2011-05-19 00:06:34.000000000 -0400
29702 +++ linux-2.6.39.4/drivers/net/r8169.c 2011-08-05 20:34:06.000000000 -0400
29703 @@ -552,12 +552,12 @@ struct rtl8169_private {
29704 struct mdio_ops {
29705 void (*write)(void __iomem *, int, int);
29706 int (*read)(void __iomem *, int);
29707 - } mdio_ops;
29708 + } __no_const mdio_ops;
29709
29710 struct pll_power_ops {
29711 void (*down)(struct rtl8169_private *);
29712 void (*up)(struct rtl8169_private *);
29713 - } pll_power_ops;
29714 + } __no_const pll_power_ops;
29715
29716 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
29717 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
29718 diff -urNp linux-2.6.39.4/drivers/net/tg3.h linux-2.6.39.4/drivers/net/tg3.h
29719 --- linux-2.6.39.4/drivers/net/tg3.h 2011-05-19 00:06:34.000000000 -0400
29720 +++ linux-2.6.39.4/drivers/net/tg3.h 2011-08-05 19:44:37.000000000 -0400
29721 @@ -131,6 +131,7 @@
29722 #define CHIPREV_ID_5750_A0 0x4000
29723 #define CHIPREV_ID_5750_A1 0x4001
29724 #define CHIPREV_ID_5750_A3 0x4003
29725 +#define CHIPREV_ID_5750_C1 0x4201
29726 #define CHIPREV_ID_5750_C2 0x4202
29727 #define CHIPREV_ID_5752_A0_HW 0x5000
29728 #define CHIPREV_ID_5752_A0 0x6000
29729 diff -urNp linux-2.6.39.4/drivers/net/tokenring/abyss.c linux-2.6.39.4/drivers/net/tokenring/abyss.c
29730 --- linux-2.6.39.4/drivers/net/tokenring/abyss.c 2011-05-19 00:06:34.000000000 -0400
29731 +++ linux-2.6.39.4/drivers/net/tokenring/abyss.c 2011-08-05 20:34:06.000000000 -0400
29732 @@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
29733
29734 static int __init abyss_init (void)
29735 {
29736 - abyss_netdev_ops = tms380tr_netdev_ops;
29737 + pax_open_kernel();
29738 + memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29739
29740 - abyss_netdev_ops.ndo_open = abyss_open;
29741 - abyss_netdev_ops.ndo_stop = abyss_close;
29742 + *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
29743 + *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
29744 + pax_close_kernel();
29745
29746 return pci_register_driver(&abyss_driver);
29747 }
29748 diff -urNp linux-2.6.39.4/drivers/net/tokenring/madgemc.c linux-2.6.39.4/drivers/net/tokenring/madgemc.c
29749 --- linux-2.6.39.4/drivers/net/tokenring/madgemc.c 2011-05-19 00:06:34.000000000 -0400
29750 +++ linux-2.6.39.4/drivers/net/tokenring/madgemc.c 2011-08-05 20:34:06.000000000 -0400
29751 @@ -744,9 +744,11 @@ static struct mca_driver madgemc_driver
29752
29753 static int __init madgemc_init (void)
29754 {
29755 - madgemc_netdev_ops = tms380tr_netdev_ops;
29756 - madgemc_netdev_ops.ndo_open = madgemc_open;
29757 - madgemc_netdev_ops.ndo_stop = madgemc_close;
29758 + pax_open_kernel();
29759 + memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29760 + *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
29761 + *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
29762 + pax_close_kernel();
29763
29764 return mca_register_driver (&madgemc_driver);
29765 }
29766 diff -urNp linux-2.6.39.4/drivers/net/tokenring/proteon.c linux-2.6.39.4/drivers/net/tokenring/proteon.c
29767 --- linux-2.6.39.4/drivers/net/tokenring/proteon.c 2011-05-19 00:06:34.000000000 -0400
29768 +++ linux-2.6.39.4/drivers/net/tokenring/proteon.c 2011-08-05 20:34:06.000000000 -0400
29769 @@ -353,9 +353,11 @@ static int __init proteon_init(void)
29770 struct platform_device *pdev;
29771 int i, num = 0, err = 0;
29772
29773 - proteon_netdev_ops = tms380tr_netdev_ops;
29774 - proteon_netdev_ops.ndo_open = proteon_open;
29775 - proteon_netdev_ops.ndo_stop = tms380tr_close;
29776 + pax_open_kernel();
29777 + memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29778 + *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
29779 + *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
29780 + pax_close_kernel();
29781
29782 err = platform_driver_register(&proteon_driver);
29783 if (err)
29784 diff -urNp linux-2.6.39.4/drivers/net/tokenring/skisa.c linux-2.6.39.4/drivers/net/tokenring/skisa.c
29785 --- linux-2.6.39.4/drivers/net/tokenring/skisa.c 2011-05-19 00:06:34.000000000 -0400
29786 +++ linux-2.6.39.4/drivers/net/tokenring/skisa.c 2011-08-05 20:34:06.000000000 -0400
29787 @@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
29788 struct platform_device *pdev;
29789 int i, num = 0, err = 0;
29790
29791 - sk_isa_netdev_ops = tms380tr_netdev_ops;
29792 - sk_isa_netdev_ops.ndo_open = sk_isa_open;
29793 - sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29794 + pax_open_kernel();
29795 + memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
29796 + *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
29797 + *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
29798 + pax_close_kernel();
29799
29800 err = platform_driver_register(&sk_isa_driver);
29801 if (err)
29802 diff -urNp linux-2.6.39.4/drivers/net/tulip/de2104x.c linux-2.6.39.4/drivers/net/tulip/de2104x.c
29803 --- linux-2.6.39.4/drivers/net/tulip/de2104x.c 2011-05-19 00:06:34.000000000 -0400
29804 +++ linux-2.6.39.4/drivers/net/tulip/de2104x.c 2011-08-05 19:44:37.000000000 -0400
29805 @@ -1817,6 +1817,8 @@ static void __devinit de21041_get_srom_i
29806 struct de_srom_info_leaf *il;
29807 void *bufp;
29808
29809 + pax_track_stack();
29810 +
29811 /* download entire eeprom */
29812 for (i = 0; i < DE_EEPROM_WORDS; i++)
29813 ((__le16 *)ee_data)[i] =
29814 diff -urNp linux-2.6.39.4/drivers/net/tulip/de4x5.c linux-2.6.39.4/drivers/net/tulip/de4x5.c
29815 --- linux-2.6.39.4/drivers/net/tulip/de4x5.c 2011-05-19 00:06:34.000000000 -0400
29816 +++ linux-2.6.39.4/drivers/net/tulip/de4x5.c 2011-08-05 19:44:37.000000000 -0400
29817 @@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru
29818 for (i=0; i<ETH_ALEN; i++) {
29819 tmp.addr[i] = dev->dev_addr[i];
29820 }
29821 - if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29822 + if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
29823 break;
29824
29825 case DE4X5_SET_HWADDR: /* Set the hardware address */
29826 @@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru
29827 spin_lock_irqsave(&lp->lock, flags);
29828 memcpy(&statbuf, &lp->pktStats, ioc->len);
29829 spin_unlock_irqrestore(&lp->lock, flags);
29830 - if (copy_to_user(ioc->data, &statbuf, ioc->len))
29831 + if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
29832 return -EFAULT;
29833 break;
29834 }
29835 diff -urNp linux-2.6.39.4/drivers/net/usb/hso.c linux-2.6.39.4/drivers/net/usb/hso.c
29836 --- linux-2.6.39.4/drivers/net/usb/hso.c 2011-05-19 00:06:34.000000000 -0400
29837 +++ linux-2.6.39.4/drivers/net/usb/hso.c 2011-08-05 19:44:37.000000000 -0400
29838 @@ -71,7 +71,7 @@
29839 #include <asm/byteorder.h>
29840 #include <linux/serial_core.h>
29841 #include <linux/serial.h>
29842 -
29843 +#include <asm/local.h>
29844
29845 #define MOD_AUTHOR "Option Wireless"
29846 #define MOD_DESCRIPTION "USB High Speed Option driver"
29847 @@ -257,7 +257,7 @@ struct hso_serial {
29848
29849 /* from usb_serial_port */
29850 struct tty_struct *tty;
29851 - int open_count;
29852 + local_t open_count;
29853 spinlock_t serial_lock;
29854
29855 int (*write_data) (struct hso_serial *serial);
29856 @@ -1190,7 +1190,7 @@ static void put_rxbuf_data_and_resubmit_
29857 struct urb *urb;
29858
29859 urb = serial->rx_urb[0];
29860 - if (serial->open_count > 0) {
29861 + if (local_read(&serial->open_count) > 0) {
29862 count = put_rxbuf_data(urb, serial);
29863 if (count == -1)
29864 return;
29865 @@ -1226,7 +1226,7 @@ static void hso_std_serial_read_bulk_cal
29866 DUMP1(urb->transfer_buffer, urb->actual_length);
29867
29868 /* Anyone listening? */
29869 - if (serial->open_count == 0)
29870 + if (local_read(&serial->open_count) == 0)
29871 return;
29872
29873 if (status == 0) {
29874 @@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
29875 spin_unlock_irq(&serial->serial_lock);
29876
29877 /* check for port already opened, if not set the termios */
29878 - serial->open_count++;
29879 - if (serial->open_count == 1) {
29880 + if (local_inc_return(&serial->open_count) == 1) {
29881 serial->rx_state = RX_IDLE;
29882 /* Force default termio settings */
29883 _hso_serial_set_termios(tty, NULL);
29884 @@ -1324,7 +1323,7 @@ static int hso_serial_open(struct tty_st
29885 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
29886 if (result) {
29887 hso_stop_serial_device(serial->parent);
29888 - serial->open_count--;
29889 + local_dec(&serial->open_count);
29890 kref_put(&serial->parent->ref, hso_serial_ref_free);
29891 }
29892 } else {
29893 @@ -1361,10 +1360,10 @@ static void hso_serial_close(struct tty_
29894
29895 /* reset the rts and dtr */
29896 /* do the actual close */
29897 - serial->open_count--;
29898 + local_dec(&serial->open_count);
29899
29900 - if (serial->open_count <= 0) {
29901 - serial->open_count = 0;
29902 + if (local_read(&serial->open_count) <= 0) {
29903 + local_set(&serial->open_count, 0);
29904 spin_lock_irq(&serial->serial_lock);
29905 if (serial->tty == tty) {
29906 serial->tty->driver_data = NULL;
29907 @@ -1446,7 +1445,7 @@ static void hso_serial_set_termios(struc
29908
29909 /* the actual setup */
29910 spin_lock_irqsave(&serial->serial_lock, flags);
29911 - if (serial->open_count)
29912 + if (local_read(&serial->open_count))
29913 _hso_serial_set_termios(tty, old);
29914 else
29915 tty->termios = old;
29916 @@ -1905,7 +1904,7 @@ static void intr_callback(struct urb *ur
29917 D1("Pending read interrupt on port %d\n", i);
29918 spin_lock(&serial->serial_lock);
29919 if (serial->rx_state == RX_IDLE &&
29920 - serial->open_count > 0) {
29921 + local_read(&serial->open_count) > 0) {
29922 /* Setup and send a ctrl req read on
29923 * port i */
29924 if (!serial->rx_urb_filled[0]) {
29925 @@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
29926 /* Start all serial ports */
29927 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
29928 if (serial_table[i] && (serial_table[i]->interface == iface)) {
29929 - if (dev2ser(serial_table[i])->open_count) {
29930 + if (local_read(&dev2ser(serial_table[i])->open_count)) {
29931 result =
29932 hso_start_serial_device(serial_table[i], GFP_NOIO);
29933 hso_kick_transmit(dev2ser(serial_table[i]));
29934 diff -urNp linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c
29935 --- linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-05-19 00:06:34.000000000 -0400
29936 +++ linux-2.6.39.4/drivers/net/vmxnet3/vmxnet3_ethtool.c 2011-08-05 19:44:37.000000000 -0400
29937 @@ -631,8 +631,7 @@ vmxnet3_set_rss_indir(struct net_device
29938 * Return with error code if any of the queue indices
29939 * is out of range
29940 */
29941 - if (p->ring_index[i] < 0 ||
29942 - p->ring_index[i] >= adapter->num_rx_queues)
29943 + if (p->ring_index[i] >= adapter->num_rx_queues)
29944 return -EINVAL;
29945 }
29946
29947 diff -urNp linux-2.6.39.4/drivers/net/vxge/vxge-config.h linux-2.6.39.4/drivers/net/vxge/vxge-config.h
29948 --- linux-2.6.39.4/drivers/net/vxge/vxge-config.h 2011-05-19 00:06:34.000000000 -0400
29949 +++ linux-2.6.39.4/drivers/net/vxge/vxge-config.h 2011-08-05 20:34:06.000000000 -0400
29950 @@ -508,7 +508,7 @@ struct vxge_hw_uld_cbs {
29951 void (*link_down)(struct __vxge_hw_device *devh);
29952 void (*crit_err)(struct __vxge_hw_device *devh,
29953 enum vxge_hw_event type, u64 ext_data);
29954 -};
29955 +} __no_const;
29956
29957 /*
29958 * struct __vxge_hw_blockpool_entry - Block private data structure
29959 diff -urNp linux-2.6.39.4/drivers/net/vxge/vxge-main.c linux-2.6.39.4/drivers/net/vxge/vxge-main.c
29960 --- linux-2.6.39.4/drivers/net/vxge/vxge-main.c 2011-05-19 00:06:34.000000000 -0400
29961 +++ linux-2.6.39.4/drivers/net/vxge/vxge-main.c 2011-08-05 19:44:37.000000000 -0400
29962 @@ -97,6 +97,8 @@ static inline void VXGE_COMPLETE_VPATH_T
29963 struct sk_buff *completed[NR_SKB_COMPLETED];
29964 int more;
29965
29966 + pax_track_stack();
29967 +
29968 do {
29969 more = 0;
29970 skb_ptr = completed;
29971 @@ -1927,6 +1929,8 @@ static enum vxge_hw_status vxge_rth_conf
29972 u8 mtable[256] = {0}; /* CPU to vpath mapping */
29973 int index;
29974
29975 + pax_track_stack();
29976 +
29977 /*
29978 * Filling
29979 * - itable with bucket numbers
29980 diff -urNp linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h
29981 --- linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h 2011-05-19 00:06:34.000000000 -0400
29982 +++ linux-2.6.39.4/drivers/net/vxge/vxge-traffic.h 2011-08-05 20:34:06.000000000 -0400
29983 @@ -2088,7 +2088,7 @@ struct vxge_hw_mempool_cbs {
29984 struct vxge_hw_mempool_dma *dma_object,
29985 u32 index,
29986 u32 is_last);
29987 -};
29988 +} __no_const;
29989
29990 #define VXGE_HW_VIRTUAL_PATH_HANDLE(vpath) \
29991 ((struct __vxge_hw_vpath_handle *)(vpath)->vpath_handles.next)
29992 diff -urNp linux-2.6.39.4/drivers/net/wan/cycx_x25.c linux-2.6.39.4/drivers/net/wan/cycx_x25.c
29993 --- linux-2.6.39.4/drivers/net/wan/cycx_x25.c 2011-05-19 00:06:34.000000000 -0400
29994 +++ linux-2.6.39.4/drivers/net/wan/cycx_x25.c 2011-08-05 19:44:37.000000000 -0400
29995 @@ -1018,6 +1018,8 @@ static void hex_dump(char *msg, unsigned
29996 unsigned char hex[1024],
29997 * phex = hex;
29998
29999 + pax_track_stack();
30000 +
30001 if (len >= (sizeof(hex) / 2))
30002 len = (sizeof(hex) / 2) - 1;
30003
30004 diff -urNp linux-2.6.39.4/drivers/net/wan/hdlc_x25.c linux-2.6.39.4/drivers/net/wan/hdlc_x25.c
30005 --- linux-2.6.39.4/drivers/net/wan/hdlc_x25.c 2011-05-19 00:06:34.000000000 -0400
30006 +++ linux-2.6.39.4/drivers/net/wan/hdlc_x25.c 2011-08-05 20:34:06.000000000 -0400
30007 @@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
30008
30009 static int x25_open(struct net_device *dev)
30010 {
30011 - struct lapb_register_struct cb;
30012 + static struct lapb_register_struct cb = {
30013 + .connect_confirmation = x25_connected,
30014 + .connect_indication = x25_connected,
30015 + .disconnect_confirmation = x25_disconnected,
30016 + .disconnect_indication = x25_disconnected,
30017 + .data_indication = x25_data_indication,
30018 + .data_transmit = x25_data_transmit
30019 + };
30020 int result;
30021
30022 - cb.connect_confirmation = x25_connected;
30023 - cb.connect_indication = x25_connected;
30024 - cb.disconnect_confirmation = x25_disconnected;
30025 - cb.disconnect_indication = x25_disconnected;
30026 - cb.data_indication = x25_data_indication;
30027 - cb.data_transmit = x25_data_transmit;
30028 -
30029 result = lapb_register(dev, &cb);
30030 if (result != LAPB_OK)
30031 return result;
30032 diff -urNp linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c
30033 --- linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c 2011-05-19 00:06:34.000000000 -0400
30034 +++ linux-2.6.39.4/drivers/net/wimax/i2400m/usb-fw.c 2011-08-05 19:44:37.000000000 -0400
30035 @@ -287,6 +287,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
30036 int do_autopm = 1;
30037 DECLARE_COMPLETION_ONSTACK(notif_completion);
30038
30039 + pax_track_stack();
30040 +
30041 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
30042 i2400m, ack, ack_size);
30043 BUG_ON(_ack == i2400m->bm_ack_buf);
30044 diff -urNp linux-2.6.39.4/drivers/net/wireless/airo.c linux-2.6.39.4/drivers/net/wireless/airo.c
30045 --- linux-2.6.39.4/drivers/net/wireless/airo.c 2011-05-19 00:06:34.000000000 -0400
30046 +++ linux-2.6.39.4/drivers/net/wireless/airo.c 2011-08-05 19:44:37.000000000 -0400
30047 @@ -3001,6 +3001,8 @@ static void airo_process_scan_results (s
30048 BSSListElement * loop_net;
30049 BSSListElement * tmp_net;
30050
30051 + pax_track_stack();
30052 +
30053 /* Blow away current list of scan results */
30054 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
30055 list_move_tail (&loop_net->list, &ai->network_free_list);
30056 @@ -3792,6 +3794,8 @@ static u16 setup_card(struct airo_info *
30057 WepKeyRid wkr;
30058 int rc;
30059
30060 + pax_track_stack();
30061 +
30062 memset( &mySsid, 0, sizeof( mySsid ) );
30063 kfree (ai->flash);
30064 ai->flash = NULL;
30065 @@ -4760,6 +4764,8 @@ static int proc_stats_rid_open( struct i
30066 __le32 *vals = stats.vals;
30067 int len;
30068
30069 + pax_track_stack();
30070 +
30071 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
30072 return -ENOMEM;
30073 data = file->private_data;
30074 @@ -5483,6 +5489,8 @@ static int proc_BSSList_open( struct ino
30075 /* If doLoseSync is not 1, we won't do a Lose Sync */
30076 int doLoseSync = -1;
30077
30078 + pax_track_stack();
30079 +
30080 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
30081 return -ENOMEM;
30082 data = file->private_data;
30083 @@ -7190,6 +7198,8 @@ static int airo_get_aplist(struct net_de
30084 int i;
30085 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
30086
30087 + pax_track_stack();
30088 +
30089 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
30090 if (!qual)
30091 return -ENOMEM;
30092 @@ -7750,6 +7760,8 @@ static void airo_read_wireless_stats(str
30093 CapabilityRid cap_rid;
30094 __le32 *vals = stats_rid.vals;
30095
30096 + pax_track_stack();
30097 +
30098 /* Get stats out of the card */
30099 clear_bit(JOB_WSTATS, &local->jobs);
30100 if (local->power.event) {
30101 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c
30102 --- linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c 2011-05-19 00:06:34.000000000 -0400
30103 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath5k/debug.c 2011-08-05 19:44:37.000000000 -0400
30104 @@ -204,6 +204,8 @@ static ssize_t read_file_beacon(struct f
30105 unsigned int v;
30106 u64 tsf;
30107
30108 + pax_track_stack();
30109 +
30110 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
30111 len += snprintf(buf+len, sizeof(buf)-len,
30112 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
30113 @@ -323,6 +325,8 @@ static ssize_t read_file_debug(struct fi
30114 unsigned int len = 0;
30115 unsigned int i;
30116
30117 + pax_track_stack();
30118 +
30119 len += snprintf(buf+len, sizeof(buf)-len,
30120 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
30121
30122 @@ -384,6 +388,8 @@ static ssize_t read_file_antenna(struct
30123 unsigned int i;
30124 unsigned int v;
30125
30126 + pax_track_stack();
30127 +
30128 len += snprintf(buf+len, sizeof(buf)-len, "antenna mode\t%d\n",
30129 sc->ah->ah_ant_mode);
30130 len += snprintf(buf+len, sizeof(buf)-len, "default antenna\t%d\n",
30131 @@ -494,6 +500,8 @@ static ssize_t read_file_misc(struct fil
30132 unsigned int len = 0;
30133 u32 filt = ath5k_hw_get_rx_filter(sc->ah);
30134
30135 + pax_track_stack();
30136 +
30137 len += snprintf(buf+len, sizeof(buf)-len, "bssid-mask: %pM\n",
30138 sc->bssidmask);
30139 len += snprintf(buf+len, sizeof(buf)-len, "filter-flags: 0x%x ",
30140 @@ -550,6 +558,8 @@ static ssize_t read_file_frameerrors(str
30141 unsigned int len = 0;
30142 int i;
30143
30144 + pax_track_stack();
30145 +
30146 len += snprintf(buf+len, sizeof(buf)-len,
30147 "RX\n---------------------\n");
30148 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
30149 @@ -667,6 +677,8 @@ static ssize_t read_file_ani(struct file
30150 char buf[700];
30151 unsigned int len = 0;
30152
30153 + pax_track_stack();
30154 +
30155 len += snprintf(buf+len, sizeof(buf)-len,
30156 "HW has PHY error counters:\t%s\n",
30157 sc->ah->ah_capabilities.cap_has_phyerr_counters ?
30158 @@ -827,6 +839,8 @@ static ssize_t read_file_queue(struct fi
30159 struct ath5k_buf *bf, *bf0;
30160 int i, n;
30161
30162 + pax_track_stack();
30163 +
30164 len += snprintf(buf+len, sizeof(buf)-len,
30165 "available txbuffers: %d\n", sc->txbuf_len);
30166
30167 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c
30168 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-05-19 00:06:34.000000000 -0400
30169 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_calib.c 2011-08-05 19:44:37.000000000 -0400
30170 @@ -734,6 +734,8 @@ static void ar9003_hw_tx_iq_cal(struct a
30171 s32 i, j, ip, im, nmeasurement;
30172 u8 nchains = get_streams(common->tx_chainmask);
30173
30174 + pax_track_stack();
30175 +
30176 for (ip = 0; ip < MPASS; ip++) {
30177 REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
30178 AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
30179 @@ -856,6 +858,8 @@ static void ar9003_hw_tx_iq_cal_post_pro
30180 int i, ip, im, j;
30181 int nmeasurement;
30182
30183 + pax_track_stack();
30184 +
30185 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
30186 if (ah->txchainmask & (1 << i))
30187 num_chains++;
30188 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
30189 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-05-19 00:06:34.000000000 -0400
30190 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/ar9003_paprd.c 2011-08-05 19:44:37.000000000 -0400
30191 @@ -356,6 +356,8 @@ static bool create_pa_curve(u32 *data_L,
30192 int theta_low_bin = 0;
30193 int i;
30194
30195 + pax_track_stack();
30196 +
30197 /* disregard any bin that contains <= 16 samples */
30198 thresh_accum_cnt = 16;
30199 scale_factor = 5;
30200 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c
30201 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c 2011-05-19 00:06:34.000000000 -0400
30202 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/debug.c 2011-08-05 19:44:37.000000000 -0400
30203 @@ -335,6 +335,8 @@ static ssize_t read_file_interrupt(struc
30204 char buf[512];
30205 unsigned int len = 0;
30206
30207 + pax_track_stack();
30208 +
30209 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
30210 len += snprintf(buf + len, sizeof(buf) - len,
30211 "%8s: %10u\n", "RXLP", sc->debug.stats.istats.rxlp);
30212 @@ -422,6 +424,8 @@ static ssize_t read_file_wiphy(struct fi
30213 u8 addr[ETH_ALEN];
30214 u32 tmp;
30215
30216 + pax_track_stack();
30217 +
30218 len += snprintf(buf + len, sizeof(buf) - len,
30219 "%s (chan=%d center-freq: %d MHz channel-type: %d (%s))\n",
30220 wiphy_name(sc->hw->wiphy),
30221 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c
30222 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c 2011-05-19 00:06:34.000000000 -0400
30223 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/htc_drv_main.c 2011-08-05 20:34:06.000000000 -0400
30224 @@ -737,6 +737,8 @@ static ssize_t read_file_tgt_stats(struc
30225 unsigned int len = 0;
30226 int ret = 0;
30227
30228 + pax_track_stack();
30229 +
30230 memset(&cmd_rsp, 0, sizeof(cmd_rsp));
30231
30232 WMI_CMD(WMI_TGT_STATS_CMDID);
30233 @@ -782,6 +784,8 @@ static ssize_t read_file_xmit(struct fil
30234 char buf[512];
30235 unsigned int len = 0;
30236
30237 + pax_track_stack();
30238 +
30239 len += snprintf(buf + len, sizeof(buf) - len,
30240 "%20s : %10u\n", "Buffers queued",
30241 priv->debug.tx_stats.buf_queued);
30242 @@ -831,6 +835,8 @@ static ssize_t read_file_recv(struct fil
30243 char buf[512];
30244 unsigned int len = 0;
30245
30246 + pax_track_stack();
30247 +
30248 len += snprintf(buf + len, sizeof(buf) - len,
30249 "%20s : %10u\n", "SKBs allocated",
30250 priv->debug.rx_stats.skb_allocated);
30251 diff -urNp linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h
30252 --- linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h 2011-05-19 00:06:34.000000000 -0400
30253 +++ linux-2.6.39.4/drivers/net/wireless/ath/ath9k/hw.h 2011-08-05 20:34:06.000000000 -0400
30254 @@ -592,7 +592,7 @@ struct ath_hw_private_ops {
30255
30256 /* ANI */
30257 void (*ani_cache_ini_regs)(struct ath_hw *ah);
30258 -};
30259 +} __no_const;
30260
30261 /**
30262 * struct ath_hw_ops - callbacks used by hardware code and driver code
30263 @@ -642,7 +642,7 @@ struct ath_hw_ops {
30264 u32 burstDuration);
30265 void (*set11n_virtualmorefrag)(struct ath_hw *ah, void *ds,
30266 u32 vmf);
30267 -};
30268 +} __no_const;
30269
30270 struct ath_nf_limits {
30271 s16 max;
30272 diff -urNp linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c
30273 --- linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-19 00:06:34.000000000 -0400
30274 +++ linux-2.6.39.4/drivers/net/wireless/ipw2x00/ipw2100.c 2011-08-05 19:44:37.000000000 -0400
30275 @@ -2100,6 +2100,8 @@ static int ipw2100_set_essid(struct ipw2
30276 int err;
30277 DECLARE_SSID_BUF(ssid);
30278
30279 + pax_track_stack();
30280 +
30281 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
30282
30283 if (ssid_len)
30284 @@ -5449,6 +5451,8 @@ static int ipw2100_set_key(struct ipw210
30285 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
30286 int err;
30287
30288 + pax_track_stack();
30289 +
30290 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
30291 idx, keylen, len);
30292
30293 diff -urNp linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c
30294 --- linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-19 00:06:34.000000000 -0400
30295 +++ linux-2.6.39.4/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-08-05 19:44:37.000000000 -0400
30296 @@ -1565,6 +1565,8 @@ static void libipw_process_probe_respons
30297 unsigned long flags;
30298 DECLARE_SSID_BUF(ssid);
30299
30300 + pax_track_stack();
30301 +
30302 LIBIPW_DEBUG_SCAN("'%s' (%pM"
30303 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
30304 print_ssid(ssid, info_element->data, info_element->len),
30305 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c
30306 --- linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-05-19 00:06:34.000000000 -0400
30307 +++ linux-2.6.39.4/drivers/net/wireless/iwlegacy/iwl3945-base.c 2011-08-05 20:34:06.000000000 -0400
30308 @@ -3958,7 +3958,9 @@ static int iwl3945_pci_probe(struct pci_
30309 */
30310 if (iwl3945_mod_params.disable_hw_scan) {
30311 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
30312 - iwl3945_hw_ops.hw_scan = NULL;
30313 + pax_open_kernel();
30314 + *(void **)&iwl3945_hw_ops.hw_scan = NULL;
30315 + pax_close_kernel();
30316 }
30317
30318 IWL_DEBUG_INFO(priv, "*** LOAD DRIVER ***\n");
30319 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c
30320 --- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-06-25 12:55:22.000000000 -0400
30321 +++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-08-05 20:34:06.000000000 -0400
30322 @@ -3974,7 +3974,9 @@ static int iwl_pci_probe(struct pci_dev
30323 if (cfg->mod_params->disable_hw_scan) {
30324 dev_printk(KERN_DEBUG, &(pdev->dev),
30325 "sw scan support is deprecated\n");
30326 - iwlagn_hw_ops.hw_scan = NULL;
30327 + pax_open_kernel();
30328 + *(void **)&iwlagn_hw_ops.hw_scan = NULL;
30329 + pax_close_kernel();
30330 }
30331
30332 hw = iwl_alloc_all(cfg);
30333 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
30334 --- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-19 00:06:34.000000000 -0400
30335 +++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-08-05 19:44:37.000000000 -0400
30336 @@ -883,6 +883,8 @@ static void rs_tx_status(void *priv_r, s
30337 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
30338 struct iwl_rxon_context *ctx = sta_priv->common.ctx;
30339
30340 + pax_track_stack();
30341 +
30342 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
30343
30344 /* Treat uninitialized rate scaling data same as non-existing. */
30345 @@ -2894,6 +2896,8 @@ static void rs_fill_link_cmd(struct iwl_
30346 container_of(lq_sta, struct iwl_station_priv, lq_sta);
30347 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
30348
30349 + pax_track_stack();
30350 +
30351 /* Override starting rate (index 0) if needed for debug purposes */
30352 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
30353
30354 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c
30355 --- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-19 00:06:34.000000000 -0400
30356 +++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-08-05 19:44:37.000000000 -0400
30357 @@ -549,6 +549,8 @@ static ssize_t iwl_dbgfs_status_read(str
30358 int pos = 0;
30359 const size_t bufsz = sizeof(buf);
30360
30361 + pax_track_stack();
30362 +
30363 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
30364 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
30365 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INT_ENABLED:\t %d\n",
30366 @@ -681,6 +683,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
30367 char buf[256 * NUM_IWL_RXON_CTX];
30368 const size_t bufsz = sizeof(buf);
30369
30370 + pax_track_stack();
30371 +
30372 for_each_context(priv, ctx) {
30373 pos += scnprintf(buf + pos, bufsz - pos, "context %d:\n",
30374 ctx->ctxid);
30375 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h
30376 --- linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-05-19 00:06:34.000000000 -0400
30377 +++ linux-2.6.39.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-08-05 19:44:37.000000000 -0400
30378 @@ -68,8 +68,8 @@ do {
30379 } while (0)
30380
30381 #else
30382 -#define IWL_DEBUG(__priv, level, fmt, args...)
30383 -#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
30384 +#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
30385 +#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
30386 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
30387 const void *p, u32 len)
30388 {}
30389 diff -urNp linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c
30390 --- linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-19 00:06:34.000000000 -0400
30391 +++ linux-2.6.39.4/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-08-05 19:44:37.000000000 -0400
30392 @@ -327,6 +327,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
30393 int buf_len = 512;
30394 size_t len = 0;
30395
30396 + pax_track_stack();
30397 +
30398 if (*ppos != 0)
30399 return 0;
30400 if (count < sizeof(buf))
30401 diff -urNp linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c
30402 --- linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c 2011-05-19 00:06:34.000000000 -0400
30403 +++ linux-2.6.39.4/drivers/net/wireless/mac80211_hwsim.c 2011-08-05 20:34:06.000000000 -0400
30404 @@ -1260,9 +1260,11 @@ static int __init init_mac80211_hwsim(vo
30405 return -EINVAL;
30406
30407 if (fake_hw_scan) {
30408 - mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30409 - mac80211_hwsim_ops.sw_scan_start = NULL;
30410 - mac80211_hwsim_ops.sw_scan_complete = NULL;
30411 + pax_open_kernel();
30412 + *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
30413 + *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
30414 + *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
30415 + pax_close_kernel();
30416 }
30417
30418 spin_lock_init(&hwsim_radio_lock);
30419 diff -urNp linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c
30420 --- linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c 2011-05-19 00:06:34.000000000 -0400
30421 +++ linux-2.6.39.4/drivers/net/wireless/rndis_wlan.c 2011-08-05 19:44:37.000000000 -0400
30422 @@ -1277,7 +1277,7 @@ static int set_rts_threshold(struct usbn
30423
30424 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
30425
30426 - if (rts_threshold < 0 || rts_threshold > 2347)
30427 + if (rts_threshold > 2347)
30428 rts_threshold = 2347;
30429
30430 tmp = cpu_to_le32(rts_threshold);
30431 diff -urNp linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c
30432 --- linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-05-19 00:06:34.000000000 -0400
30433 +++ linux-2.6.39.4/drivers/net/wireless/rtlwifi/rtl8192c/phy_common.c 2011-08-05 19:44:37.000000000 -0400
30434 @@ -827,6 +827,8 @@ static bool _rtl92c_phy_sw_chnl_step_by_
30435 u8 rfpath;
30436 u8 num_total_rfpath = rtlphy->num_total_rfpath;
30437
30438 + pax_track_stack();
30439 +
30440 precommoncmdcnt = 0;
30441 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
30442 MAX_PRECMD_CNT,
30443 diff -urNp linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h
30444 --- linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h 2011-05-19 00:06:34.000000000 -0400
30445 +++ linux-2.6.39.4/drivers/net/wireless/wl1251/wl1251.h 2011-08-05 20:34:06.000000000 -0400
30446 @@ -260,7 +260,7 @@ struct wl1251_if_operations {
30447 void (*reset)(struct wl1251 *wl);
30448 void (*enable_irq)(struct wl1251 *wl);
30449 void (*disable_irq)(struct wl1251 *wl);
30450 -};
30451 +} __no_const;
30452
30453 struct wl1251 {
30454 struct ieee80211_hw *hw;
30455 diff -urNp linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c
30456 --- linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c 2011-05-19 00:06:34.000000000 -0400
30457 +++ linux-2.6.39.4/drivers/net/wireless/wl12xx/spi.c 2011-08-05 19:44:37.000000000 -0400
30458 @@ -280,6 +280,8 @@ static void wl1271_spi_raw_write(struct
30459 u32 chunk_len;
30460 int i;
30461
30462 + pax_track_stack();
30463 +
30464 WARN_ON(len > WL1271_AGGR_BUFFER_SIZE);
30465
30466 spi_message_init(&m);
30467 diff -urNp linux-2.6.39.4/drivers/oprofile/buffer_sync.c linux-2.6.39.4/drivers/oprofile/buffer_sync.c
30468 --- linux-2.6.39.4/drivers/oprofile/buffer_sync.c 2011-06-25 12:55:22.000000000 -0400
30469 +++ linux-2.6.39.4/drivers/oprofile/buffer_sync.c 2011-08-05 19:44:37.000000000 -0400
30470 @@ -343,7 +343,7 @@ static void add_data(struct op_entry *en
30471 if (cookie == NO_COOKIE)
30472 offset = pc;
30473 if (cookie == INVALID_COOKIE) {
30474 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30475 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30476 offset = pc;
30477 }
30478 if (cookie != last_cookie) {
30479 @@ -387,14 +387,14 @@ add_sample(struct mm_struct *mm, struct
30480 /* add userspace sample */
30481
30482 if (!mm) {
30483 - atomic_inc(&oprofile_stats.sample_lost_no_mm);
30484 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
30485 return 0;
30486 }
30487
30488 cookie = lookup_dcookie(mm, s->eip, &offset);
30489
30490 if (cookie == INVALID_COOKIE) {
30491 - atomic_inc(&oprofile_stats.sample_lost_no_mapping);
30492 + atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
30493 return 0;
30494 }
30495
30496 @@ -563,7 +563,7 @@ void sync_buffer(int cpu)
30497 /* ignore backtraces if failed to add a sample */
30498 if (state == sb_bt_start) {
30499 state = sb_bt_ignore;
30500 - atomic_inc(&oprofile_stats.bt_lost_no_mapping);
30501 + atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
30502 }
30503 }
30504 release_mm(mm);
30505 diff -urNp linux-2.6.39.4/drivers/oprofile/event_buffer.c linux-2.6.39.4/drivers/oprofile/event_buffer.c
30506 --- linux-2.6.39.4/drivers/oprofile/event_buffer.c 2011-05-19 00:06:34.000000000 -0400
30507 +++ linux-2.6.39.4/drivers/oprofile/event_buffer.c 2011-08-05 19:44:37.000000000 -0400
30508 @@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
30509 }
30510
30511 if (buffer_pos == buffer_size) {
30512 - atomic_inc(&oprofile_stats.event_lost_overflow);
30513 + atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
30514 return;
30515 }
30516
30517 diff -urNp linux-2.6.39.4/drivers/oprofile/oprof.c linux-2.6.39.4/drivers/oprofile/oprof.c
30518 --- linux-2.6.39.4/drivers/oprofile/oprof.c 2011-05-19 00:06:34.000000000 -0400
30519 +++ linux-2.6.39.4/drivers/oprofile/oprof.c 2011-08-05 19:44:37.000000000 -0400
30520 @@ -110,7 +110,7 @@ static void switch_worker(struct work_st
30521 if (oprofile_ops.switch_events())
30522 return;
30523
30524 - atomic_inc(&oprofile_stats.multiplex_counter);
30525 + atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
30526 start_switch_worker();
30527 }
30528
30529 diff -urNp linux-2.6.39.4/drivers/oprofile/oprofilefs.c linux-2.6.39.4/drivers/oprofile/oprofilefs.c
30530 --- linux-2.6.39.4/drivers/oprofile/oprofilefs.c 2011-05-19 00:06:34.000000000 -0400
30531 +++ linux-2.6.39.4/drivers/oprofile/oprofilefs.c 2011-08-05 19:44:37.000000000 -0400
30532 @@ -186,7 +186,7 @@ static const struct file_operations atom
30533
30534
30535 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
30536 - char const *name, atomic_t *val)
30537 + char const *name, atomic_unchecked_t *val)
30538 {
30539 return __oprofilefs_create_file(sb, root, name,
30540 &atomic_ro_fops, 0444, val);
30541 diff -urNp linux-2.6.39.4/drivers/oprofile/oprofile_stats.c linux-2.6.39.4/drivers/oprofile/oprofile_stats.c
30542 --- linux-2.6.39.4/drivers/oprofile/oprofile_stats.c 2011-05-19 00:06:34.000000000 -0400
30543 +++ linux-2.6.39.4/drivers/oprofile/oprofile_stats.c 2011-08-05 19:44:37.000000000 -0400
30544 @@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
30545 cpu_buf->sample_invalid_eip = 0;
30546 }
30547
30548 - atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
30549 - atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
30550 - atomic_set(&oprofile_stats.event_lost_overflow, 0);
30551 - atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
30552 - atomic_set(&oprofile_stats.multiplex_counter, 0);
30553 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
30554 + atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
30555 + atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
30556 + atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
30557 + atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
30558 }
30559
30560
30561 diff -urNp linux-2.6.39.4/drivers/oprofile/oprofile_stats.h linux-2.6.39.4/drivers/oprofile/oprofile_stats.h
30562 --- linux-2.6.39.4/drivers/oprofile/oprofile_stats.h 2011-05-19 00:06:34.000000000 -0400
30563 +++ linux-2.6.39.4/drivers/oprofile/oprofile_stats.h 2011-08-05 19:44:37.000000000 -0400
30564 @@ -13,11 +13,11 @@
30565 #include <asm/atomic.h>
30566
30567 struct oprofile_stat_struct {
30568 - atomic_t sample_lost_no_mm;
30569 - atomic_t sample_lost_no_mapping;
30570 - atomic_t bt_lost_no_mapping;
30571 - atomic_t event_lost_overflow;
30572 - atomic_t multiplex_counter;
30573 + atomic_unchecked_t sample_lost_no_mm;
30574 + atomic_unchecked_t sample_lost_no_mapping;
30575 + atomic_unchecked_t bt_lost_no_mapping;
30576 + atomic_unchecked_t event_lost_overflow;
30577 + atomic_unchecked_t multiplex_counter;
30578 };
30579
30580 extern struct oprofile_stat_struct oprofile_stats;
30581 diff -urNp linux-2.6.39.4/drivers/parport/procfs.c linux-2.6.39.4/drivers/parport/procfs.c
30582 --- linux-2.6.39.4/drivers/parport/procfs.c 2011-05-19 00:06:34.000000000 -0400
30583 +++ linux-2.6.39.4/drivers/parport/procfs.c 2011-08-05 19:44:37.000000000 -0400
30584 @@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
30585
30586 *ppos += len;
30587
30588 - return copy_to_user(result, buffer, len) ? -EFAULT : 0;
30589 + return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
30590 }
30591
30592 #ifdef CONFIG_PARPORT_1284
30593 @@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
30594
30595 *ppos += len;
30596
30597 - return copy_to_user (result, buffer, len) ? -EFAULT : 0;
30598 + return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
30599 }
30600 #endif /* IEEE1284.3 support. */
30601
30602 diff -urNp linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h
30603 --- linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h 2011-05-19 00:06:34.000000000 -0400
30604 +++ linux-2.6.39.4/drivers/pci/hotplug/cpci_hotplug.h 2011-08-05 20:34:06.000000000 -0400
30605 @@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
30606 int (*hardware_test) (struct slot* slot, u32 value);
30607 u8 (*get_power) (struct slot* slot);
30608 int (*set_power) (struct slot* slot, int value);
30609 -};
30610 +} __no_const;
30611
30612 struct cpci_hp_controller {
30613 unsigned int irq;
30614 diff -urNp linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c
30615 --- linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-05-19 00:06:34.000000000 -0400
30616 +++ linux-2.6.39.4/drivers/pci/hotplug/cpqphp_nvram.c 2011-08-05 19:44:37.000000000 -0400
30617 @@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
30618
30619 void compaq_nvram_init (void __iomem *rom_start)
30620 {
30621 +
30622 +#ifndef CONFIG_PAX_KERNEXEC
30623 if (rom_start) {
30624 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
30625 }
30626 +#endif
30627 +
30628 dbg("int15 entry = %p\n", compaq_int15_entry_point);
30629
30630 /* initialize our int15 lock */
30631 diff -urNp linux-2.6.39.4/drivers/pci/pcie/aspm.c linux-2.6.39.4/drivers/pci/pcie/aspm.c
30632 --- linux-2.6.39.4/drivers/pci/pcie/aspm.c 2011-05-19 00:06:34.000000000 -0400
30633 +++ linux-2.6.39.4/drivers/pci/pcie/aspm.c 2011-08-05 19:44:37.000000000 -0400
30634 @@ -27,9 +27,9 @@
30635 #define MODULE_PARAM_PREFIX "pcie_aspm."
30636
30637 /* Note: those are not register definitions */
30638 -#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
30639 -#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
30640 -#define ASPM_STATE_L1 (4) /* L1 state */
30641 +#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
30642 +#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
30643 +#define ASPM_STATE_L1 (4U) /* L1 state */
30644 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
30645 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
30646
30647 diff -urNp linux-2.6.39.4/drivers/pci/probe.c linux-2.6.39.4/drivers/pci/probe.c
30648 --- linux-2.6.39.4/drivers/pci/probe.c 2011-05-19 00:06:34.000000000 -0400
30649 +++ linux-2.6.39.4/drivers/pci/probe.c 2011-08-05 20:34:06.000000000 -0400
30650 @@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
30651 return ret;
30652 }
30653
30654 -static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
30655 +static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
30656 struct device_attribute *attr,
30657 char *buf)
30658 {
30659 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
30660 }
30661
30662 -static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
30663 +static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
30664 struct device_attribute *attr,
30665 char *buf)
30666 {
30667 @@ -165,7 +165,7 @@ int __pci_read_base(struct pci_dev *dev,
30668 u32 l, sz, mask;
30669 u16 orig_cmd;
30670
30671 - mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
30672 + mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
30673
30674 if (!dev->mmio_always_on) {
30675 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
30676 diff -urNp linux-2.6.39.4/drivers/pci/proc.c linux-2.6.39.4/drivers/pci/proc.c
30677 --- linux-2.6.39.4/drivers/pci/proc.c 2011-05-19 00:06:34.000000000 -0400
30678 +++ linux-2.6.39.4/drivers/pci/proc.c 2011-08-05 19:44:37.000000000 -0400
30679 @@ -476,7 +476,16 @@ static const struct file_operations proc
30680 static int __init pci_proc_init(void)
30681 {
30682 struct pci_dev *dev = NULL;
30683 +
30684 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
30685 +#ifdef CONFIG_GRKERNSEC_PROC_USER
30686 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
30687 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
30688 + proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
30689 +#endif
30690 +#else
30691 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
30692 +#endif
30693 proc_create("devices", 0, proc_bus_pci_dir,
30694 &proc_bus_pci_dev_operations);
30695 proc_initialized = 1;
30696 diff -urNp linux-2.6.39.4/drivers/pci/xen-pcifront.c linux-2.6.39.4/drivers/pci/xen-pcifront.c
30697 --- linux-2.6.39.4/drivers/pci/xen-pcifront.c 2011-05-19 00:06:34.000000000 -0400
30698 +++ linux-2.6.39.4/drivers/pci/xen-pcifront.c 2011-08-05 20:34:06.000000000 -0400
30699 @@ -187,6 +187,8 @@ static int pcifront_bus_read(struct pci_
30700 struct pcifront_sd *sd = bus->sysdata;
30701 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30702
30703 + pax_track_stack();
30704 +
30705 if (verbose_request)
30706 dev_info(&pdev->xdev->dev,
30707 "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
30708 @@ -226,6 +228,8 @@ static int pcifront_bus_write(struct pci
30709 struct pcifront_sd *sd = bus->sysdata;
30710 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30711
30712 + pax_track_stack();
30713 +
30714 if (verbose_request)
30715 dev_info(&pdev->xdev->dev,
30716 "write dev=%04x:%02x:%02x.%01x - "
30717 @@ -258,6 +262,8 @@ static int pci_frontend_enable_msix(stru
30718 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30719 struct msi_desc *entry;
30720
30721 + pax_track_stack();
30722 +
30723 if (nvec > SH_INFO_MAX_VEC) {
30724 dev_err(&dev->dev, "too much vector for pci frontend: %x."
30725 " Increase SH_INFO_MAX_VEC.\n", nvec);
30726 @@ -309,6 +315,8 @@ static void pci_frontend_disable_msix(st
30727 struct pcifront_sd *sd = dev->bus->sysdata;
30728 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30729
30730 + pax_track_stack();
30731 +
30732 err = do_pci_op(pdev, &op);
30733
30734 /* What should do for error ? */
30735 @@ -328,6 +336,8 @@ static int pci_frontend_enable_msi(struc
30736 struct pcifront_sd *sd = dev->bus->sysdata;
30737 struct pcifront_device *pdev = pcifront_get_pdev(sd);
30738
30739 + pax_track_stack();
30740 +
30741 err = do_pci_op(pdev, &op);
30742 if (likely(!err)) {
30743 vector[0] = op.value;
30744 diff -urNp linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c
30745 --- linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c 2011-05-19 00:06:34.000000000 -0400
30746 +++ linux-2.6.39.4/drivers/platform/x86/thinkpad_acpi.c 2011-08-05 20:34:06.000000000 -0400
30747 @@ -2109,7 +2109,7 @@ static int hotkey_mask_get(void)
30748 return 0;
30749 }
30750
30751 -void static hotkey_mask_warn_incomplete_mask(void)
30752 +static void hotkey_mask_warn_incomplete_mask(void)
30753 {
30754 /* log only what the user can fix... */
30755 const u32 wantedmask = hotkey_driver_mask &
30756 diff -urNp linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c
30757 --- linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c 2011-05-19 00:06:34.000000000 -0400
30758 +++ linux-2.6.39.4/drivers/pnp/pnpbios/bioscalls.c 2011-08-05 19:44:37.000000000 -0400
30759 @@ -59,7 +59,7 @@ do { \
30760 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
30761 } while(0)
30762
30763 -static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
30764 +static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
30765 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
30766
30767 /*
30768 @@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func
30769
30770 cpu = get_cpu();
30771 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
30772 +
30773 + pax_open_kernel();
30774 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
30775 + pax_close_kernel();
30776
30777 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
30778 spin_lock_irqsave(&pnp_bios_lock, flags);
30779 @@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func
30780 :"memory");
30781 spin_unlock_irqrestore(&pnp_bios_lock, flags);
30782
30783 + pax_open_kernel();
30784 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
30785 + pax_close_kernel();
30786 +
30787 put_cpu();
30788
30789 /* If we get here and this is set then the PnP BIOS faulted on us. */
30790 @@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n
30791 return status;
30792 }
30793
30794 -void pnpbios_calls_init(union pnp_bios_install_struct *header)
30795 +void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
30796 {
30797 int i;
30798
30799 @@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i
30800 pnp_bios_callpoint.offset = header->fields.pm16offset;
30801 pnp_bios_callpoint.segment = PNP_CS16;
30802
30803 + pax_open_kernel();
30804 +
30805 for_each_possible_cpu(i) {
30806 struct desc_struct *gdt = get_cpu_gdt_table(i);
30807 if (!gdt)
30808 @@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i
30809 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
30810 (unsigned long)__va(header->fields.pm16dseg));
30811 }
30812 +
30813 + pax_close_kernel();
30814 }
30815 diff -urNp linux-2.6.39.4/drivers/pnp/resource.c linux-2.6.39.4/drivers/pnp/resource.c
30816 --- linux-2.6.39.4/drivers/pnp/resource.c 2011-05-19 00:06:34.000000000 -0400
30817 +++ linux-2.6.39.4/drivers/pnp/resource.c 2011-08-05 19:44:37.000000000 -0400
30818 @@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
30819 return 1;
30820
30821 /* check if the resource is valid */
30822 - if (*irq < 0 || *irq > 15)
30823 + if (*irq > 15)
30824 return 0;
30825
30826 /* check if the resource is reserved */
30827 @@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
30828 return 1;
30829
30830 /* check if the resource is valid */
30831 - if (*dma < 0 || *dma == 4 || *dma > 7)
30832 + if (*dma == 4 || *dma > 7)
30833 return 0;
30834
30835 /* check if the resource is reserved */
30836 diff -urNp linux-2.6.39.4/drivers/power/bq27x00_battery.c linux-2.6.39.4/drivers/power/bq27x00_battery.c
30837 --- linux-2.6.39.4/drivers/power/bq27x00_battery.c 2011-05-19 00:06:34.000000000 -0400
30838 +++ linux-2.6.39.4/drivers/power/bq27x00_battery.c 2011-08-05 20:34:06.000000000 -0400
30839 @@ -66,7 +66,7 @@
30840 struct bq27x00_device_info;
30841 struct bq27x00_access_methods {
30842 int (*read)(struct bq27x00_device_info *di, u8 reg, bool single);
30843 -};
30844 +} __no_const;
30845
30846 enum bq27x00_chip { BQ27000, BQ27500 };
30847
30848 diff -urNp linux-2.6.39.4/drivers/regulator/max8660.c linux-2.6.39.4/drivers/regulator/max8660.c
30849 --- linux-2.6.39.4/drivers/regulator/max8660.c 2011-05-19 00:06:34.000000000 -0400
30850 +++ linux-2.6.39.4/drivers/regulator/max8660.c 2011-08-05 20:34:06.000000000 -0400
30851 @@ -383,8 +383,10 @@ static int __devinit max8660_probe(struc
30852 max8660->shadow_regs[MAX8660_OVER1] = 5;
30853 } else {
30854 /* Otherwise devices can be toggled via software */
30855 - max8660_dcdc_ops.enable = max8660_dcdc_enable;
30856 - max8660_dcdc_ops.disable = max8660_dcdc_disable;
30857 + pax_open_kernel();
30858 + *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
30859 + *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
30860 + pax_close_kernel();
30861 }
30862
30863 /*
30864 diff -urNp linux-2.6.39.4/drivers/regulator/mc13892-regulator.c linux-2.6.39.4/drivers/regulator/mc13892-regulator.c
30865 --- linux-2.6.39.4/drivers/regulator/mc13892-regulator.c 2011-05-19 00:06:34.000000000 -0400
30866 +++ linux-2.6.39.4/drivers/regulator/mc13892-regulator.c 2011-08-05 20:34:06.000000000 -0400
30867 @@ -560,10 +560,12 @@ static int __devinit mc13892_regulator_p
30868 }
30869 mc13xxx_unlock(mc13892);
30870
30871 - mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30872 + pax_open_kernel();
30873 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
30874 = mc13892_vcam_set_mode;
30875 - mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30876 + *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
30877 = mc13892_vcam_get_mode;
30878 + pax_close_kernel();
30879 for (i = 0; i < pdata->num_regulators; i++) {
30880 init_data = &pdata->regulators[i];
30881 priv->regulators[i] = regulator_register(
30882 diff -urNp linux-2.6.39.4/drivers/rtc/rtc-dev.c linux-2.6.39.4/drivers/rtc/rtc-dev.c
30883 --- linux-2.6.39.4/drivers/rtc/rtc-dev.c 2011-05-19 00:06:34.000000000 -0400
30884 +++ linux-2.6.39.4/drivers/rtc/rtc-dev.c 2011-08-05 19:44:37.000000000 -0400
30885 @@ -14,6 +14,7 @@
30886 #include <linux/module.h>
30887 #include <linux/rtc.h>
30888 #include <linux/sched.h>
30889 +#include <linux/grsecurity.h>
30890 #include "rtc-core.h"
30891
30892 static dev_t rtc_devt;
30893 @@ -345,6 +346,8 @@ static long rtc_dev_ioctl(struct file *f
30894 if (copy_from_user(&tm, uarg, sizeof(tm)))
30895 return -EFAULT;
30896
30897 + gr_log_timechange();
30898 +
30899 return rtc_set_time(rtc, &tm);
30900
30901 case RTC_PIE_ON:
30902 diff -urNp linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h
30903 --- linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h 2011-05-19 00:06:34.000000000 -0400
30904 +++ linux-2.6.39.4/drivers/scsi/aacraid/aacraid.h 2011-08-05 20:34:06.000000000 -0400
30905 @@ -492,7 +492,7 @@ struct adapter_ops
30906 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
30907 /* Administrative operations */
30908 int (*adapter_comm)(struct aac_dev * dev, int comm);
30909 -};
30910 +} __no_const;
30911
30912 /*
30913 * Define which interrupt handler needs to be installed
30914 diff -urNp linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c
30915 --- linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c 2011-05-19 00:06:34.000000000 -0400
30916 +++ linux-2.6.39.4/drivers/scsi/aacraid/commctrl.c 2011-08-05 19:44:37.000000000 -0400
30917 @@ -482,6 +482,7 @@ static int aac_send_raw_srb(struct aac_d
30918 u32 actual_fibsize64, actual_fibsize = 0;
30919 int i;
30920
30921 + pax_track_stack();
30922
30923 if (dev->in_reset) {
30924 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
30925 diff -urNp linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c
30926 --- linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c 2011-05-19 00:06:34.000000000 -0400
30927 +++ linux-2.6.39.4/drivers/scsi/aic94xx/aic94xx_init.c 2011-08-05 19:44:37.000000000 -0400
30928 @@ -486,7 +486,7 @@ static ssize_t asd_show_update_bios(stru
30929 flash_error_table[i].reason);
30930 }
30931
30932 -static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
30933 +static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
30934 asd_show_update_bios, asd_store_update_bios);
30935
30936 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
30937 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfad.c linux-2.6.39.4/drivers/scsi/bfa/bfad.c
30938 --- linux-2.6.39.4/drivers/scsi/bfa/bfad.c 2011-05-19 00:06:34.000000000 -0400
30939 +++ linux-2.6.39.4/drivers/scsi/bfa/bfad.c 2011-08-05 19:44:37.000000000 -0400
30940 @@ -1027,6 +1027,8 @@ bfad_start_ops(struct bfad_s *bfad) {
30941 struct bfad_vport_s *vport, *vport_new;
30942 struct bfa_fcs_driver_info_s driver_info;
30943
30944 + pax_track_stack();
30945 +
30946 /* Fill the driver_info info to fcs*/
30947 memset(&driver_info, 0, sizeof(driver_info));
30948 strncpy(driver_info.version, BFAD_DRIVER_VERSION,
30949 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c
30950 --- linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-05-19 00:06:34.000000000 -0400
30951 +++ linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_lport.c 2011-08-05 19:44:37.000000000 -0400
30952 @@ -1559,6 +1559,8 @@ bfa_fcs_lport_fdmi_build_rhba_pyld(struc
30953 u16 len, count;
30954 u16 templen;
30955
30956 + pax_track_stack();
30957 +
30958 /*
30959 * get hba attributes
30960 */
30961 @@ -1836,6 +1838,8 @@ bfa_fcs_lport_fdmi_build_portattr_block(
30962 u8 count = 0;
30963 u16 templen;
30964
30965 + pax_track_stack();
30966 +
30967 /*
30968 * get port attributes
30969 */
30970 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c
30971 --- linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-05-19 00:06:34.000000000 -0400
30972 +++ linux-2.6.39.4/drivers/scsi/bfa/bfa_fcs_rport.c 2011-08-05 19:44:37.000000000 -0400
30973 @@ -1844,6 +1844,8 @@ bfa_fcs_rport_process_rpsc(struct bfa_fc
30974 struct fc_rpsc_speed_info_s speeds;
30975 struct bfa_port_attr_s pport_attr;
30976
30977 + pax_track_stack();
30978 +
30979 bfa_trc(port->fcs, rx_fchs->s_id);
30980 bfa_trc(port->fcs, rx_fchs->d_id);
30981
30982 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa.h linux-2.6.39.4/drivers/scsi/bfa/bfa.h
30983 --- linux-2.6.39.4/drivers/scsi/bfa/bfa.h 2011-05-19 00:06:34.000000000 -0400
30984 +++ linux-2.6.39.4/drivers/scsi/bfa/bfa.h 2011-08-05 20:34:06.000000000 -0400
30985 @@ -238,7 +238,7 @@ struct bfa_hwif_s {
30986 u32 *nvecs, u32 *maxvec);
30987 void (*hw_msix_get_rme_range) (struct bfa_s *bfa, u32 *start,
30988 u32 *end);
30989 -};
30990 +} __no_const;
30991 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
30992
30993 struct bfa_iocfc_s {
30994 diff -urNp linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h
30995 --- linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h 2011-05-19 00:06:34.000000000 -0400
30996 +++ linux-2.6.39.4/drivers/scsi/bfa/bfa_ioc.h 2011-08-05 20:34:06.000000000 -0400
30997 @@ -196,7 +196,7 @@ struct bfa_ioc_cbfn_s {
30998 bfa_ioc_disable_cbfn_t disable_cbfn;
30999 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
31000 bfa_ioc_reset_cbfn_t reset_cbfn;
31001 -};
31002 +} __no_const;
31003
31004 /*
31005 * Heartbeat failure notification queue element.
31006 @@ -267,7 +267,7 @@ struct bfa_ioc_hwif_s {
31007 void (*ioc_sync_leave) (struct bfa_ioc_s *ioc);
31008 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
31009 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
31010 -};
31011 +} __no_const;
31012
31013 #define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
31014 #define bfa_ioc_devid(__ioc) ((__ioc)->pcidev.device_id)
31015 diff -urNp linux-2.6.39.4/drivers/scsi/BusLogic.c linux-2.6.39.4/drivers/scsi/BusLogic.c
31016 --- linux-2.6.39.4/drivers/scsi/BusLogic.c 2011-05-19 00:06:34.000000000 -0400
31017 +++ linux-2.6.39.4/drivers/scsi/BusLogic.c 2011-08-05 19:44:37.000000000 -0400
31018 @@ -962,6 +962,8 @@ static int __init BusLogic_InitializeFla
31019 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
31020 *PrototypeHostAdapter)
31021 {
31022 + pax_track_stack();
31023 +
31024 /*
31025 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
31026 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
31027 diff -urNp linux-2.6.39.4/drivers/scsi/dpt_i2o.c linux-2.6.39.4/drivers/scsi/dpt_i2o.c
31028 --- linux-2.6.39.4/drivers/scsi/dpt_i2o.c 2011-05-19 00:06:34.000000000 -0400
31029 +++ linux-2.6.39.4/drivers/scsi/dpt_i2o.c 2011-08-05 19:44:37.000000000 -0400
31030 @@ -1811,6 +1811,8 @@ static int adpt_i2o_passthru(adpt_hba* p
31031 dma_addr_t addr;
31032 ulong flags = 0;
31033
31034 + pax_track_stack();
31035 +
31036 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
31037 // get user msg size in u32s
31038 if(get_user(size, &user_msg[0])){
31039 @@ -2317,6 +2319,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
31040 s32 rcode;
31041 dma_addr_t addr;
31042
31043 + pax_track_stack();
31044 +
31045 memset(msg, 0 , sizeof(msg));
31046 len = scsi_bufflen(cmd);
31047 direction = 0x00000000;
31048 diff -urNp linux-2.6.39.4/drivers/scsi/eata.c linux-2.6.39.4/drivers/scsi/eata.c
31049 --- linux-2.6.39.4/drivers/scsi/eata.c 2011-05-19 00:06:34.000000000 -0400
31050 +++ linux-2.6.39.4/drivers/scsi/eata.c 2011-08-05 19:44:37.000000000 -0400
31051 @@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
31052 struct hostdata *ha;
31053 char name[16];
31054
31055 + pax_track_stack();
31056 +
31057 sprintf(name, "%s%d", driver_name, j);
31058
31059 if (!request_region(port_base, REGION_SIZE, driver_name)) {
31060 diff -urNp linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c
31061 --- linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-05-19 00:06:34.000000000 -0400
31062 +++ linux-2.6.39.4/drivers/scsi/fcoe/fcoe_ctlr.c 2011-08-05 20:34:06.000000000 -0400
31063 @@ -2458,6 +2458,8 @@ static int fcoe_ctlr_vn_recv(struct fcoe
31064 } buf;
31065 int rc;
31066
31067 + pax_track_stack();
31068 +
31069 fiph = (struct fip_header *)skb->data;
31070 sub = fiph->fip_subcode;
31071
31072 diff -urNp linux-2.6.39.4/drivers/scsi/gdth.c linux-2.6.39.4/drivers/scsi/gdth.c
31073 --- linux-2.6.39.4/drivers/scsi/gdth.c 2011-05-19 00:06:34.000000000 -0400
31074 +++ linux-2.6.39.4/drivers/scsi/gdth.c 2011-08-05 19:44:37.000000000 -0400
31075 @@ -4107,6 +4107,8 @@ static int ioc_lockdrv(void __user *arg)
31076 unsigned long flags;
31077 gdth_ha_str *ha;
31078
31079 + pax_track_stack();
31080 +
31081 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
31082 return -EFAULT;
31083 ha = gdth_find_ha(ldrv.ionode);
31084 @@ -4139,6 +4141,8 @@ static int ioc_resetdrv(void __user *arg
31085 gdth_ha_str *ha;
31086 int rval;
31087
31088 + pax_track_stack();
31089 +
31090 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
31091 res.number >= MAX_HDRIVES)
31092 return -EFAULT;
31093 @@ -4174,6 +4178,8 @@ static int ioc_general(void __user *arg,
31094 gdth_ha_str *ha;
31095 int rval;
31096
31097 + pax_track_stack();
31098 +
31099 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
31100 return -EFAULT;
31101 ha = gdth_find_ha(gen.ionode);
31102 @@ -4642,6 +4648,9 @@ static void gdth_flush(gdth_ha_str *ha)
31103 int i;
31104 gdth_cmd_str gdtcmd;
31105 char cmnd[MAX_COMMAND_SIZE];
31106 +
31107 + pax_track_stack();
31108 +
31109 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
31110
31111 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
31112 diff -urNp linux-2.6.39.4/drivers/scsi/gdth_proc.c linux-2.6.39.4/drivers/scsi/gdth_proc.c
31113 --- linux-2.6.39.4/drivers/scsi/gdth_proc.c 2011-05-19 00:06:34.000000000 -0400
31114 +++ linux-2.6.39.4/drivers/scsi/gdth_proc.c 2011-08-05 19:44:37.000000000 -0400
31115 @@ -47,6 +47,9 @@ static int gdth_set_asc_info(struct Scsi
31116 u64 paddr;
31117
31118 char cmnd[MAX_COMMAND_SIZE];
31119 +
31120 + pax_track_stack();
31121 +
31122 memset(cmnd, 0xff, 12);
31123 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
31124
31125 @@ -175,6 +178,8 @@ static int gdth_get_info(char *buffer,ch
31126 gdth_hget_str *phg;
31127 char cmnd[MAX_COMMAND_SIZE];
31128
31129 + pax_track_stack();
31130 +
31131 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
31132 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
31133 if (!gdtcmd || !estr)
31134 diff -urNp linux-2.6.39.4/drivers/scsi/hosts.c linux-2.6.39.4/drivers/scsi/hosts.c
31135 --- linux-2.6.39.4/drivers/scsi/hosts.c 2011-05-19 00:06:34.000000000 -0400
31136 +++ linux-2.6.39.4/drivers/scsi/hosts.c 2011-08-05 19:44:37.000000000 -0400
31137 @@ -42,7 +42,7 @@
31138 #include "scsi_logging.h"
31139
31140
31141 -static atomic_t scsi_host_next_hn; /* host_no for next new host */
31142 +static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
31143
31144
31145 static void scsi_host_cls_release(struct device *dev)
31146 @@ -354,7 +354,7 @@ struct Scsi_Host *scsi_host_alloc(struct
31147 * subtract one because we increment first then return, but we need to
31148 * know what the next host number was before increment
31149 */
31150 - shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
31151 + shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
31152 shost->dma_channel = 0xff;
31153
31154 /* These three are default values which can be overridden */
31155 diff -urNp linux-2.6.39.4/drivers/scsi/hpsa.c linux-2.6.39.4/drivers/scsi/hpsa.c
31156 --- linux-2.6.39.4/drivers/scsi/hpsa.c 2011-05-19 00:06:34.000000000 -0400
31157 +++ linux-2.6.39.4/drivers/scsi/hpsa.c 2011-08-05 20:34:06.000000000 -0400
31158 @@ -469,7 +469,7 @@ static inline u32 next_command(struct ct
31159 u32 a;
31160
31161 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
31162 - return h->access.command_completed(h);
31163 + return h->access->command_completed(h);
31164
31165 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
31166 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
31167 @@ -2889,7 +2889,7 @@ static void start_io(struct ctlr_info *h
31168 while (!list_empty(&h->reqQ)) {
31169 c = list_entry(h->reqQ.next, struct CommandList, list);
31170 /* can't do anything if fifo is full */
31171 - if ((h->access.fifo_full(h))) {
31172 + if ((h->access->fifo_full(h))) {
31173 dev_warn(&h->pdev->dev, "fifo full\n");
31174 break;
31175 }
31176 @@ -2899,7 +2899,7 @@ static void start_io(struct ctlr_info *h
31177 h->Qdepth--;
31178
31179 /* Tell the controller execute command */
31180 - h->access.submit_command(h, c);
31181 + h->access->submit_command(h, c);
31182
31183 /* Put job onto the completed Q */
31184 addQ(&h->cmpQ, c);
31185 @@ -2908,17 +2908,17 @@ static void start_io(struct ctlr_info *h
31186
31187 static inline unsigned long get_next_completion(struct ctlr_info *h)
31188 {
31189 - return h->access.command_completed(h);
31190 + return h->access->command_completed(h);
31191 }
31192
31193 static inline bool interrupt_pending(struct ctlr_info *h)
31194 {
31195 - return h->access.intr_pending(h);
31196 + return h->access->intr_pending(h);
31197 }
31198
31199 static inline long interrupt_not_for_us(struct ctlr_info *h)
31200 {
31201 - return (h->access.intr_pending(h) == 0) ||
31202 + return (h->access->intr_pending(h) == 0) ||
31203 (h->interrupts_enabled == 0);
31204 }
31205
31206 @@ -3684,7 +3684,7 @@ static int __devinit hpsa_pci_init(struc
31207 if (prod_index < 0)
31208 return -ENODEV;
31209 h->product_name = products[prod_index].product_name;
31210 - h->access = *(products[prod_index].access);
31211 + h->access = products[prod_index].access;
31212
31213 if (hpsa_board_disabled(h->pdev)) {
31214 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
31215 @@ -3845,7 +3845,7 @@ static int __devinit hpsa_init_one(struc
31216 }
31217
31218 /* make sure the board interrupts are off */
31219 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31220 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31221
31222 if (h->msix_vector || h->msi_vector)
31223 rc = request_irq(h->intr[h->intr_mode], do_hpsa_intr_msi,
31224 @@ -3892,7 +3892,7 @@ static int __devinit hpsa_init_one(struc
31225 hpsa_scsi_setup(h);
31226
31227 /* Turn the interrupts on so we can service requests */
31228 - h->access.set_intr_mask(h, HPSA_INTR_ON);
31229 + h->access->set_intr_mask(h, HPSA_INTR_ON);
31230
31231 hpsa_put_ctlr_into_performant_mode(h);
31232 hpsa_hba_inquiry(h);
31233 @@ -3955,7 +3955,7 @@ static void hpsa_shutdown(struct pci_dev
31234 * To write all data in the battery backed cache to disks
31235 */
31236 hpsa_flush_cache(h);
31237 - h->access.set_intr_mask(h, HPSA_INTR_OFF);
31238 + h->access->set_intr_mask(h, HPSA_INTR_OFF);
31239 free_irq(h->intr[h->intr_mode], h);
31240 #ifdef CONFIG_PCI_MSI
31241 if (h->msix_vector)
31242 @@ -4118,7 +4118,7 @@ static __devinit void hpsa_enter_perform
31243 return;
31244 }
31245 /* Change the access methods to the performant access methods */
31246 - h->access = SA5_performant_access;
31247 + h->access = &SA5_performant_access;
31248 h->transMethod = CFGTBL_Trans_Performant;
31249 }
31250
31251 diff -urNp linux-2.6.39.4/drivers/scsi/hpsa.h linux-2.6.39.4/drivers/scsi/hpsa.h
31252 --- linux-2.6.39.4/drivers/scsi/hpsa.h 2011-05-19 00:06:34.000000000 -0400
31253 +++ linux-2.6.39.4/drivers/scsi/hpsa.h 2011-08-05 20:34:06.000000000 -0400
31254 @@ -73,7 +73,7 @@ struct ctlr_info {
31255 unsigned int msix_vector;
31256 unsigned int msi_vector;
31257 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
31258 - struct access_method access;
31259 + struct access_method *access;
31260
31261 /* queue and queue Info */
31262 struct list_head reqQ;
31263 diff -urNp linux-2.6.39.4/drivers/scsi/ips.h linux-2.6.39.4/drivers/scsi/ips.h
31264 --- linux-2.6.39.4/drivers/scsi/ips.h 2011-05-19 00:06:34.000000000 -0400
31265 +++ linux-2.6.39.4/drivers/scsi/ips.h 2011-08-05 20:34:06.000000000 -0400
31266 @@ -1027,7 +1027,7 @@ typedef struct {
31267 int (*intr)(struct ips_ha *);
31268 void (*enableint)(struct ips_ha *);
31269 uint32_t (*statupd)(struct ips_ha *);
31270 -} ips_hw_func_t;
31271 +} __no_const ips_hw_func_t;
31272
31273 typedef struct ips_ha {
31274 uint8_t ha_id[IPS_MAX_CHANNELS+1];
31275 diff -urNp linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c
31276 --- linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c 2011-05-19 00:06:34.000000000 -0400
31277 +++ linux-2.6.39.4/drivers/scsi/libfc/fc_exch.c 2011-08-05 19:44:37.000000000 -0400
31278 @@ -105,12 +105,12 @@ struct fc_exch_mgr {
31279 * all together if not used XXX
31280 */
31281 struct {
31282 - atomic_t no_free_exch;
31283 - atomic_t no_free_exch_xid;
31284 - atomic_t xid_not_found;
31285 - atomic_t xid_busy;
31286 - atomic_t seq_not_found;
31287 - atomic_t non_bls_resp;
31288 + atomic_unchecked_t no_free_exch;
31289 + atomic_unchecked_t no_free_exch_xid;
31290 + atomic_unchecked_t xid_not_found;
31291 + atomic_unchecked_t xid_busy;
31292 + atomic_unchecked_t seq_not_found;
31293 + atomic_unchecked_t non_bls_resp;
31294 } stats;
31295 };
31296
31297 @@ -700,7 +700,7 @@ static struct fc_exch *fc_exch_em_alloc(
31298 /* allocate memory for exchange */
31299 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
31300 if (!ep) {
31301 - atomic_inc(&mp->stats.no_free_exch);
31302 + atomic_inc_unchecked(&mp->stats.no_free_exch);
31303 goto out;
31304 }
31305 memset(ep, 0, sizeof(*ep));
31306 @@ -761,7 +761,7 @@ out:
31307 return ep;
31308 err:
31309 spin_unlock_bh(&pool->lock);
31310 - atomic_inc(&mp->stats.no_free_exch_xid);
31311 + atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
31312 mempool_free(ep, mp->ep_pool);
31313 return NULL;
31314 }
31315 @@ -906,7 +906,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31316 xid = ntohs(fh->fh_ox_id); /* we originated exch */
31317 ep = fc_exch_find(mp, xid);
31318 if (!ep) {
31319 - atomic_inc(&mp->stats.xid_not_found);
31320 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31321 reject = FC_RJT_OX_ID;
31322 goto out;
31323 }
31324 @@ -936,7 +936,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31325 ep = fc_exch_find(mp, xid);
31326 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
31327 if (ep) {
31328 - atomic_inc(&mp->stats.xid_busy);
31329 + atomic_inc_unchecked(&mp->stats.xid_busy);
31330 reject = FC_RJT_RX_ID;
31331 goto rel;
31332 }
31333 @@ -947,7 +947,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31334 }
31335 xid = ep->xid; /* get our XID */
31336 } else if (!ep) {
31337 - atomic_inc(&mp->stats.xid_not_found);
31338 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31339 reject = FC_RJT_RX_ID; /* XID not found */
31340 goto out;
31341 }
31342 @@ -964,7 +964,7 @@ static enum fc_pf_rjt_reason fc_seq_look
31343 } else {
31344 sp = &ep->seq;
31345 if (sp->id != fh->fh_seq_id) {
31346 - atomic_inc(&mp->stats.seq_not_found);
31347 + atomic_inc_unchecked(&mp->stats.seq_not_found);
31348 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
31349 goto rel;
31350 }
31351 @@ -1392,22 +1392,22 @@ static void fc_exch_recv_seq_resp(struct
31352
31353 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
31354 if (!ep) {
31355 - atomic_inc(&mp->stats.xid_not_found);
31356 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31357 goto out;
31358 }
31359 if (ep->esb_stat & ESB_ST_COMPLETE) {
31360 - atomic_inc(&mp->stats.xid_not_found);
31361 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31362 goto rel;
31363 }
31364 if (ep->rxid == FC_XID_UNKNOWN)
31365 ep->rxid = ntohs(fh->fh_rx_id);
31366 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
31367 - atomic_inc(&mp->stats.xid_not_found);
31368 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31369 goto rel;
31370 }
31371 if (ep->did != ntoh24(fh->fh_s_id) &&
31372 ep->did != FC_FID_FLOGI) {
31373 - atomic_inc(&mp->stats.xid_not_found);
31374 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31375 goto rel;
31376 }
31377 sof = fr_sof(fp);
31378 @@ -1416,7 +1416,7 @@ static void fc_exch_recv_seq_resp(struct
31379 sp->ssb_stat |= SSB_ST_RESP;
31380 sp->id = fh->fh_seq_id;
31381 } else if (sp->id != fh->fh_seq_id) {
31382 - atomic_inc(&mp->stats.seq_not_found);
31383 + atomic_inc_unchecked(&mp->stats.seq_not_found);
31384 goto rel;
31385 }
31386
31387 @@ -1479,9 +1479,9 @@ static void fc_exch_recv_resp(struct fc_
31388 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
31389
31390 if (!sp)
31391 - atomic_inc(&mp->stats.xid_not_found);
31392 + atomic_inc_unchecked(&mp->stats.xid_not_found);
31393 else
31394 - atomic_inc(&mp->stats.non_bls_resp);
31395 + atomic_inc_unchecked(&mp->stats.non_bls_resp);
31396
31397 fc_frame_free(fp);
31398 }
31399 diff -urNp linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c
31400 --- linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c 2011-05-19 00:06:34.000000000 -0400
31401 +++ linux-2.6.39.4/drivers/scsi/libsas/sas_ata.c 2011-08-05 20:34:06.000000000 -0400
31402 @@ -314,7 +314,7 @@ static struct ata_port_operations sas_sa
31403 .postreset = ata_std_postreset,
31404 .error_handler = ata_std_error_handler,
31405 .post_internal_cmd = sas_ata_post_internal,
31406 - .qc_defer = ata_std_qc_defer,
31407 + .qc_defer = ata_std_qc_defer,
31408 .qc_prep = ata_noop_qc_prep,
31409 .qc_issue = sas_ata_qc_issue,
31410 .qc_fill_rtf = sas_ata_qc_fill_rtf,
31411 diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c
31412 --- linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-19 00:06:34.000000000 -0400
31413 +++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc_debugfs.c 2011-08-05 19:44:37.000000000 -0400
31414 @@ -104,7 +104,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_
31415
31416 #include <linux/debugfs.h>
31417
31418 -static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31419 +static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
31420 static unsigned long lpfc_debugfs_start_time = 0L;
31421
31422 /* iDiag */
31423 @@ -141,7 +141,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
31424 lpfc_debugfs_enable = 0;
31425
31426 len = 0;
31427 - index = (atomic_read(&vport->disc_trc_cnt) + 1) &
31428 + index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
31429 (lpfc_debugfs_max_disc_trc - 1);
31430 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
31431 dtp = vport->disc_trc + i;
31432 @@ -202,7 +202,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
31433 lpfc_debugfs_enable = 0;
31434
31435 len = 0;
31436 - index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
31437 + index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
31438 (lpfc_debugfs_max_slow_ring_trc - 1);
31439 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
31440 dtp = phba->slow_ring_trc + i;
31441 @@ -380,6 +380,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
31442 uint32_t *ptr;
31443 char buffer[1024];
31444
31445 + pax_track_stack();
31446 +
31447 off = 0;
31448 spin_lock_irq(&phba->hbalock);
31449
31450 @@ -617,14 +619,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
31451 !vport || !vport->disc_trc)
31452 return;
31453
31454 - index = atomic_inc_return(&vport->disc_trc_cnt) &
31455 + index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
31456 (lpfc_debugfs_max_disc_trc - 1);
31457 dtp = vport->disc_trc + index;
31458 dtp->fmt = fmt;
31459 dtp->data1 = data1;
31460 dtp->data2 = data2;
31461 dtp->data3 = data3;
31462 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31463 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31464 dtp->jif = jiffies;
31465 #endif
31466 return;
31467 @@ -655,14 +657,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
31468 !phba || !phba->slow_ring_trc)
31469 return;
31470
31471 - index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
31472 + index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
31473 (lpfc_debugfs_max_slow_ring_trc - 1);
31474 dtp = phba->slow_ring_trc + index;
31475 dtp->fmt = fmt;
31476 dtp->data1 = data1;
31477 dtp->data2 = data2;
31478 dtp->data3 = data3;
31479 - dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
31480 + dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
31481 dtp->jif = jiffies;
31482 #endif
31483 return;
31484 @@ -2145,7 +2147,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31485 "slow_ring buffer\n");
31486 goto debug_failed;
31487 }
31488 - atomic_set(&phba->slow_ring_trc_cnt, 0);
31489 + atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
31490 memset(phba->slow_ring_trc, 0,
31491 (sizeof(struct lpfc_debugfs_trc) *
31492 lpfc_debugfs_max_slow_ring_trc));
31493 @@ -2191,7 +2193,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
31494 "buffer\n");
31495 goto debug_failed;
31496 }
31497 - atomic_set(&vport->disc_trc_cnt, 0);
31498 + atomic_set_unchecked(&vport->disc_trc_cnt, 0);
31499
31500 snprintf(name, sizeof(name), "discovery_trace");
31501 vport->debug_disc_trc =
31502 diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h
31503 --- linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h 2011-05-19 00:06:34.000000000 -0400
31504 +++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc.h 2011-08-05 19:44:37.000000000 -0400
31505 @@ -419,7 +419,7 @@ struct lpfc_vport {
31506 struct dentry *debug_nodelist;
31507 struct dentry *vport_debugfs_root;
31508 struct lpfc_debugfs_trc *disc_trc;
31509 - atomic_t disc_trc_cnt;
31510 + atomic_unchecked_t disc_trc_cnt;
31511 #endif
31512 uint8_t stat_data_enabled;
31513 uint8_t stat_data_blocked;
31514 @@ -785,8 +785,8 @@ struct lpfc_hba {
31515 struct timer_list fabric_block_timer;
31516 unsigned long bit_flags;
31517 #define FABRIC_COMANDS_BLOCKED 0
31518 - atomic_t num_rsrc_err;
31519 - atomic_t num_cmd_success;
31520 + atomic_unchecked_t num_rsrc_err;
31521 + atomic_unchecked_t num_cmd_success;
31522 unsigned long last_rsrc_error_time;
31523 unsigned long last_ramp_down_time;
31524 unsigned long last_ramp_up_time;
31525 @@ -800,7 +800,7 @@ struct lpfc_hba {
31526 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
31527 struct dentry *debug_slow_ring_trc;
31528 struct lpfc_debugfs_trc *slow_ring_trc;
31529 - atomic_t slow_ring_trc_cnt;
31530 + atomic_unchecked_t slow_ring_trc_cnt;
31531 /* iDiag debugfs sub-directory */
31532 struct dentry *idiag_root;
31533 struct dentry *idiag_pci_cfg;
31534 diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c
31535 --- linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c 2011-05-19 00:06:34.000000000 -0400
31536 +++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc_init.c 2011-08-05 20:34:06.000000000 -0400
31537 @@ -9535,8 +9535,10 @@ lpfc_init(void)
31538 printk(LPFC_COPYRIGHT "\n");
31539
31540 if (lpfc_enable_npiv) {
31541 - lpfc_transport_functions.vport_create = lpfc_vport_create;
31542 - lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31543 + pax_open_kernel();
31544 + *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
31545 + *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
31546 + pax_close_kernel();
31547 }
31548 lpfc_transport_template =
31549 fc_attach_transport(&lpfc_transport_functions);
31550 diff -urNp linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c
31551 --- linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-19 00:06:34.000000000 -0400
31552 +++ linux-2.6.39.4/drivers/scsi/lpfc/lpfc_scsi.c 2011-08-05 19:44:37.000000000 -0400
31553 @@ -297,7 +297,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
31554 uint32_t evt_posted;
31555
31556 spin_lock_irqsave(&phba->hbalock, flags);
31557 - atomic_inc(&phba->num_rsrc_err);
31558 + atomic_inc_unchecked(&phba->num_rsrc_err);
31559 phba->last_rsrc_error_time = jiffies;
31560
31561 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
31562 @@ -338,7 +338,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
31563 unsigned long flags;
31564 struct lpfc_hba *phba = vport->phba;
31565 uint32_t evt_posted;
31566 - atomic_inc(&phba->num_cmd_success);
31567 + atomic_inc_unchecked(&phba->num_cmd_success);
31568
31569 if (vport->cfg_lun_queue_depth <= queue_depth)
31570 return;
31571 @@ -382,8 +382,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31572 unsigned long num_rsrc_err, num_cmd_success;
31573 int i;
31574
31575 - num_rsrc_err = atomic_read(&phba->num_rsrc_err);
31576 - num_cmd_success = atomic_read(&phba->num_cmd_success);
31577 + num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
31578 + num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
31579
31580 vports = lpfc_create_vport_work_array(phba);
31581 if (vports != NULL)
31582 @@ -403,8 +403,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
31583 }
31584 }
31585 lpfc_destroy_vport_work_array(phba, vports);
31586 - atomic_set(&phba->num_rsrc_err, 0);
31587 - atomic_set(&phba->num_cmd_success, 0);
31588 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
31589 + atomic_set_unchecked(&phba->num_cmd_success, 0);
31590 }
31591
31592 /**
31593 @@ -438,8 +438,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
31594 }
31595 }
31596 lpfc_destroy_vport_work_array(phba, vports);
31597 - atomic_set(&phba->num_rsrc_err, 0);
31598 - atomic_set(&phba->num_cmd_success, 0);
31599 + atomic_set_unchecked(&phba->num_rsrc_err, 0);
31600 + atomic_set_unchecked(&phba->num_cmd_success, 0);
31601 }
31602
31603 /**
31604 diff -urNp linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c
31605 --- linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-19 00:06:34.000000000 -0400
31606 +++ linux-2.6.39.4/drivers/scsi/megaraid/megaraid_mbox.c 2011-08-05 19:44:37.000000000 -0400
31607 @@ -3510,6 +3510,8 @@ megaraid_cmm_register(adapter_t *adapter
31608 int rval;
31609 int i;
31610
31611 + pax_track_stack();
31612 +
31613 // Allocate memory for the base list of scb for management module.
31614 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
31615
31616 diff -urNp linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c
31617 --- linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c 2011-05-19 00:06:34.000000000 -0400
31618 +++ linux-2.6.39.4/drivers/scsi/osd/osd_initiator.c 2011-08-05 19:44:37.000000000 -0400
31619 @@ -97,6 +97,8 @@ static int _osd_get_print_system_info(st
31620 int nelem = ARRAY_SIZE(get_attrs), a = 0;
31621 int ret;
31622
31623 + pax_track_stack();
31624 +
31625 or = osd_start_request(od, GFP_KERNEL);
31626 if (!or)
31627 return -ENOMEM;
31628 diff -urNp linux-2.6.39.4/drivers/scsi/pmcraid.c linux-2.6.39.4/drivers/scsi/pmcraid.c
31629 --- linux-2.6.39.4/drivers/scsi/pmcraid.c 2011-05-19 00:06:34.000000000 -0400
31630 +++ linux-2.6.39.4/drivers/scsi/pmcraid.c 2011-08-05 19:44:37.000000000 -0400
31631 @@ -201,8 +201,8 @@ static int pmcraid_slave_alloc(struct sc
31632 res->scsi_dev = scsi_dev;
31633 scsi_dev->hostdata = res;
31634 res->change_detected = 0;
31635 - atomic_set(&res->read_failures, 0);
31636 - atomic_set(&res->write_failures, 0);
31637 + atomic_set_unchecked(&res->read_failures, 0);
31638 + atomic_set_unchecked(&res->write_failures, 0);
31639 rc = 0;
31640 }
31641 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
31642 @@ -2677,9 +2677,9 @@ static int pmcraid_error_handler(struct
31643
31644 /* If this was a SCSI read/write command keep count of errors */
31645 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
31646 - atomic_inc(&res->read_failures);
31647 + atomic_inc_unchecked(&res->read_failures);
31648 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
31649 - atomic_inc(&res->write_failures);
31650 + atomic_inc_unchecked(&res->write_failures);
31651
31652 if (!RES_IS_GSCSI(res->cfg_entry) &&
31653 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
31654 @@ -3535,7 +3535,7 @@ static int pmcraid_queuecommand_lck(
31655 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31656 * hrrq_id assigned here in queuecommand
31657 */
31658 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31659 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31660 pinstance->num_hrrq;
31661 cmd->cmd_done = pmcraid_io_done;
31662
31663 @@ -3860,7 +3860,7 @@ static long pmcraid_ioctl_passthrough(
31664 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
31665 * hrrq_id assigned here in queuecommand
31666 */
31667 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
31668 + ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
31669 pinstance->num_hrrq;
31670
31671 if (request_size) {
31672 @@ -4495,7 +4495,7 @@ static void pmcraid_worker_function(stru
31673
31674 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
31675 /* add resources only after host is added into system */
31676 - if (!atomic_read(&pinstance->expose_resources))
31677 + if (!atomic_read_unchecked(&pinstance->expose_resources))
31678 return;
31679
31680 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
31681 @@ -5329,8 +5329,8 @@ static int __devinit pmcraid_init_instan
31682 init_waitqueue_head(&pinstance->reset_wait_q);
31683
31684 atomic_set(&pinstance->outstanding_cmds, 0);
31685 - atomic_set(&pinstance->last_message_id, 0);
31686 - atomic_set(&pinstance->expose_resources, 0);
31687 + atomic_set_unchecked(&pinstance->last_message_id, 0);
31688 + atomic_set_unchecked(&pinstance->expose_resources, 0);
31689
31690 INIT_LIST_HEAD(&pinstance->free_res_q);
31691 INIT_LIST_HEAD(&pinstance->used_res_q);
31692 @@ -6045,7 +6045,7 @@ static int __devinit pmcraid_probe(
31693 /* Schedule worker thread to handle CCN and take care of adding and
31694 * removing devices to OS
31695 */
31696 - atomic_set(&pinstance->expose_resources, 1);
31697 + atomic_set_unchecked(&pinstance->expose_resources, 1);
31698 schedule_work(&pinstance->worker_q);
31699 return rc;
31700
31701 diff -urNp linux-2.6.39.4/drivers/scsi/pmcraid.h linux-2.6.39.4/drivers/scsi/pmcraid.h
31702 --- linux-2.6.39.4/drivers/scsi/pmcraid.h 2011-05-19 00:06:34.000000000 -0400
31703 +++ linux-2.6.39.4/drivers/scsi/pmcraid.h 2011-08-05 19:44:37.000000000 -0400
31704 @@ -750,7 +750,7 @@ struct pmcraid_instance {
31705 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
31706
31707 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
31708 - atomic_t last_message_id;
31709 + atomic_unchecked_t last_message_id;
31710
31711 /* configuration table */
31712 struct pmcraid_config_table *cfg_table;
31713 @@ -779,7 +779,7 @@ struct pmcraid_instance {
31714 atomic_t outstanding_cmds;
31715
31716 /* should add/delete resources to mid-layer now ?*/
31717 - atomic_t expose_resources;
31718 + atomic_unchecked_t expose_resources;
31719
31720
31721
31722 @@ -815,8 +815,8 @@ struct pmcraid_resource_entry {
31723 struct pmcraid_config_table_entry_ext cfg_entry_ext;
31724 };
31725 struct scsi_device *scsi_dev; /* Link scsi_device structure */
31726 - atomic_t read_failures; /* count of failed READ commands */
31727 - atomic_t write_failures; /* count of failed WRITE commands */
31728 + atomic_unchecked_t read_failures; /* count of failed READ commands */
31729 + atomic_unchecked_t write_failures; /* count of failed WRITE commands */
31730
31731 /* To indicate add/delete/modify during CCN */
31732 u8 change_detected;
31733 diff -urNp linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h
31734 --- linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h 2011-05-19 00:06:34.000000000 -0400
31735 +++ linux-2.6.39.4/drivers/scsi/qla2xxx/qla_def.h 2011-08-05 20:34:06.000000000 -0400
31736 @@ -2236,7 +2236,7 @@ struct isp_operations {
31737 int (*get_flash_version) (struct scsi_qla_host *, void *);
31738 int (*start_scsi) (srb_t *);
31739 int (*abort_isp) (struct scsi_qla_host *);
31740 -};
31741 +} __no_const;
31742
31743 /* MSI-X Support *************************************************************/
31744
31745 diff -urNp linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h
31746 --- linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h 2011-05-19 00:06:34.000000000 -0400
31747 +++ linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_def.h 2011-08-05 19:44:37.000000000 -0400
31748 @@ -256,7 +256,7 @@ struct ddb_entry {
31749 atomic_t retry_relogin_timer; /* Min Time between relogins
31750 * (4000 only) */
31751 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
31752 - atomic_t relogin_retry_count; /* Num of times relogin has been
31753 + atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
31754 * retried */
31755
31756 uint16_t port;
31757 diff -urNp linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c
31758 --- linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c 2011-05-19 00:06:34.000000000 -0400
31759 +++ linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_init.c 2011-08-05 19:44:37.000000000 -0400
31760 @@ -680,7 +680,7 @@ static struct ddb_entry * qla4xxx_alloc_
31761 ddb_entry->fw_ddb_index = fw_ddb_index;
31762 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
31763 atomic_set(&ddb_entry->relogin_timer, 0);
31764 - atomic_set(&ddb_entry->relogin_retry_count, 0);
31765 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31766 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31767 list_add_tail(&ddb_entry->list, &ha->ddb_list);
31768 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
31769 @@ -1433,7 +1433,7 @@ int qla4xxx_process_ddb_changed(struct s
31770 if ((ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) &&
31771 (atomic_read(&ddb_entry->state) != DDB_STATE_ONLINE)) {
31772 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
31773 - atomic_set(&ddb_entry->relogin_retry_count, 0);
31774 + atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
31775 atomic_set(&ddb_entry->relogin_timer, 0);
31776 clear_bit(DF_RELOGIN, &ddb_entry->flags);
31777 iscsi_unblock_session(ddb_entry->sess);
31778 diff -urNp linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c
31779 --- linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c 2011-05-19 00:06:34.000000000 -0400
31780 +++ linux-2.6.39.4/drivers/scsi/qla4xxx/ql4_os.c 2011-08-05 19:44:37.000000000 -0400
31781 @@ -802,13 +802,13 @@ static void qla4xxx_timer(struct scsi_ql
31782 ddb_entry->fw_ddb_device_state ==
31783 DDB_DS_SESSION_FAILED) {
31784 /* Reset retry relogin timer */
31785 - atomic_inc(&ddb_entry->relogin_retry_count);
31786 + atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
31787 DEBUG2(printk("scsi%ld: ddb [%d] relogin"
31788 " timed out-retrying"
31789 " relogin (%d)\n",
31790 ha->host_no,
31791 ddb_entry->fw_ddb_index,
31792 - atomic_read(&ddb_entry->
31793 + atomic_read_unchecked(&ddb_entry->
31794 relogin_retry_count))
31795 );
31796 start_dpc++;
31797 diff -urNp linux-2.6.39.4/drivers/scsi/scsi.c linux-2.6.39.4/drivers/scsi/scsi.c
31798 --- linux-2.6.39.4/drivers/scsi/scsi.c 2011-05-19 00:06:34.000000000 -0400
31799 +++ linux-2.6.39.4/drivers/scsi/scsi.c 2011-08-05 19:44:37.000000000 -0400
31800 @@ -655,7 +655,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
31801 unsigned long timeout;
31802 int rtn = 0;
31803
31804 - atomic_inc(&cmd->device->iorequest_cnt);
31805 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31806
31807 /* check if the device is still usable */
31808 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
31809 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_debug.c linux-2.6.39.4/drivers/scsi/scsi_debug.c
31810 --- linux-2.6.39.4/drivers/scsi/scsi_debug.c 2011-05-19 00:06:34.000000000 -0400
31811 +++ linux-2.6.39.4/drivers/scsi/scsi_debug.c 2011-08-05 19:44:37.000000000 -0400
31812 @@ -1493,6 +1493,8 @@ static int resp_mode_select(struct scsi_
31813 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
31814 unsigned char *cmd = (unsigned char *)scp->cmnd;
31815
31816 + pax_track_stack();
31817 +
31818 if ((errsts = check_readiness(scp, 1, devip)))
31819 return errsts;
31820 memset(arr, 0, sizeof(arr));
31821 @@ -1590,6 +1592,8 @@ static int resp_log_sense(struct scsi_cm
31822 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
31823 unsigned char *cmd = (unsigned char *)scp->cmnd;
31824
31825 + pax_track_stack();
31826 +
31827 if ((errsts = check_readiness(scp, 1, devip)))
31828 return errsts;
31829 memset(arr, 0, sizeof(arr));
31830 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_lib.c linux-2.6.39.4/drivers/scsi/scsi_lib.c
31831 --- linux-2.6.39.4/drivers/scsi/scsi_lib.c 2011-05-19 00:06:34.000000000 -0400
31832 +++ linux-2.6.39.4/drivers/scsi/scsi_lib.c 2011-08-05 19:44:37.000000000 -0400
31833 @@ -1410,7 +1410,7 @@ static void scsi_kill_request(struct req
31834 shost = sdev->host;
31835 scsi_init_cmd_errh(cmd);
31836 cmd->result = DID_NO_CONNECT << 16;
31837 - atomic_inc(&cmd->device->iorequest_cnt);
31838 + atomic_inc_unchecked(&cmd->device->iorequest_cnt);
31839
31840 /*
31841 * SCSI request completion path will do scsi_device_unbusy(),
31842 @@ -1436,9 +1436,9 @@ static void scsi_softirq_done(struct req
31843
31844 INIT_LIST_HEAD(&cmd->eh_entry);
31845
31846 - atomic_inc(&cmd->device->iodone_cnt);
31847 + atomic_inc_unchecked(&cmd->device->iodone_cnt);
31848 if (cmd->result)
31849 - atomic_inc(&cmd->device->ioerr_cnt);
31850 + atomic_inc_unchecked(&cmd->device->ioerr_cnt);
31851
31852 disposition = scsi_decide_disposition(cmd);
31853 if (disposition != SUCCESS &&
31854 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_sysfs.c linux-2.6.39.4/drivers/scsi/scsi_sysfs.c
31855 --- linux-2.6.39.4/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:22.000000000 -0400
31856 +++ linux-2.6.39.4/drivers/scsi/scsi_sysfs.c 2011-08-05 19:44:37.000000000 -0400
31857 @@ -622,7 +622,7 @@ show_iostat_##field(struct device *dev,
31858 char *buf) \
31859 { \
31860 struct scsi_device *sdev = to_scsi_device(dev); \
31861 - unsigned long long count = atomic_read(&sdev->field); \
31862 + unsigned long long count = atomic_read_unchecked(&sdev->field); \
31863 return snprintf(buf, 20, "0x%llx\n", count); \
31864 } \
31865 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
31866 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c
31867 --- linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c 2011-05-19 00:06:34.000000000 -0400
31868 +++ linux-2.6.39.4/drivers/scsi/scsi_transport_fc.c 2011-08-05 19:44:37.000000000 -0400
31869 @@ -485,7 +485,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_
31870 * Netlink Infrastructure
31871 */
31872
31873 -static atomic_t fc_event_seq;
31874 +static atomic_unchecked_t fc_event_seq;
31875
31876 /**
31877 * fc_get_event_number - Obtain the next sequential FC event number
31878 @@ -498,7 +498,7 @@ static atomic_t fc_event_seq;
31879 u32
31880 fc_get_event_number(void)
31881 {
31882 - return atomic_add_return(1, &fc_event_seq);
31883 + return atomic_add_return_unchecked(1, &fc_event_seq);
31884 }
31885 EXPORT_SYMBOL(fc_get_event_number);
31886
31887 @@ -646,7 +646,7 @@ static __init int fc_transport_init(void
31888 {
31889 int error;
31890
31891 - atomic_set(&fc_event_seq, 0);
31892 + atomic_set_unchecked(&fc_event_seq, 0);
31893
31894 error = transport_class_register(&fc_host_class);
31895 if (error)
31896 @@ -836,7 +836,7 @@ static int fc_str_to_dev_loss(const char
31897 char *cp;
31898
31899 *val = simple_strtoul(buf, &cp, 0);
31900 - if ((*cp && (*cp != '\n')) || (*val < 0))
31901 + if (*cp && (*cp != '\n'))
31902 return -EINVAL;
31903 /*
31904 * Check for overflow; dev_loss_tmo is u32
31905 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c
31906 --- linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c 2011-05-19 00:06:34.000000000 -0400
31907 +++ linux-2.6.39.4/drivers/scsi/scsi_transport_iscsi.c 2011-08-05 19:44:37.000000000 -0400
31908 @@ -83,7 +83,7 @@ struct iscsi_internal {
31909 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
31910 };
31911
31912 -static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
31913 +static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
31914 static struct workqueue_struct *iscsi_eh_timer_workq;
31915
31916 /*
31917 @@ -761,7 +761,7 @@ int iscsi_add_session(struct iscsi_cls_s
31918 int err;
31919
31920 ihost = shost->shost_data;
31921 - session->sid = atomic_add_return(1, &iscsi_session_nr);
31922 + session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
31923
31924 if (id == ISCSI_MAX_TARGET) {
31925 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
31926 @@ -2200,7 +2200,7 @@ static __init int iscsi_transport_init(v
31927 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
31928 ISCSI_TRANSPORT_VERSION);
31929
31930 - atomic_set(&iscsi_session_nr, 0);
31931 + atomic_set_unchecked(&iscsi_session_nr, 0);
31932
31933 err = class_register(&iscsi_transport_class);
31934 if (err)
31935 diff -urNp linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c
31936 --- linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c 2011-05-19 00:06:34.000000000 -0400
31937 +++ linux-2.6.39.4/drivers/scsi/scsi_transport_srp.c 2011-08-05 19:44:37.000000000 -0400
31938 @@ -33,7 +33,7 @@
31939 #include "scsi_transport_srp_internal.h"
31940
31941 struct srp_host_attrs {
31942 - atomic_t next_port_id;
31943 + atomic_unchecked_t next_port_id;
31944 };
31945 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
31946
31947 @@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
31948 struct Scsi_Host *shost = dev_to_shost(dev);
31949 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
31950
31951 - atomic_set(&srp_host->next_port_id, 0);
31952 + atomic_set_unchecked(&srp_host->next_port_id, 0);
31953 return 0;
31954 }
31955
31956 @@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
31957 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
31958 rport->roles = ids->roles;
31959
31960 - id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
31961 + id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
31962 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
31963
31964 transport_setup_device(&rport->dev);
31965 diff -urNp linux-2.6.39.4/drivers/scsi/sg.c linux-2.6.39.4/drivers/scsi/sg.c
31966 --- linux-2.6.39.4/drivers/scsi/sg.c 2011-05-19 00:06:34.000000000 -0400
31967 +++ linux-2.6.39.4/drivers/scsi/sg.c 2011-08-05 19:44:37.000000000 -0400
31968 @@ -2310,7 +2310,7 @@ struct sg_proc_leaf {
31969 const struct file_operations * fops;
31970 };
31971
31972 -static struct sg_proc_leaf sg_proc_leaf_arr[] = {
31973 +static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
31974 {"allow_dio", &adio_fops},
31975 {"debug", &debug_fops},
31976 {"def_reserved_size", &dressz_fops},
31977 @@ -2325,7 +2325,7 @@ sg_proc_init(void)
31978 {
31979 int k, mask;
31980 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
31981 - struct sg_proc_leaf * leaf;
31982 + const struct sg_proc_leaf * leaf;
31983
31984 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
31985 if (!sg_proc_sgp)
31986 diff -urNp linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c
31987 --- linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-19 00:06:34.000000000 -0400
31988 +++ linux-2.6.39.4/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-08-05 19:44:37.000000000 -0400
31989 @@ -1756,6 +1756,8 @@ static int __devinit sym2_probe(struct p
31990 int do_iounmap = 0;
31991 int do_disable_device = 1;
31992
31993 + pax_track_stack();
31994 +
31995 memset(&sym_dev, 0, sizeof(sym_dev));
31996 memset(&nvram, 0, sizeof(nvram));
31997 sym_dev.pdev = pdev;
31998 diff -urNp linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c
31999 --- linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c 2011-05-19 00:06:34.000000000 -0400
32000 +++ linux-2.6.39.4/drivers/scsi/vmw_pvscsi.c 2011-08-05 19:44:37.000000000 -0400
32001 @@ -447,6 +447,8 @@ static void pvscsi_setup_all_rings(const
32002 dma_addr_t base;
32003 unsigned i;
32004
32005 + pax_track_stack();
32006 +
32007 cmd.ringsStatePPN = adapter->ringStatePA >> PAGE_SHIFT;
32008 cmd.reqRingNumPages = adapter->req_pages;
32009 cmd.cmpRingNumPages = adapter->cmp_pages;
32010 diff -urNp linux-2.6.39.4/drivers/spi/spi.c linux-2.6.39.4/drivers/spi/spi.c
32011 --- linux-2.6.39.4/drivers/spi/spi.c 2011-05-19 00:06:34.000000000 -0400
32012 +++ linux-2.6.39.4/drivers/spi/spi.c 2011-08-05 19:44:37.000000000 -0400
32013 @@ -1023,7 +1023,7 @@ int spi_bus_unlock(struct spi_master *ma
32014 EXPORT_SYMBOL_GPL(spi_bus_unlock);
32015
32016 /* portable code must never pass more than 32 bytes */
32017 -#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
32018 +#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
32019
32020 static u8 *buf;
32021
32022 diff -urNp linux-2.6.39.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c linux-2.6.39.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c
32023 --- linux-2.6.39.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-05-19 00:06:34.000000000 -0400
32024 +++ linux-2.6.39.4/drivers/staging/ath6kl/os/linux/ar6000_drv.c 2011-08-14 12:12:59.000000000 -0400
32025 @@ -384,7 +384,7 @@ static struct ar_cookie s_ar_cookie_mem[
32026 (((ar)->arTargetType == TARGET_TYPE_AR6003) ? AR6003_HOST_INTEREST_ITEM_ADDRESS(item) : 0))
32027
32028
32029 -static struct net_device_ops ar6000_netdev_ops = {
32030 +static net_device_ops_no_const ar6000_netdev_ops = {
32031 .ndo_init = NULL,
32032 .ndo_open = ar6000_open,
32033 .ndo_stop = ar6000_close,
32034 diff -urNp linux-2.6.39.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h linux-2.6.39.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h
32035 --- linux-2.6.39.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-05-19 00:06:34.000000000 -0400
32036 +++ linux-2.6.39.4/drivers/staging/ath6kl/os/linux/include/ar6k_pal.h 2011-08-14 09:32:05.000000000 -0400
32037 @@ -30,7 +30,7 @@ typedef bool (*ar6k_pal_recv_pkt_t)(void
32038 typedef struct ar6k_pal_config_s
32039 {
32040 ar6k_pal_recv_pkt_t fpar6k_pal_recv_pkt;
32041 -}ar6k_pal_config_t;
32042 +} __no_const ar6k_pal_config_t;
32043
32044 void register_pal_cb(ar6k_pal_config_t *palConfig_p);
32045 #endif /* _AR6K_PAL_H_ */
32046 diff -urNp linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
32047 --- linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-05-19 00:06:34.000000000 -0400
32048 +++ linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/dhd_linux.c 2011-08-05 20:34:06.000000000 -0400
32049 @@ -857,14 +857,14 @@ static void dhd_op_if(dhd_if_t *ifp)
32050 free_netdev(ifp->net);
32051 }
32052 /* Allocate etherdev, including space for private structure */
32053 - ifp->net = alloc_etherdev(sizeof(dhd));
32054 + ifp->net = alloc_etherdev(sizeof(*dhd));
32055 if (!ifp->net) {
32056 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
32057 ret = -ENOMEM;
32058 }
32059 if (ret == 0) {
32060 strcpy(ifp->net->name, ifp->name);
32061 - memcpy(netdev_priv(ifp->net), &dhd, sizeof(dhd));
32062 + memcpy(netdev_priv(ifp->net), dhd, sizeof(*dhd));
32063 err = dhd_net_attach(&dhd->pub, ifp->idx);
32064 if (err != 0) {
32065 DHD_ERROR(("%s: dhd_net_attach failed, "
32066 @@ -1923,7 +1923,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32067 strcpy(nv_path, nvram_path);
32068
32069 /* Allocate etherdev, including space for private structure */
32070 - net = alloc_etherdev(sizeof(dhd));
32071 + net = alloc_etherdev(sizeof(*dhd));
32072 if (!net) {
32073 DHD_ERROR(("%s: OOM - alloc_etherdev\n", __func__));
32074 goto fail;
32075 @@ -1939,7 +1939,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32076 /*
32077 * Save the dhd_info into the priv
32078 */
32079 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
32080 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
32081
32082 /* Set network interface name if it was provided as module parameter */
32083 if (iface_name[0]) {
32084 @@ -2056,7 +2056,7 @@ dhd_pub_t *dhd_attach(struct dhd_bus *bu
32085 /*
32086 * Save the dhd_info into the priv
32087 */
32088 - memcpy(netdev_priv(net), &dhd, sizeof(dhd));
32089 + memcpy(netdev_priv(net), dhd, sizeof(*dhd));
32090
32091 #if defined(CUSTOMER_HW2) && defined(CONFIG_WIFI_CONTROL_FUNC)
32092 g_bus = bus;
32093 diff -urNp linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c
32094 --- linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c 2011-05-19 00:06:34.000000000 -0400
32095 +++ linux-2.6.39.4/drivers/staging/brcm80211/brcmfmac/wl_iw.c 2011-08-05 19:44:37.000000000 -0400
32096 @@ -495,7 +495,7 @@ wl_iw_get_range(struct net_device *dev,
32097 list = (wl_u32_list_t *) channels;
32098
32099 dwrq->length = sizeof(struct iw_range);
32100 - memset(range, 0, sizeof(range));
32101 + memset(range, 0, sizeof(*range));
32102
32103 range->min_nwid = range->max_nwid = 0;
32104
32105 diff -urNp linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c
32106 --- linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c 2011-05-19 00:06:34.000000000 -0400
32107 +++ linux-2.6.39.4/drivers/staging/et131x/et1310_tx.c 2011-08-05 19:44:37.000000000 -0400
32108 @@ -635,11 +635,11 @@ inline void et131x_free_send_packet(stru
32109 struct net_device_stats *stats = &etdev->net_stats;
32110
32111 if (tcb->flags & fMP_DEST_BROAD)
32112 - atomic_inc(&etdev->Stats.brdcstxmt);
32113 + atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
32114 else if (tcb->flags & fMP_DEST_MULTI)
32115 - atomic_inc(&etdev->Stats.multixmt);
32116 + atomic_inc_unchecked(&etdev->Stats.multixmt);
32117 else
32118 - atomic_inc(&etdev->Stats.unixmt);
32119 + atomic_inc_unchecked(&etdev->Stats.unixmt);
32120
32121 if (tcb->skb) {
32122 stats->tx_bytes += tcb->skb->len;
32123 diff -urNp linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h
32124 --- linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h 2011-05-19 00:06:34.000000000 -0400
32125 +++ linux-2.6.39.4/drivers/staging/et131x/et131x_adapter.h 2011-08-05 19:44:37.000000000 -0400
32126 @@ -110,11 +110,11 @@ typedef struct _ce_stats_t {
32127 * operations
32128 */
32129 u32 unircv; /* # multicast packets received */
32130 - atomic_t unixmt; /* # multicast packets for Tx */
32131 + atomic_unchecked_t unixmt; /* # multicast packets for Tx */
32132 u32 multircv; /* # multicast packets received */
32133 - atomic_t multixmt; /* # multicast packets for Tx */
32134 + atomic_unchecked_t multixmt; /* # multicast packets for Tx */
32135 u32 brdcstrcv; /* # broadcast packets received */
32136 - atomic_t brdcstxmt; /* # broadcast packets for Tx */
32137 + atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
32138 u32 norcvbuf; /* # Rx packets discarded */
32139 u32 noxmtbuf; /* # Tx packets discarded */
32140
32141 diff -urNp linux-2.6.39.4/drivers/staging/gma500/psb_ttm_glue.c linux-2.6.39.4/drivers/staging/gma500/psb_ttm_glue.c
32142 --- linux-2.6.39.4/drivers/staging/gma500/psb_ttm_glue.c 2011-05-19 00:06:34.000000000 -0400
32143 +++ linux-2.6.39.4/drivers/staging/gma500/psb_ttm_glue.c 2011-08-14 12:25:25.000000000 -0400
32144 @@ -230,8 +230,10 @@ int psb_mmap(struct file *filp, struct v
32145 if (unlikely(dev_priv->ttm_vm_ops == NULL)) {
32146 dev_priv->ttm_vm_ops = (struct vm_operations_struct *)
32147 vma->vm_ops;
32148 - psb_ttm_vm_ops = *vma->vm_ops;
32149 - psb_ttm_vm_ops.fault = &psb_ttm_fault;
32150 + pax_open_kernel();
32151 + memcpy((void *)&psb_ttm_vm_ops, vma->vm_ops, sizeof(psb_ttm_vm_ops));
32152 + *(void **)&psb_ttm_vm_ops.fault = &psb_ttm_fault;
32153 + pax_close_kernel();
32154 }
32155
32156 vma->vm_ops = &psb_ttm_vm_ops;
32157 diff -urNp linux-2.6.39.4/drivers/staging/hv/channel.c linux-2.6.39.4/drivers/staging/hv/channel.c
32158 --- linux-2.6.39.4/drivers/staging/hv/channel.c 2011-05-19 00:06:34.000000000 -0400
32159 +++ linux-2.6.39.4/drivers/staging/hv/channel.c 2011-08-05 19:44:37.000000000 -0400
32160 @@ -509,8 +509,8 @@ int vmbus_establish_gpadl(struct vmbus_c
32161 unsigned long flags;
32162 int ret = 0;
32163
32164 - next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
32165 - atomic_inc(&vmbus_connection.next_gpadl_handle);
32166 + next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
32167 + atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
32168
32169 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
32170 if (ret)
32171 diff -urNp linux-2.6.39.4/drivers/staging/hv/hv.c linux-2.6.39.4/drivers/staging/hv/hv.c
32172 --- linux-2.6.39.4/drivers/staging/hv/hv.c 2011-05-19 00:06:34.000000000 -0400
32173 +++ linux-2.6.39.4/drivers/staging/hv/hv.c 2011-08-05 19:44:37.000000000 -0400
32174 @@ -163,7 +163,7 @@ static u64 do_hypercall(u64 control, voi
32175 u64 output_address = (output) ? virt_to_phys(output) : 0;
32176 u32 output_address_hi = output_address >> 32;
32177 u32 output_address_lo = output_address & 0xFFFFFFFF;
32178 - volatile void *hypercall_page = hv_context.hypercall_page;
32179 + volatile void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
32180
32181 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
32182 control, input, output);
32183 diff -urNp linux-2.6.39.4/drivers/staging/hv/hv_mouse.c linux-2.6.39.4/drivers/staging/hv/hv_mouse.c
32184 --- linux-2.6.39.4/drivers/staging/hv/hv_mouse.c 2011-05-19 00:06:34.000000000 -0400
32185 +++ linux-2.6.39.4/drivers/staging/hv/hv_mouse.c 2011-08-13 20:26:10.000000000 -0400
32186 @@ -898,8 +898,10 @@ static void reportdesc_callback(struct h
32187 if (hid_dev) {
32188 DPRINT_INFO(INPUTVSC_DRV, "hid_device created");
32189
32190 - hid_dev->ll_driver->open = mousevsc_hid_open;
32191 - hid_dev->ll_driver->close = mousevsc_hid_close;
32192 + pax_open_kernel();
32193 + *(void **)&hid_dev->ll_driver->open = mousevsc_hid_open;
32194 + *(void **)&hid_dev->ll_driver->close = mousevsc_hid_close;
32195 + pax_close_kernel();
32196
32197 hid_dev->bus = BUS_VIRTUAL;
32198 hid_dev->vendor = input_device_ctx->device_info.vendor;
32199 diff -urNp linux-2.6.39.4/drivers/staging/hv/rndis_filter.c linux-2.6.39.4/drivers/staging/hv/rndis_filter.c
32200 --- linux-2.6.39.4/drivers/staging/hv/rndis_filter.c 2011-05-19 00:06:34.000000000 -0400
32201 +++ linux-2.6.39.4/drivers/staging/hv/rndis_filter.c 2011-08-05 19:44:37.000000000 -0400
32202 @@ -49,7 +49,7 @@ struct rndis_device {
32203
32204 enum rndis_device_state state;
32205 u32 link_stat;
32206 - atomic_t new_req_id;
32207 + atomic_unchecked_t new_req_id;
32208
32209 spinlock_t request_lock;
32210 struct list_head req_list;
32211 @@ -144,7 +144,7 @@ static struct rndis_request *get_rndis_r
32212 * template
32213 */
32214 set = &rndis_msg->msg.set_req;
32215 - set->req_id = atomic_inc_return(&dev->new_req_id);
32216 + set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32217
32218 /* Add to the request list */
32219 spin_lock_irqsave(&dev->request_lock, flags);
32220 @@ -709,7 +709,7 @@ static void rndis_filter_halt_device(str
32221
32222 /* Setup the rndis set */
32223 halt = &request->request_msg.msg.halt_req;
32224 - halt->req_id = atomic_inc_return(&dev->new_req_id);
32225 + halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
32226
32227 /* Ignore return since this msg is optional. */
32228 rndis_filter_send_request(dev, request);
32229 diff -urNp linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c
32230 --- linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c 2011-05-19 00:06:34.000000000 -0400
32231 +++ linux-2.6.39.4/drivers/staging/hv/vmbus_drv.c 2011-08-05 19:44:37.000000000 -0400
32232 @@ -661,14 +661,14 @@ int vmbus_child_device_register(struct h
32233 {
32234 int ret = 0;
32235
32236 - static atomic_t device_num = ATOMIC_INIT(0);
32237 + static atomic_unchecked_t device_num = ATOMIC_INIT(0);
32238
32239 DPRINT_DBG(VMBUS_DRV, "child device (%p) registering",
32240 child_device_obj);
32241
32242 /* Set the device name. Otherwise, device_register() will fail. */
32243 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
32244 - atomic_inc_return(&device_num));
32245 + atomic_inc_return_unchecked(&device_num));
32246
32247 /* The new device belongs to this bus */
32248 child_device_obj->device.bus = &vmbus_drv.bus; /* device->dev.bus; */
32249 diff -urNp linux-2.6.39.4/drivers/staging/hv/vmbus_private.h linux-2.6.39.4/drivers/staging/hv/vmbus_private.h
32250 --- linux-2.6.39.4/drivers/staging/hv/vmbus_private.h 2011-05-19 00:06:34.000000000 -0400
32251 +++ linux-2.6.39.4/drivers/staging/hv/vmbus_private.h 2011-08-05 19:44:37.000000000 -0400
32252 @@ -58,7 +58,7 @@ enum vmbus_connect_state {
32253 struct vmbus_connection {
32254 enum vmbus_connect_state conn_state;
32255
32256 - atomic_t next_gpadl_handle;
32257 + atomic_unchecked_t next_gpadl_handle;
32258
32259 /*
32260 * Represents channel interrupts. Each bit position represents a
32261 diff -urNp linux-2.6.39.4/drivers/staging/iio/ring_generic.h linux-2.6.39.4/drivers/staging/iio/ring_generic.h
32262 --- linux-2.6.39.4/drivers/staging/iio/ring_generic.h 2011-05-19 00:06:34.000000000 -0400
32263 +++ linux-2.6.39.4/drivers/staging/iio/ring_generic.h 2011-08-13 20:14:25.000000000 -0400
32264 @@ -86,7 +86,7 @@ struct iio_ring_access_funcs {
32265
32266 int (*is_enabled)(struct iio_ring_buffer *ring);
32267 int (*enable)(struct iio_ring_buffer *ring);
32268 -};
32269 +} __no_const;
32270
32271 /**
32272 * struct iio_ring_buffer - general ring buffer structure
32273 @@ -134,7 +134,7 @@ struct iio_ring_buffer {
32274 struct iio_handler access_handler;
32275 struct iio_event_interface ev_int;
32276 struct iio_shared_ev_pointer shared_ev_pointer;
32277 - struct iio_ring_access_funcs access;
32278 + struct iio_ring_access_funcs access;
32279 int (*preenable)(struct iio_dev *);
32280 int (*postenable)(struct iio_dev *);
32281 int (*predisable)(struct iio_dev *);
32282 diff -urNp linux-2.6.39.4/drivers/staging/octeon/ethernet.c linux-2.6.39.4/drivers/staging/octeon/ethernet.c
32283 --- linux-2.6.39.4/drivers/staging/octeon/ethernet.c 2011-05-19 00:06:34.000000000 -0400
32284 +++ linux-2.6.39.4/drivers/staging/octeon/ethernet.c 2011-08-05 19:44:37.000000000 -0400
32285 @@ -258,11 +258,11 @@ static struct net_device_stats *cvm_oct_
32286 * since the RX tasklet also increments it.
32287 */
32288 #ifdef CONFIG_64BIT
32289 - atomic64_add(rx_status.dropped_packets,
32290 - (atomic64_t *)&priv->stats.rx_dropped);
32291 + atomic64_add_unchecked(rx_status.dropped_packets,
32292 + (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32293 #else
32294 - atomic_add(rx_status.dropped_packets,
32295 - (atomic_t *)&priv->stats.rx_dropped);
32296 + atomic_add_unchecked(rx_status.dropped_packets,
32297 + (atomic_unchecked_t *)&priv->stats.rx_dropped);
32298 #endif
32299 }
32300
32301 diff -urNp linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c
32302 --- linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c 2011-05-19 00:06:34.000000000 -0400
32303 +++ linux-2.6.39.4/drivers/staging/octeon/ethernet-rx.c 2011-08-05 19:44:37.000000000 -0400
32304 @@ -417,11 +417,11 @@ static int cvm_oct_napi_poll(struct napi
32305 /* Increment RX stats for virtual ports */
32306 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
32307 #ifdef CONFIG_64BIT
32308 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
32309 - atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
32310 + atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
32311 + atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
32312 #else
32313 - atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
32314 - atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
32315 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
32316 + atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
32317 #endif
32318 }
32319 netif_receive_skb(skb);
32320 @@ -433,9 +433,9 @@ static int cvm_oct_napi_poll(struct napi
32321 dev->name);
32322 */
32323 #ifdef CONFIG_64BIT
32324 - atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
32325 + atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
32326 #else
32327 - atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
32328 + atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
32329 #endif
32330 dev_kfree_skb_irq(skb);
32331 }
32332 diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/inode.c linux-2.6.39.4/drivers/staging/pohmelfs/inode.c
32333 --- linux-2.6.39.4/drivers/staging/pohmelfs/inode.c 2011-05-19 00:06:34.000000000 -0400
32334 +++ linux-2.6.39.4/drivers/staging/pohmelfs/inode.c 2011-08-05 19:44:37.000000000 -0400
32335 @@ -1855,7 +1855,7 @@ static int pohmelfs_fill_super(struct su
32336 mutex_init(&psb->mcache_lock);
32337 psb->mcache_root = RB_ROOT;
32338 psb->mcache_timeout = msecs_to_jiffies(5000);
32339 - atomic_long_set(&psb->mcache_gen, 0);
32340 + atomic_long_set_unchecked(&psb->mcache_gen, 0);
32341
32342 psb->trans_max_pages = 100;
32343
32344 @@ -1870,7 +1870,7 @@ static int pohmelfs_fill_super(struct su
32345 INIT_LIST_HEAD(&psb->crypto_ready_list);
32346 INIT_LIST_HEAD(&psb->crypto_active_list);
32347
32348 - atomic_set(&psb->trans_gen, 1);
32349 + atomic_set_unchecked(&psb->trans_gen, 1);
32350 atomic_long_set(&psb->total_inodes, 0);
32351
32352 mutex_init(&psb->state_lock);
32353 diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c
32354 --- linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c 2011-05-19 00:06:34.000000000 -0400
32355 +++ linux-2.6.39.4/drivers/staging/pohmelfs/mcache.c 2011-08-05 19:44:37.000000000 -0400
32356 @@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
32357 m->data = data;
32358 m->start = start;
32359 m->size = size;
32360 - m->gen = atomic_long_inc_return(&psb->mcache_gen);
32361 + m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
32362
32363 mutex_lock(&psb->mcache_lock);
32364 err = pohmelfs_mcache_insert(psb, m);
32365 diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h
32366 --- linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h 2011-05-19 00:06:34.000000000 -0400
32367 +++ linux-2.6.39.4/drivers/staging/pohmelfs/netfs.h 2011-08-05 19:44:37.000000000 -0400
32368 @@ -571,14 +571,14 @@ struct pohmelfs_config;
32369 struct pohmelfs_sb {
32370 struct rb_root mcache_root;
32371 struct mutex mcache_lock;
32372 - atomic_long_t mcache_gen;
32373 + atomic_long_unchecked_t mcache_gen;
32374 unsigned long mcache_timeout;
32375
32376 unsigned int idx;
32377
32378 unsigned int trans_retries;
32379
32380 - atomic_t trans_gen;
32381 + atomic_unchecked_t trans_gen;
32382
32383 unsigned int crypto_attached_size;
32384 unsigned int crypto_align_size;
32385 diff -urNp linux-2.6.39.4/drivers/staging/pohmelfs/trans.c linux-2.6.39.4/drivers/staging/pohmelfs/trans.c
32386 --- linux-2.6.39.4/drivers/staging/pohmelfs/trans.c 2011-05-19 00:06:34.000000000 -0400
32387 +++ linux-2.6.39.4/drivers/staging/pohmelfs/trans.c 2011-08-05 19:44:37.000000000 -0400
32388 @@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
32389 int err;
32390 struct netfs_cmd *cmd = t->iovec.iov_base;
32391
32392 - t->gen = atomic_inc_return(&psb->trans_gen);
32393 + t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
32394
32395 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
32396 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
32397 diff -urNp linux-2.6.39.4/drivers/staging/rtl8712/rtl871x_io.h linux-2.6.39.4/drivers/staging/rtl8712/rtl871x_io.h
32398 --- linux-2.6.39.4/drivers/staging/rtl8712/rtl871x_io.h 2011-05-19 00:06:34.000000000 -0400
32399 +++ linux-2.6.39.4/drivers/staging/rtl8712/rtl871x_io.h 2011-08-13 20:31:57.000000000 -0400
32400 @@ -83,7 +83,7 @@ struct _io_ops {
32401 u8 *pmem);
32402 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
32403 u8 *pmem);
32404 -};
32405 +} __no_const;
32406
32407 struct io_req {
32408 struct list_head list;
32409 diff -urNp linux-2.6.39.4/drivers/staging/sbe-2t3e3/netdev.c linux-2.6.39.4/drivers/staging/sbe-2t3e3/netdev.c
32410 --- linux-2.6.39.4/drivers/staging/sbe-2t3e3/netdev.c 2011-05-19 00:06:34.000000000 -0400
32411 +++ linux-2.6.39.4/drivers/staging/sbe-2t3e3/netdev.c 2011-08-14 12:29:10.000000000 -0400
32412 @@ -51,7 +51,7 @@ int t3e3_ioctl(struct net_device *dev, s
32413 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
32414
32415 if (rlen)
32416 - if (copy_to_user(data, &resp, rlen))
32417 + if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
32418 return -EFAULT;
32419
32420 return 0;
32421 diff -urNp linux-2.6.39.4/drivers/staging/tty/istallion.c linux-2.6.39.4/drivers/staging/tty/istallion.c
32422 --- linux-2.6.39.4/drivers/staging/tty/istallion.c 2011-05-19 00:06:34.000000000 -0400
32423 +++ linux-2.6.39.4/drivers/staging/tty/istallion.c 2011-08-05 19:44:37.000000000 -0400
32424 @@ -186,7 +186,6 @@ static struct ktermios stli_deftermios
32425 * re-used for each stats call.
32426 */
32427 static comstats_t stli_comstats;
32428 -static combrd_t stli_brdstats;
32429 static struct asystats stli_cdkstats;
32430
32431 /*****************************************************************************/
32432 @@ -4003,6 +4002,7 @@ out:
32433
32434 static int stli_getbrdstats(combrd_t __user *bp)
32435 {
32436 + combrd_t stli_brdstats;
32437 struct stlibrd *brdp;
32438 unsigned int i;
32439
32440 @@ -4226,6 +4226,8 @@ static int stli_getportstruct(struct stl
32441 struct stliport stli_dummyport;
32442 struct stliport *portp;
32443
32444 + pax_track_stack();
32445 +
32446 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
32447 return -EFAULT;
32448 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
32449 @@ -4248,6 +4250,8 @@ static int stli_getbrdstruct(struct stli
32450 struct stlibrd stli_dummybrd;
32451 struct stlibrd *brdp;
32452
32453 + pax_track_stack();
32454 +
32455 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
32456 return -EFAULT;
32457 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
32458 diff -urNp linux-2.6.39.4/drivers/staging/tty/stallion.c linux-2.6.39.4/drivers/staging/tty/stallion.c
32459 --- linux-2.6.39.4/drivers/staging/tty/stallion.c 2011-05-19 00:06:34.000000000 -0400
32460 +++ linux-2.6.39.4/drivers/staging/tty/stallion.c 2011-08-05 19:44:37.000000000 -0400
32461 @@ -2406,6 +2406,8 @@ static int stl_getportstruct(struct stlp
32462 struct stlport stl_dummyport;
32463 struct stlport *portp;
32464
32465 + pax_track_stack();
32466 +
32467 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
32468 return -EFAULT;
32469 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
32470 diff -urNp linux-2.6.39.4/drivers/staging/usbip/usbip_common.h linux-2.6.39.4/drivers/staging/usbip/usbip_common.h
32471 --- linux-2.6.39.4/drivers/staging/usbip/usbip_common.h 2011-05-19 00:06:34.000000000 -0400
32472 +++ linux-2.6.39.4/drivers/staging/usbip/usbip_common.h 2011-08-18 23:21:09.000000000 -0400
32473 @@ -367,7 +367,7 @@ struct usbip_device {
32474 void (*shutdown)(struct usbip_device *);
32475 void (*reset)(struct usbip_device *);
32476 void (*unusable)(struct usbip_device *);
32477 - } eh_ops;
32478 + } __no_const eh_ops;
32479 };
32480
32481
32482 diff -urNp linux-2.6.39.4/drivers/staging/usbip/vhci.h linux-2.6.39.4/drivers/staging/usbip/vhci.h
32483 --- linux-2.6.39.4/drivers/staging/usbip/vhci.h 2011-05-19 00:06:34.000000000 -0400
32484 +++ linux-2.6.39.4/drivers/staging/usbip/vhci.h 2011-08-05 19:44:37.000000000 -0400
32485 @@ -92,7 +92,7 @@ struct vhci_hcd {
32486 unsigned resuming:1;
32487 unsigned long re_timeout;
32488
32489 - atomic_t seqnum;
32490 + atomic_unchecked_t seqnum;
32491
32492 /*
32493 * NOTE:
32494 diff -urNp linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c
32495 --- linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c 2011-05-19 00:06:34.000000000 -0400
32496 +++ linux-2.6.39.4/drivers/staging/usbip/vhci_hcd.c 2011-08-18 23:22:51.000000000 -0400
32497 @@ -536,7 +536,7 @@ static void vhci_tx_urb(struct urb *urb)
32498 return;
32499 }
32500
32501 - priv->seqnum = atomic_inc_return(&the_controller->seqnum);
32502 + priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32503 if (priv->seqnum == 0xffff)
32504 usbip_uinfo("seqnum max\n");
32505
32506 @@ -795,7 +795,7 @@ static int vhci_urb_dequeue(struct usb_h
32507 return -ENOMEM;
32508 }
32509
32510 - unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
32511 + unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
32512 if (unlink->seqnum == 0xffff)
32513 usbip_uinfo("seqnum max\n");
32514
32515 @@ -992,7 +992,7 @@ static int vhci_start(struct usb_hcd *hc
32516 vdev->rhport = rhport;
32517 }
32518
32519 - atomic_set(&vhci->seqnum, 0);
32520 + atomic_set_unchecked(&vhci->seqnum, 0);
32521 spin_lock_init(&vhci->lock);
32522
32523
32524 diff -urNp linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c
32525 --- linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c 2011-05-19 00:06:34.000000000 -0400
32526 +++ linux-2.6.39.4/drivers/staging/usbip/vhci_rx.c 2011-08-05 19:44:37.000000000 -0400
32527 @@ -81,7 +81,7 @@ static void vhci_recv_ret_submit(struct
32528 usbip_uerr("cannot find a urb of seqnum %u\n",
32529 pdu->base.seqnum);
32530 usbip_uinfo("max seqnum %d\n",
32531 - atomic_read(&the_controller->seqnum));
32532 + atomic_read_unchecked(&the_controller->seqnum));
32533 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
32534 return;
32535 }
32536 diff -urNp linux-2.6.39.4/drivers/staging/wlan-ng/hfa384x_usb.c linux-2.6.39.4/drivers/staging/wlan-ng/hfa384x_usb.c
32537 --- linux-2.6.39.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-05-19 00:06:34.000000000 -0400
32538 +++ linux-2.6.39.4/drivers/staging/wlan-ng/hfa384x_usb.c 2011-08-13 20:36:25.000000000 -0400
32539 @@ -204,7 +204,7 @@ static void unlocked_usbctlx_complete(hf
32540
32541 struct usbctlx_completor {
32542 int (*complete) (struct usbctlx_completor *);
32543 -};
32544 +} __no_const;
32545
32546 static int
32547 hfa384x_usbctlx_complete_sync(hfa384x_t *hw,
32548 diff -urNp linux-2.6.39.4/drivers/target/target_core_alua.c linux-2.6.39.4/drivers/target/target_core_alua.c
32549 --- linux-2.6.39.4/drivers/target/target_core_alua.c 2011-05-19 00:06:34.000000000 -0400
32550 +++ linux-2.6.39.4/drivers/target/target_core_alua.c 2011-08-05 19:44:37.000000000 -0400
32551 @@ -675,6 +675,8 @@ static int core_alua_update_tpg_primary_
32552 char path[ALUA_METADATA_PATH_LEN];
32553 int len;
32554
32555 + pax_track_stack();
32556 +
32557 memset(path, 0, ALUA_METADATA_PATH_LEN);
32558
32559 len = snprintf(md_buf, tg_pt_gp->tg_pt_gp_md_buf_len,
32560 @@ -938,6 +940,8 @@ static int core_alua_update_tpg_secondar
32561 char path[ALUA_METADATA_PATH_LEN], wwn[ALUA_SECONDARY_METADATA_WWN_LEN];
32562 int len;
32563
32564 + pax_track_stack();
32565 +
32566 memset(path, 0, ALUA_METADATA_PATH_LEN);
32567 memset(wwn, 0, ALUA_SECONDARY_METADATA_WWN_LEN);
32568
32569 diff -urNp linux-2.6.39.4/drivers/target/target_core_cdb.c linux-2.6.39.4/drivers/target/target_core_cdb.c
32570 --- linux-2.6.39.4/drivers/target/target_core_cdb.c 2011-05-19 00:06:34.000000000 -0400
32571 +++ linux-2.6.39.4/drivers/target/target_core_cdb.c 2011-08-05 19:44:37.000000000 -0400
32572 @@ -838,6 +838,8 @@ target_emulate_modesense(struct se_cmd *
32573 int length = 0;
32574 unsigned char buf[SE_MODE_PAGE_BUF];
32575
32576 + pax_track_stack();
32577 +
32578 memset(buf, 0, SE_MODE_PAGE_BUF);
32579
32580 switch (cdb[2] & 0x3f) {
32581 diff -urNp linux-2.6.39.4/drivers/target/target_core_configfs.c linux-2.6.39.4/drivers/target/target_core_configfs.c
32582 --- linux-2.6.39.4/drivers/target/target_core_configfs.c 2011-05-19 00:06:34.000000000 -0400
32583 +++ linux-2.6.39.4/drivers/target/target_core_configfs.c 2011-08-05 20:34:06.000000000 -0400
32584 @@ -1280,6 +1280,8 @@ static ssize_t target_core_dev_pr_show_a
32585 ssize_t len = 0;
32586 int reg_count = 0, prf_isid;
32587
32588 + pax_track_stack();
32589 +
32590 if (!(su_dev->se_dev_ptr))
32591 return -ENODEV;
32592
32593 diff -urNp linux-2.6.39.4/drivers/target/target_core_pr.c linux-2.6.39.4/drivers/target/target_core_pr.c
32594 --- linux-2.6.39.4/drivers/target/target_core_pr.c 2011-05-19 00:06:34.000000000 -0400
32595 +++ linux-2.6.39.4/drivers/target/target_core_pr.c 2011-08-05 19:44:37.000000000 -0400
32596 @@ -918,6 +918,8 @@ static int __core_scsi3_check_aptpl_regi
32597 unsigned char t_port[PR_APTPL_MAX_TPORT_LEN];
32598 u16 tpgt;
32599
32600 + pax_track_stack();
32601 +
32602 memset(i_port, 0, PR_APTPL_MAX_IPORT_LEN);
32603 memset(t_port, 0, PR_APTPL_MAX_TPORT_LEN);
32604 /*
32605 @@ -1861,6 +1863,8 @@ static int __core_scsi3_update_aptpl_buf
32606 ssize_t len = 0;
32607 int reg_count = 0;
32608
32609 + pax_track_stack();
32610 +
32611 memset(buf, 0, pr_aptpl_buf_len);
32612 /*
32613 * Called to clear metadata once APTPL has been deactivated.
32614 @@ -1983,6 +1987,8 @@ static int __core_scsi3_write_aptpl_to_f
32615 char path[512];
32616 int ret;
32617
32618 + pax_track_stack();
32619 +
32620 memset(iov, 0, sizeof(struct iovec));
32621 memset(path, 0, 512);
32622
32623 diff -urNp linux-2.6.39.4/drivers/target/target_core_tmr.c linux-2.6.39.4/drivers/target/target_core_tmr.c
32624 --- linux-2.6.39.4/drivers/target/target_core_tmr.c 2011-06-03 00:04:14.000000000 -0400
32625 +++ linux-2.6.39.4/drivers/target/target_core_tmr.c 2011-08-05 19:44:37.000000000 -0400
32626 @@ -263,7 +263,7 @@ int core_tmr_lun_reset(
32627 CMD_TFO(cmd)->get_task_tag(cmd), cmd->pr_res_key,
32628 T_TASK(cmd)->t_task_cdbs,
32629 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32630 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32631 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32632 atomic_read(&T_TASK(cmd)->t_transport_active),
32633 atomic_read(&T_TASK(cmd)->t_transport_stop),
32634 atomic_read(&T_TASK(cmd)->t_transport_sent));
32635 @@ -305,7 +305,7 @@ int core_tmr_lun_reset(
32636 DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
32637 " task: %p, t_fe_count: %d dev: %p\n", task,
32638 fe_count, dev);
32639 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32640 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32641 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock,
32642 flags);
32643 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32644 @@ -315,7 +315,7 @@ int core_tmr_lun_reset(
32645 }
32646 DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
32647 " t_fe_count: %d dev: %p\n", task, fe_count, dev);
32648 - atomic_set(&T_TASK(cmd)->t_transport_aborted, 1);
32649 + atomic_set_unchecked(&T_TASK(cmd)->t_transport_aborted, 1);
32650 spin_unlock_irqrestore(&T_TASK(cmd)->t_state_lock, flags);
32651 core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);
32652
32653 diff -urNp linux-2.6.39.4/drivers/target/target_core_transport.c linux-2.6.39.4/drivers/target/target_core_transport.c
32654 --- linux-2.6.39.4/drivers/target/target_core_transport.c 2011-06-03 00:04:14.000000000 -0400
32655 +++ linux-2.6.39.4/drivers/target/target_core_transport.c 2011-08-05 19:44:37.000000000 -0400
32656 @@ -1681,7 +1681,7 @@ struct se_device *transport_add_device_t
32657
32658 dev->queue_depth = dev_limits->queue_depth;
32659 atomic_set(&dev->depth_left, dev->queue_depth);
32660 - atomic_set(&dev->dev_ordered_id, 0);
32661 + atomic_set_unchecked(&dev->dev_ordered_id, 0);
32662
32663 se_dev_set_default_attribs(dev, dev_limits);
32664
32665 @@ -1882,7 +1882,7 @@ static int transport_check_alloc_task_at
32666 * Used to determine when ORDERED commands should go from
32667 * Dormant to Active status.
32668 */
32669 - cmd->se_ordered_id = atomic_inc_return(&SE_DEV(cmd)->dev_ordered_id);
32670 + cmd->se_ordered_id = atomic_inc_return_unchecked(&SE_DEV(cmd)->dev_ordered_id);
32671 smp_mb__after_atomic_inc();
32672 DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
32673 cmd->se_ordered_id, cmd->sam_task_attr,
32674 @@ -2169,7 +2169,7 @@ static void transport_generic_request_fa
32675 " t_transport_active: %d t_transport_stop: %d"
32676 " t_transport_sent: %d\n", T_TASK(cmd)->t_task_cdbs,
32677 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32678 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32679 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32680 atomic_read(&T_TASK(cmd)->t_task_cdbs_ex_left),
32681 atomic_read(&T_TASK(cmd)->t_transport_active),
32682 atomic_read(&T_TASK(cmd)->t_transport_stop),
32683 @@ -2673,9 +2673,9 @@ check_depth:
32684 spin_lock_irqsave(&T_TASK(cmd)->t_state_lock, flags);
32685 atomic_set(&task->task_active, 1);
32686 atomic_set(&task->task_sent, 1);
32687 - atomic_inc(&T_TASK(cmd)->t_task_cdbs_sent);
32688 + atomic_inc_unchecked(&T_TASK(cmd)->t_task_cdbs_sent);
32689
32690 - if (atomic_read(&T_TASK(cmd)->t_task_cdbs_sent) ==
32691 + if (atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent) ==
32692 T_TASK(cmd)->t_task_cdbs)
32693 atomic_set(&cmd->transport_sent, 1);
32694
32695 @@ -5568,7 +5568,7 @@ static void transport_generic_wait_for_t
32696 atomic_set(&T_TASK(cmd)->transport_lun_stop, 0);
32697 }
32698 if (!atomic_read(&T_TASK(cmd)->t_transport_active) ||
32699 - atomic_read(&T_TASK(cmd)->t_transport_aborted))
32700 + atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted))
32701 goto remove;
32702
32703 atomic_set(&T_TASK(cmd)->t_transport_stop, 1);
32704 @@ -5797,7 +5797,7 @@ int transport_check_aborted_status(struc
32705 {
32706 int ret = 0;
32707
32708 - if (atomic_read(&T_TASK(cmd)->t_transport_aborted) != 0) {
32709 + if (atomic_read_unchecked(&T_TASK(cmd)->t_transport_aborted) != 0) {
32710 if (!(send_status) ||
32711 (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
32712 return 1;
32713 @@ -5825,7 +5825,7 @@ void transport_send_task_abort(struct se
32714 */
32715 if (cmd->data_direction == DMA_TO_DEVICE) {
32716 if (CMD_TFO(cmd)->write_pending_status(cmd) != 0) {
32717 - atomic_inc(&T_TASK(cmd)->t_transport_aborted);
32718 + atomic_inc_unchecked(&T_TASK(cmd)->t_transport_aborted);
32719 smp_mb__after_atomic_inc();
32720 cmd->scsi_status = SAM_STAT_TASK_ABORTED;
32721 transport_new_cmd_failure(cmd);
32722 @@ -5949,7 +5949,7 @@ static void transport_processing_shutdow
32723 CMD_TFO(cmd)->get_task_tag(cmd),
32724 T_TASK(cmd)->t_task_cdbs,
32725 atomic_read(&T_TASK(cmd)->t_task_cdbs_left),
32726 - atomic_read(&T_TASK(cmd)->t_task_cdbs_sent),
32727 + atomic_read_unchecked(&T_TASK(cmd)->t_task_cdbs_sent),
32728 atomic_read(&T_TASK(cmd)->t_transport_active),
32729 atomic_read(&T_TASK(cmd)->t_transport_stop),
32730 atomic_read(&T_TASK(cmd)->t_transport_sent));
32731 diff -urNp linux-2.6.39.4/drivers/telephony/ixj.c linux-2.6.39.4/drivers/telephony/ixj.c
32732 --- linux-2.6.39.4/drivers/telephony/ixj.c 2011-05-19 00:06:34.000000000 -0400
32733 +++ linux-2.6.39.4/drivers/telephony/ixj.c 2011-08-05 19:44:37.000000000 -0400
32734 @@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
32735 bool mContinue;
32736 char *pIn, *pOut;
32737
32738 + pax_track_stack();
32739 +
32740 if (!SCI_Prepare(j))
32741 return 0;
32742
32743 diff -urNp linux-2.6.39.4/drivers/tty/hvc/hvcs.c linux-2.6.39.4/drivers/tty/hvc/hvcs.c
32744 --- linux-2.6.39.4/drivers/tty/hvc/hvcs.c 2011-05-19 00:06:34.000000000 -0400
32745 +++ linux-2.6.39.4/drivers/tty/hvc/hvcs.c 2011-08-05 19:44:37.000000000 -0400
32746 @@ -83,6 +83,7 @@
32747 #include <asm/hvcserver.h>
32748 #include <asm/uaccess.h>
32749 #include <asm/vio.h>
32750 +#include <asm/local.h>
32751
32752 /*
32753 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
32754 @@ -270,7 +271,7 @@ struct hvcs_struct {
32755 unsigned int index;
32756
32757 struct tty_struct *tty;
32758 - int open_count;
32759 + local_t open_count;
32760
32761 /*
32762 * Used to tell the driver kernel_thread what operations need to take
32763 @@ -422,7 +423,7 @@ static ssize_t hvcs_vterm_state_store(st
32764
32765 spin_lock_irqsave(&hvcsd->lock, flags);
32766
32767 - if (hvcsd->open_count > 0) {
32768 + if (local_read(&hvcsd->open_count) > 0) {
32769 spin_unlock_irqrestore(&hvcsd->lock, flags);
32770 printk(KERN_INFO "HVCS: vterm state unchanged. "
32771 "The hvcs device node is still in use.\n");
32772 @@ -1145,7 +1146,7 @@ static int hvcs_open(struct tty_struct *
32773 if ((retval = hvcs_partner_connect(hvcsd)))
32774 goto error_release;
32775
32776 - hvcsd->open_count = 1;
32777 + local_set(&hvcsd->open_count, 1);
32778 hvcsd->tty = tty;
32779 tty->driver_data = hvcsd;
32780
32781 @@ -1179,7 +1180,7 @@ fast_open:
32782
32783 spin_lock_irqsave(&hvcsd->lock, flags);
32784 kref_get(&hvcsd->kref);
32785 - hvcsd->open_count++;
32786 + local_inc(&hvcsd->open_count);
32787 hvcsd->todo_mask |= HVCS_SCHED_READ;
32788 spin_unlock_irqrestore(&hvcsd->lock, flags);
32789
32790 @@ -1223,7 +1224,7 @@ static void hvcs_close(struct tty_struct
32791 hvcsd = tty->driver_data;
32792
32793 spin_lock_irqsave(&hvcsd->lock, flags);
32794 - if (--hvcsd->open_count == 0) {
32795 + if (local_dec_and_test(&hvcsd->open_count)) {
32796
32797 vio_disable_interrupts(hvcsd->vdev);
32798
32799 @@ -1249,10 +1250,10 @@ static void hvcs_close(struct tty_struct
32800 free_irq(irq, hvcsd);
32801 kref_put(&hvcsd->kref, destroy_hvcs_struct);
32802 return;
32803 - } else if (hvcsd->open_count < 0) {
32804 + } else if (local_read(&hvcsd->open_count) < 0) {
32805 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
32806 " is missmanaged.\n",
32807 - hvcsd->vdev->unit_address, hvcsd->open_count);
32808 + hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
32809 }
32810
32811 spin_unlock_irqrestore(&hvcsd->lock, flags);
32812 @@ -1268,7 +1269,7 @@ static void hvcs_hangup(struct tty_struc
32813
32814 spin_lock_irqsave(&hvcsd->lock, flags);
32815 /* Preserve this so that we know how many kref refs to put */
32816 - temp_open_count = hvcsd->open_count;
32817 + temp_open_count = local_read(&hvcsd->open_count);
32818
32819 /*
32820 * Don't kref put inside the spinlock because the destruction
32821 @@ -1283,7 +1284,7 @@ static void hvcs_hangup(struct tty_struc
32822 hvcsd->tty->driver_data = NULL;
32823 hvcsd->tty = NULL;
32824
32825 - hvcsd->open_count = 0;
32826 + local_set(&hvcsd->open_count, 0);
32827
32828 /* This will drop any buffered data on the floor which is OK in a hangup
32829 * scenario. */
32830 @@ -1354,7 +1355,7 @@ static int hvcs_write(struct tty_struct
32831 * the middle of a write operation? This is a crummy place to do this
32832 * but we want to keep it all in the spinlock.
32833 */
32834 - if (hvcsd->open_count <= 0) {
32835 + if (local_read(&hvcsd->open_count) <= 0) {
32836 spin_unlock_irqrestore(&hvcsd->lock, flags);
32837 return -ENODEV;
32838 }
32839 @@ -1428,7 +1429,7 @@ static int hvcs_write_room(struct tty_st
32840 {
32841 struct hvcs_struct *hvcsd = tty->driver_data;
32842
32843 - if (!hvcsd || hvcsd->open_count <= 0)
32844 + if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
32845 return 0;
32846
32847 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
32848 diff -urNp linux-2.6.39.4/drivers/tty/ipwireless/tty.c linux-2.6.39.4/drivers/tty/ipwireless/tty.c
32849 --- linux-2.6.39.4/drivers/tty/ipwireless/tty.c 2011-05-19 00:06:34.000000000 -0400
32850 +++ linux-2.6.39.4/drivers/tty/ipwireless/tty.c 2011-08-05 19:44:37.000000000 -0400
32851 @@ -29,6 +29,7 @@
32852 #include <linux/tty_driver.h>
32853 #include <linux/tty_flip.h>
32854 #include <linux/uaccess.h>
32855 +#include <asm/local.h>
32856
32857 #include "tty.h"
32858 #include "network.h"
32859 @@ -51,7 +52,7 @@ struct ipw_tty {
32860 int tty_type;
32861 struct ipw_network *network;
32862 struct tty_struct *linux_tty;
32863 - int open_count;
32864 + local_t open_count;
32865 unsigned int control_lines;
32866 struct mutex ipw_tty_mutex;
32867 int tx_bytes_queued;
32868 @@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
32869 mutex_unlock(&tty->ipw_tty_mutex);
32870 return -ENODEV;
32871 }
32872 - if (tty->open_count == 0)
32873 + if (local_read(&tty->open_count) == 0)
32874 tty->tx_bytes_queued = 0;
32875
32876 - tty->open_count++;
32877 + local_inc(&tty->open_count);
32878
32879 tty->linux_tty = linux_tty;
32880 linux_tty->driver_data = tty;
32881 @@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
32882
32883 static void do_ipw_close(struct ipw_tty *tty)
32884 {
32885 - tty->open_count--;
32886 -
32887 - if (tty->open_count == 0) {
32888 + if (local_dec_return(&tty->open_count) == 0) {
32889 struct tty_struct *linux_tty = tty->linux_tty;
32890
32891 if (linux_tty != NULL) {
32892 @@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
32893 return;
32894
32895 mutex_lock(&tty->ipw_tty_mutex);
32896 - if (tty->open_count == 0) {
32897 + if (local_read(&tty->open_count) == 0) {
32898 mutex_unlock(&tty->ipw_tty_mutex);
32899 return;
32900 }
32901 @@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
32902 return;
32903 }
32904
32905 - if (!tty->open_count) {
32906 + if (!local_read(&tty->open_count)) {
32907 mutex_unlock(&tty->ipw_tty_mutex);
32908 return;
32909 }
32910 @@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
32911 return -ENODEV;
32912
32913 mutex_lock(&tty->ipw_tty_mutex);
32914 - if (!tty->open_count) {
32915 + if (!local_read(&tty->open_count)) {
32916 mutex_unlock(&tty->ipw_tty_mutex);
32917 return -EINVAL;
32918 }
32919 @@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
32920 if (!tty)
32921 return -ENODEV;
32922
32923 - if (!tty->open_count)
32924 + if (!local_read(&tty->open_count))
32925 return -EINVAL;
32926
32927 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
32928 @@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
32929 if (!tty)
32930 return 0;
32931
32932 - if (!tty->open_count)
32933 + if (!local_read(&tty->open_count))
32934 return 0;
32935
32936 return tty->tx_bytes_queued;
32937 @@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
32938 if (!tty)
32939 return -ENODEV;
32940
32941 - if (!tty->open_count)
32942 + if (!local_read(&tty->open_count))
32943 return -EINVAL;
32944
32945 return get_control_lines(tty);
32946 @@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
32947 if (!tty)
32948 return -ENODEV;
32949
32950 - if (!tty->open_count)
32951 + if (!local_read(&tty->open_count))
32952 return -EINVAL;
32953
32954 return set_control_lines(tty, set, clear);
32955 @@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
32956 if (!tty)
32957 return -ENODEV;
32958
32959 - if (!tty->open_count)
32960 + if (!local_read(&tty->open_count))
32961 return -EINVAL;
32962
32963 /* FIXME: Exactly how is the tty object locked here .. */
32964 @@ -582,7 +581,7 @@ void ipwireless_tty_free(struct ipw_tty
32965 against a parallel ioctl etc */
32966 mutex_lock(&ttyj->ipw_tty_mutex);
32967 }
32968 - while (ttyj->open_count)
32969 + while (local_read(&ttyj->open_count))
32970 do_ipw_close(ttyj);
32971 ipwireless_disassociate_network_ttys(network,
32972 ttyj->channel_idx);
32973 diff -urNp linux-2.6.39.4/drivers/tty/n_gsm.c linux-2.6.39.4/drivers/tty/n_gsm.c
32974 --- linux-2.6.39.4/drivers/tty/n_gsm.c 2011-05-19 00:06:34.000000000 -0400
32975 +++ linux-2.6.39.4/drivers/tty/n_gsm.c 2011-08-05 19:44:37.000000000 -0400
32976 @@ -1588,7 +1588,7 @@ static struct gsm_dlci *gsm_dlci_alloc(s
32977 return NULL;
32978 spin_lock_init(&dlci->lock);
32979 dlci->fifo = &dlci->_fifo;
32980 - if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
32981 + if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
32982 kfree(dlci);
32983 return NULL;
32984 }
32985 diff -urNp linux-2.6.39.4/drivers/tty/n_tty.c linux-2.6.39.4/drivers/tty/n_tty.c
32986 --- linux-2.6.39.4/drivers/tty/n_tty.c 2011-05-19 00:06:34.000000000 -0400
32987 +++ linux-2.6.39.4/drivers/tty/n_tty.c 2011-08-05 19:44:37.000000000 -0400
32988 @@ -2122,6 +2122,7 @@ void n_tty_inherit_ops(struct tty_ldisc_
32989 {
32990 *ops = tty_ldisc_N_TTY;
32991 ops->owner = NULL;
32992 - ops->refcount = ops->flags = 0;
32993 + atomic_set(&ops->refcount, 0);
32994 + ops->flags = 0;
32995 }
32996 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
32997 diff -urNp linux-2.6.39.4/drivers/tty/pty.c linux-2.6.39.4/drivers/tty/pty.c
32998 --- linux-2.6.39.4/drivers/tty/pty.c 2011-05-19 00:06:34.000000000 -0400
32999 +++ linux-2.6.39.4/drivers/tty/pty.c 2011-08-05 20:34:06.000000000 -0400
33000 @@ -753,8 +753,10 @@ static void __init unix98_pty_init(void)
33001 register_sysctl_table(pty_root_table);
33002
33003 /* Now create the /dev/ptmx special device */
33004 + pax_open_kernel();
33005 tty_default_fops(&ptmx_fops);
33006 - ptmx_fops.open = ptmx_open;
33007 + *(void **)&ptmx_fops.open = ptmx_open;
33008 + pax_close_kernel();
33009
33010 cdev_init(&ptmx_cdev, &ptmx_fops);
33011 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
33012 diff -urNp linux-2.6.39.4/drivers/tty/rocket.c linux-2.6.39.4/drivers/tty/rocket.c
33013 --- linux-2.6.39.4/drivers/tty/rocket.c 2011-05-19 00:06:34.000000000 -0400
33014 +++ linux-2.6.39.4/drivers/tty/rocket.c 2011-08-05 19:44:37.000000000 -0400
33015 @@ -1277,6 +1277,8 @@ static int get_ports(struct r_port *info
33016 struct rocket_ports tmp;
33017 int board;
33018
33019 + pax_track_stack();
33020 +
33021 if (!retports)
33022 return -EFAULT;
33023 memset(&tmp, 0, sizeof (tmp));
33024 diff -urNp linux-2.6.39.4/drivers/tty/serial/kgdboc.c linux-2.6.39.4/drivers/tty/serial/kgdboc.c
33025 --- linux-2.6.39.4/drivers/tty/serial/kgdboc.c 2011-05-19 00:06:34.000000000 -0400
33026 +++ linux-2.6.39.4/drivers/tty/serial/kgdboc.c 2011-08-05 20:34:06.000000000 -0400
33027 @@ -23,8 +23,9 @@
33028 #define MAX_CONFIG_LEN 40
33029
33030 static struct kgdb_io kgdboc_io_ops;
33031 +static struct kgdb_io kgdboc_io_ops_console;
33032
33033 -/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
33034 +/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
33035 static int configured = -1;
33036
33037 static char config[MAX_CONFIG_LEN];
33038 @@ -147,6 +148,8 @@ static void cleanup_kgdboc(void)
33039 kgdboc_unregister_kbd();
33040 if (configured == 1)
33041 kgdb_unregister_io_module(&kgdboc_io_ops);
33042 + else if (configured == 2)
33043 + kgdb_unregister_io_module(&kgdboc_io_ops_console);
33044 }
33045
33046 static int configure_kgdboc(void)
33047 @@ -156,13 +159,13 @@ static int configure_kgdboc(void)
33048 int err;
33049 char *cptr = config;
33050 struct console *cons;
33051 + int is_console = 0;
33052
33053 err = kgdboc_option_setup(config);
33054 if (err || !strlen(config) || isspace(config[0]))
33055 goto noconfig;
33056
33057 err = -ENODEV;
33058 - kgdboc_io_ops.is_console = 0;
33059 kgdb_tty_driver = NULL;
33060
33061 kgdboc_use_kms = 0;
33062 @@ -183,7 +186,7 @@ static int configure_kgdboc(void)
33063 int idx;
33064 if (cons->device && cons->device(cons, &idx) == p &&
33065 idx == tty_line) {
33066 - kgdboc_io_ops.is_console = 1;
33067 + is_console = 1;
33068 break;
33069 }
33070 cons = cons->next;
33071 @@ -193,12 +196,16 @@ static int configure_kgdboc(void)
33072 kgdb_tty_line = tty_line;
33073
33074 do_register:
33075 - err = kgdb_register_io_module(&kgdboc_io_ops);
33076 + if (is_console) {
33077 + err = kgdb_register_io_module(&kgdboc_io_ops_console);
33078 + configured = 2;
33079 + } else {
33080 + err = kgdb_register_io_module(&kgdboc_io_ops);
33081 + configured = 1;
33082 + }
33083 if (err)
33084 goto noconfig;
33085
33086 - configured = 1;
33087 -
33088 return 0;
33089
33090 noconfig:
33091 @@ -212,7 +219,7 @@ noconfig:
33092 static int __init init_kgdboc(void)
33093 {
33094 /* Already configured? */
33095 - if (configured == 1)
33096 + if (configured >= 1)
33097 return 0;
33098
33099 return configure_kgdboc();
33100 @@ -261,7 +268,7 @@ static int param_set_kgdboc_var(const ch
33101 if (config[len - 1] == '\n')
33102 config[len - 1] = '\0';
33103
33104 - if (configured == 1)
33105 + if (configured >= 1)
33106 cleanup_kgdboc();
33107
33108 /* Go and configure with the new params. */
33109 @@ -301,6 +308,15 @@ static struct kgdb_io kgdboc_io_ops = {
33110 .post_exception = kgdboc_post_exp_handler,
33111 };
33112
33113 +static struct kgdb_io kgdboc_io_ops_console = {
33114 + .name = "kgdboc",
33115 + .read_char = kgdboc_get_char,
33116 + .write_char = kgdboc_put_char,
33117 + .pre_exception = kgdboc_pre_exp_handler,
33118 + .post_exception = kgdboc_post_exp_handler,
33119 + .is_console = 1
33120 +};
33121 +
33122 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
33123 /* This is only available if kgdboc is a built in for early debugging */
33124 static int __init kgdboc_early_init(char *opt)
33125 diff -urNp linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c
33126 --- linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c 2011-05-19 00:06:34.000000000 -0400
33127 +++ linux-2.6.39.4/drivers/tty/serial/mrst_max3110.c 2011-08-05 20:34:06.000000000 -0400
33128 @@ -393,6 +393,8 @@ static void max3110_con_receive(struct u
33129 int loop = 1, num, total = 0;
33130 u8 recv_buf[512], *pbuf;
33131
33132 + pax_track_stack();
33133 +
33134 pbuf = recv_buf;
33135 do {
33136 num = max3110_read_multi(max, pbuf);
33137 diff -urNp linux-2.6.39.4/drivers/tty/tty_io.c linux-2.6.39.4/drivers/tty/tty_io.c
33138 --- linux-2.6.39.4/drivers/tty/tty_io.c 2011-05-19 00:06:34.000000000 -0400
33139 +++ linux-2.6.39.4/drivers/tty/tty_io.c 2011-08-05 20:34:06.000000000 -0400
33140 @@ -3200,7 +3200,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
33141
33142 void tty_default_fops(struct file_operations *fops)
33143 {
33144 - *fops = tty_fops;
33145 + memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
33146 }
33147
33148 /*
33149 diff -urNp linux-2.6.39.4/drivers/tty/tty_ldisc.c linux-2.6.39.4/drivers/tty/tty_ldisc.c
33150 --- linux-2.6.39.4/drivers/tty/tty_ldisc.c 2011-07-09 09:18:51.000000000 -0400
33151 +++ linux-2.6.39.4/drivers/tty/tty_ldisc.c 2011-08-05 19:44:37.000000000 -0400
33152 @@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
33153 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
33154 struct tty_ldisc_ops *ldo = ld->ops;
33155
33156 - ldo->refcount--;
33157 + atomic_dec(&ldo->refcount);
33158 module_put(ldo->owner);
33159 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33160
33161 @@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
33162 spin_lock_irqsave(&tty_ldisc_lock, flags);
33163 tty_ldiscs[disc] = new_ldisc;
33164 new_ldisc->num = disc;
33165 - new_ldisc->refcount = 0;
33166 + atomic_set(&new_ldisc->refcount, 0);
33167 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33168
33169 return ret;
33170 @@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
33171 return -EINVAL;
33172
33173 spin_lock_irqsave(&tty_ldisc_lock, flags);
33174 - if (tty_ldiscs[disc]->refcount)
33175 + if (atomic_read(&tty_ldiscs[disc]->refcount))
33176 ret = -EBUSY;
33177 else
33178 tty_ldiscs[disc] = NULL;
33179 @@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
33180 if (ldops) {
33181 ret = ERR_PTR(-EAGAIN);
33182 if (try_module_get(ldops->owner)) {
33183 - ldops->refcount++;
33184 + atomic_inc(&ldops->refcount);
33185 ret = ldops;
33186 }
33187 }
33188 @@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
33189 unsigned long flags;
33190
33191 spin_lock_irqsave(&tty_ldisc_lock, flags);
33192 - ldops->refcount--;
33193 + atomic_dec(&ldops->refcount);
33194 module_put(ldops->owner);
33195 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
33196 }
33197 diff -urNp linux-2.6.39.4/drivers/tty/vt/keyboard.c linux-2.6.39.4/drivers/tty/vt/keyboard.c
33198 --- linux-2.6.39.4/drivers/tty/vt/keyboard.c 2011-05-19 00:06:34.000000000 -0400
33199 +++ linux-2.6.39.4/drivers/tty/vt/keyboard.c 2011-08-05 19:44:37.000000000 -0400
33200 @@ -658,6 +658,16 @@ static void k_spec(struct vc_data *vc, u
33201 kbd->kbdmode == VC_OFF) &&
33202 value != KVAL(K_SAK))
33203 return; /* SAK is allowed even in raw mode */
33204 +
33205 +#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
33206 + {
33207 + void *func = fn_handler[value];
33208 + if (func == fn_show_state || func == fn_show_ptregs ||
33209 + func == fn_show_mem)
33210 + return;
33211 + }
33212 +#endif
33213 +
33214 fn_handler[value](vc);
33215 }
33216
33217 diff -urNp linux-2.6.39.4/drivers/tty/vt/vt.c linux-2.6.39.4/drivers/tty/vt/vt.c
33218 --- linux-2.6.39.4/drivers/tty/vt/vt.c 2011-05-19 00:06:34.000000000 -0400
33219 +++ linux-2.6.39.4/drivers/tty/vt/vt.c 2011-08-05 19:44:37.000000000 -0400
33220 @@ -261,7 +261,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
33221
33222 static void notify_write(struct vc_data *vc, unsigned int unicode)
33223 {
33224 - struct vt_notifier_param param = { .vc = vc, unicode = unicode };
33225 + struct vt_notifier_param param = { .vc = vc, .c = unicode };
33226 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
33227 }
33228
33229 diff -urNp linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c
33230 --- linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c 2011-05-19 00:06:34.000000000 -0400
33231 +++ linux-2.6.39.4/drivers/tty/vt/vt_ioctl.c 2011-08-05 19:44:37.000000000 -0400
33232 @@ -209,9 +209,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33233 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
33234 return -EFAULT;
33235
33236 - if (!capable(CAP_SYS_TTY_CONFIG))
33237 - perm = 0;
33238 -
33239 switch (cmd) {
33240 case KDGKBENT:
33241 key_map = key_maps[s];
33242 @@ -223,6 +220,9 @@ do_kdsk_ioctl(int cmd, struct kbentry __
33243 val = (i ? K_HOLE : K_NOSUCHMAP);
33244 return put_user(val, &user_kbe->kb_value);
33245 case KDSKBENT:
33246 + if (!capable(CAP_SYS_TTY_CONFIG))
33247 + perm = 0;
33248 +
33249 if (!perm)
33250 return -EPERM;
33251 if (!i && v == K_NOSUCHMAP) {
33252 @@ -324,9 +324,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33253 int i, j, k;
33254 int ret;
33255
33256 - if (!capable(CAP_SYS_TTY_CONFIG))
33257 - perm = 0;
33258 -
33259 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
33260 if (!kbs) {
33261 ret = -ENOMEM;
33262 @@ -360,6 +357,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
33263 kfree(kbs);
33264 return ((p && *p) ? -EOVERFLOW : 0);
33265 case KDSKBSENT:
33266 + if (!capable(CAP_SYS_TTY_CONFIG))
33267 + perm = 0;
33268 +
33269 if (!perm) {
33270 ret = -EPERM;
33271 goto reterr;
33272 diff -urNp linux-2.6.39.4/drivers/uio/uio.c linux-2.6.39.4/drivers/uio/uio.c
33273 --- linux-2.6.39.4/drivers/uio/uio.c 2011-05-19 00:06:34.000000000 -0400
33274 +++ linux-2.6.39.4/drivers/uio/uio.c 2011-08-05 19:44:37.000000000 -0400
33275 @@ -25,6 +25,7 @@
33276 #include <linux/kobject.h>
33277 #include <linux/cdev.h>
33278 #include <linux/uio_driver.h>
33279 +#include <asm/local.h>
33280
33281 #define UIO_MAX_DEVICES (1U << MINORBITS)
33282
33283 @@ -32,10 +33,10 @@ struct uio_device {
33284 struct module *owner;
33285 struct device *dev;
33286 int minor;
33287 - atomic_t event;
33288 + atomic_unchecked_t event;
33289 struct fasync_struct *async_queue;
33290 wait_queue_head_t wait;
33291 - int vma_count;
33292 + local_t vma_count;
33293 struct uio_info *info;
33294 struct kobject *map_dir;
33295 struct kobject *portio_dir;
33296 @@ -242,7 +243,7 @@ static ssize_t show_event(struct device
33297 struct device_attribute *attr, char *buf)
33298 {
33299 struct uio_device *idev = dev_get_drvdata(dev);
33300 - return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
33301 + return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
33302 }
33303
33304 static struct device_attribute uio_class_attributes[] = {
33305 @@ -402,7 +403,7 @@ void uio_event_notify(struct uio_info *i
33306 {
33307 struct uio_device *idev = info->uio_dev;
33308
33309 - atomic_inc(&idev->event);
33310 + atomic_inc_unchecked(&idev->event);
33311 wake_up_interruptible(&idev->wait);
33312 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
33313 }
33314 @@ -455,7 +456,7 @@ static int uio_open(struct inode *inode,
33315 }
33316
33317 listener->dev = idev;
33318 - listener->event_count = atomic_read(&idev->event);
33319 + listener->event_count = atomic_read_unchecked(&idev->event);
33320 filep->private_data = listener;
33321
33322 if (idev->info->open) {
33323 @@ -506,7 +507,7 @@ static unsigned int uio_poll(struct file
33324 return -EIO;
33325
33326 poll_wait(filep, &idev->wait, wait);
33327 - if (listener->event_count != atomic_read(&idev->event))
33328 + if (listener->event_count != atomic_read_unchecked(&idev->event))
33329 return POLLIN | POLLRDNORM;
33330 return 0;
33331 }
33332 @@ -531,7 +532,7 @@ static ssize_t uio_read(struct file *fil
33333 do {
33334 set_current_state(TASK_INTERRUPTIBLE);
33335
33336 - event_count = atomic_read(&idev->event);
33337 + event_count = atomic_read_unchecked(&idev->event);
33338 if (event_count != listener->event_count) {
33339 if (copy_to_user(buf, &event_count, count))
33340 retval = -EFAULT;
33341 @@ -602,13 +603,13 @@ static int uio_find_mem_index(struct vm_
33342 static void uio_vma_open(struct vm_area_struct *vma)
33343 {
33344 struct uio_device *idev = vma->vm_private_data;
33345 - idev->vma_count++;
33346 + local_inc(&idev->vma_count);
33347 }
33348
33349 static void uio_vma_close(struct vm_area_struct *vma)
33350 {
33351 struct uio_device *idev = vma->vm_private_data;
33352 - idev->vma_count--;
33353 + local_dec(&idev->vma_count);
33354 }
33355
33356 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33357 @@ -819,7 +820,7 @@ int __uio_register_device(struct module
33358 idev->owner = owner;
33359 idev->info = info;
33360 init_waitqueue_head(&idev->wait);
33361 - atomic_set(&idev->event, 0);
33362 + atomic_set_unchecked(&idev->event, 0);
33363
33364 ret = uio_get_minor(idev);
33365 if (ret)
33366 diff -urNp linux-2.6.39.4/drivers/usb/atm/cxacru.c linux-2.6.39.4/drivers/usb/atm/cxacru.c
33367 --- linux-2.6.39.4/drivers/usb/atm/cxacru.c 2011-05-19 00:06:34.000000000 -0400
33368 +++ linux-2.6.39.4/drivers/usb/atm/cxacru.c 2011-08-05 19:44:37.000000000 -0400
33369 @@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_c
33370 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
33371 if (ret < 2)
33372 return -EINVAL;
33373 - if (index < 0 || index > 0x7f)
33374 + if (index > 0x7f)
33375 return -EINVAL;
33376 pos += tmp;
33377
33378 diff -urNp linux-2.6.39.4/drivers/usb/atm/usbatm.c linux-2.6.39.4/drivers/usb/atm/usbatm.c
33379 --- linux-2.6.39.4/drivers/usb/atm/usbatm.c 2011-05-19 00:06:34.000000000 -0400
33380 +++ linux-2.6.39.4/drivers/usb/atm/usbatm.c 2011-08-05 19:44:37.000000000 -0400
33381 @@ -332,7 +332,7 @@ static void usbatm_extract_one_cell(stru
33382 if (printk_ratelimit())
33383 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
33384 __func__, vpi, vci);
33385 - atomic_inc(&vcc->stats->rx_err);
33386 + atomic_inc_unchecked(&vcc->stats->rx_err);
33387 return;
33388 }
33389
33390 @@ -360,7 +360,7 @@ static void usbatm_extract_one_cell(stru
33391 if (length > ATM_MAX_AAL5_PDU) {
33392 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
33393 __func__, length, vcc);
33394 - atomic_inc(&vcc->stats->rx_err);
33395 + atomic_inc_unchecked(&vcc->stats->rx_err);
33396 goto out;
33397 }
33398
33399 @@ -369,14 +369,14 @@ static void usbatm_extract_one_cell(stru
33400 if (sarb->len < pdu_length) {
33401 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
33402 __func__, pdu_length, sarb->len, vcc);
33403 - atomic_inc(&vcc->stats->rx_err);
33404 + atomic_inc_unchecked(&vcc->stats->rx_err);
33405 goto out;
33406 }
33407
33408 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
33409 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
33410 __func__, vcc);
33411 - atomic_inc(&vcc->stats->rx_err);
33412 + atomic_inc_unchecked(&vcc->stats->rx_err);
33413 goto out;
33414 }
33415
33416 @@ -386,7 +386,7 @@ static void usbatm_extract_one_cell(stru
33417 if (printk_ratelimit())
33418 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
33419 __func__, length);
33420 - atomic_inc(&vcc->stats->rx_drop);
33421 + atomic_inc_unchecked(&vcc->stats->rx_drop);
33422 goto out;
33423 }
33424
33425 @@ -411,7 +411,7 @@ static void usbatm_extract_one_cell(stru
33426
33427 vcc->push(vcc, skb);
33428
33429 - atomic_inc(&vcc->stats->rx);
33430 + atomic_inc_unchecked(&vcc->stats->rx);
33431 out:
33432 skb_trim(sarb, 0);
33433 }
33434 @@ -614,7 +614,7 @@ static void usbatm_tx_process(unsigned l
33435 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
33436
33437 usbatm_pop(vcc, skb);
33438 - atomic_inc(&vcc->stats->tx);
33439 + atomic_inc_unchecked(&vcc->stats->tx);
33440
33441 skb = skb_dequeue(&instance->sndqueue);
33442 }
33443 @@ -773,11 +773,11 @@ static int usbatm_atm_proc_read(struct a
33444 if (!left--)
33445 return sprintf(page,
33446 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
33447 - atomic_read(&atm_dev->stats.aal5.tx),
33448 - atomic_read(&atm_dev->stats.aal5.tx_err),
33449 - atomic_read(&atm_dev->stats.aal5.rx),
33450 - atomic_read(&atm_dev->stats.aal5.rx_err),
33451 - atomic_read(&atm_dev->stats.aal5.rx_drop));
33452 + atomic_read_unchecked(&atm_dev->stats.aal5.tx),
33453 + atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
33454 + atomic_read_unchecked(&atm_dev->stats.aal5.rx),
33455 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
33456 + atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
33457
33458 if (!left--) {
33459 if (instance->disconnected)
33460 diff -urNp linux-2.6.39.4/drivers/usb/core/devices.c linux-2.6.39.4/drivers/usb/core/devices.c
33461 --- linux-2.6.39.4/drivers/usb/core/devices.c 2011-05-19 00:06:34.000000000 -0400
33462 +++ linux-2.6.39.4/drivers/usb/core/devices.c 2011-08-05 19:44:37.000000000 -0400
33463 @@ -126,7 +126,7 @@ static const char *format_endpt =
33464 * time it gets called.
33465 */
33466 static struct device_connect_event {
33467 - atomic_t count;
33468 + atomic_unchecked_t count;
33469 wait_queue_head_t wait;
33470 } device_event = {
33471 .count = ATOMIC_INIT(1),
33472 @@ -164,7 +164,7 @@ static const struct class_info clas_info
33473
33474 void usbfs_conn_disc_event(void)
33475 {
33476 - atomic_add(2, &device_event.count);
33477 + atomic_add_unchecked(2, &device_event.count);
33478 wake_up(&device_event.wait);
33479 }
33480
33481 @@ -648,7 +648,7 @@ static unsigned int usb_device_poll(stru
33482
33483 poll_wait(file, &device_event.wait, wait);
33484
33485 - event_count = atomic_read(&device_event.count);
33486 + event_count = atomic_read_unchecked(&device_event.count);
33487 if (file->f_version != event_count) {
33488 file->f_version = event_count;
33489 return POLLIN | POLLRDNORM;
33490 diff -urNp linux-2.6.39.4/drivers/usb/core/message.c linux-2.6.39.4/drivers/usb/core/message.c
33491 --- linux-2.6.39.4/drivers/usb/core/message.c 2011-07-09 09:18:51.000000000 -0400
33492 +++ linux-2.6.39.4/drivers/usb/core/message.c 2011-08-05 19:44:37.000000000 -0400
33493 @@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device
33494 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
33495 if (buf) {
33496 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
33497 - if (len > 0) {
33498 - smallbuf = kmalloc(++len, GFP_NOIO);
33499 + if (len++ > 0) {
33500 + smallbuf = kmalloc(len, GFP_NOIO);
33501 if (!smallbuf)
33502 return buf;
33503 memcpy(smallbuf, buf, len);
33504 diff -urNp linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c
33505 --- linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c 2011-05-19 00:06:34.000000000 -0400
33506 +++ linux-2.6.39.4/drivers/usb/early/ehci-dbgp.c 2011-08-05 20:34:06.000000000 -0400
33507 @@ -97,7 +97,8 @@ static inline u32 dbgp_len_update(u32 x,
33508
33509 #ifdef CONFIG_KGDB
33510 static struct kgdb_io kgdbdbgp_io_ops;
33511 -#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
33512 +static struct kgdb_io kgdbdbgp_io_ops_console;
33513 +#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
33514 #else
33515 #define dbgp_kgdb_mode (0)
33516 #endif
33517 @@ -1032,6 +1033,13 @@ static struct kgdb_io kgdbdbgp_io_ops =
33518 .write_char = kgdbdbgp_write_char,
33519 };
33520
33521 +static struct kgdb_io kgdbdbgp_io_ops_console = {
33522 + .name = "kgdbdbgp",
33523 + .read_char = kgdbdbgp_read_char,
33524 + .write_char = kgdbdbgp_write_char,
33525 + .is_console = 1
33526 +};
33527 +
33528 static int kgdbdbgp_wait_time;
33529
33530 static int __init kgdbdbgp_parse_config(char *str)
33531 @@ -1047,8 +1055,10 @@ static int __init kgdbdbgp_parse_config(
33532 ptr++;
33533 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
33534 }
33535 - kgdb_register_io_module(&kgdbdbgp_io_ops);
33536 - kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
33537 + if (early_dbgp_console.index != -1)
33538 + kgdb_register_io_module(&kgdbdbgp_io_ops_console);
33539 + else
33540 + kgdb_register_io_module(&kgdbdbgp_io_ops);
33541
33542 return 0;
33543 }
33544 diff -urNp linux-2.6.39.4/drivers/usb/host/xhci-mem.c linux-2.6.39.4/drivers/usb/host/xhci-mem.c
33545 --- linux-2.6.39.4/drivers/usb/host/xhci-mem.c 2011-06-25 12:55:23.000000000 -0400
33546 +++ linux-2.6.39.4/drivers/usb/host/xhci-mem.c 2011-08-05 19:44:37.000000000 -0400
33547 @@ -1680,6 +1680,8 @@ static int xhci_check_trb_in_td_math(str
33548 unsigned int num_tests;
33549 int i, ret;
33550
33551 + pax_track_stack();
33552 +
33553 num_tests = ARRAY_SIZE(simple_test_vector);
33554 for (i = 0; i < num_tests; i++) {
33555 ret = xhci_test_trb_in_td(xhci,
33556 diff -urNp linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h
33557 --- linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h 2011-05-19 00:06:34.000000000 -0400
33558 +++ linux-2.6.39.4/drivers/usb/wusbcore/wa-hc.h 2011-08-05 19:44:37.000000000 -0400
33559 @@ -192,7 +192,7 @@ struct wahc {
33560 struct list_head xfer_delayed_list;
33561 spinlock_t xfer_list_lock;
33562 struct work_struct xfer_work;
33563 - atomic_t xfer_id_count;
33564 + atomic_unchecked_t xfer_id_count;
33565 };
33566
33567
33568 @@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
33569 INIT_LIST_HEAD(&wa->xfer_delayed_list);
33570 spin_lock_init(&wa->xfer_list_lock);
33571 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
33572 - atomic_set(&wa->xfer_id_count, 1);
33573 + atomic_set_unchecked(&wa->xfer_id_count, 1);
33574 }
33575
33576 /**
33577 diff -urNp linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c
33578 --- linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c 2011-05-19 00:06:34.000000000 -0400
33579 +++ linux-2.6.39.4/drivers/usb/wusbcore/wa-xfer.c 2011-08-05 19:44:37.000000000 -0400
33580 @@ -294,7 +294,7 @@ out:
33581 */
33582 static void wa_xfer_id_init(struct wa_xfer *xfer)
33583 {
33584 - xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
33585 + xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
33586 }
33587
33588 /*
33589 diff -urNp linux-2.6.39.4/drivers/vhost/vhost.c linux-2.6.39.4/drivers/vhost/vhost.c
33590 --- linux-2.6.39.4/drivers/vhost/vhost.c 2011-05-19 00:06:34.000000000 -0400
33591 +++ linux-2.6.39.4/drivers/vhost/vhost.c 2011-08-05 19:44:37.000000000 -0400
33592 @@ -580,7 +580,7 @@ static int init_used(struct vhost_virtqu
33593 return get_user(vq->last_used_idx, &used->idx);
33594 }
33595
33596 -static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp)
33597 +static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
33598 {
33599 struct file *eventfp, *filep = NULL,
33600 *pollstart = NULL, *pollstop = NULL;
33601 diff -urNp linux-2.6.39.4/drivers/video/fbcmap.c linux-2.6.39.4/drivers/video/fbcmap.c
33602 --- linux-2.6.39.4/drivers/video/fbcmap.c 2011-05-19 00:06:34.000000000 -0400
33603 +++ linux-2.6.39.4/drivers/video/fbcmap.c 2011-08-05 19:44:37.000000000 -0400
33604 @@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user
33605 rc = -ENODEV;
33606 goto out;
33607 }
33608 - if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
33609 - !info->fbops->fb_setcmap)) {
33610 + if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
33611 rc = -EINVAL;
33612 goto out1;
33613 }
33614 diff -urNp linux-2.6.39.4/drivers/video/fbmem.c linux-2.6.39.4/drivers/video/fbmem.c
33615 --- linux-2.6.39.4/drivers/video/fbmem.c 2011-05-19 00:06:34.000000000 -0400
33616 +++ linux-2.6.39.4/drivers/video/fbmem.c 2011-08-05 19:44:37.000000000 -0400
33617 @@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_in
33618 image->dx += image->width + 8;
33619 }
33620 } else if (rotate == FB_ROTATE_UD) {
33621 - for (x = 0; x < num && image->dx >= 0; x++) {
33622 + for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
33623 info->fbops->fb_imageblit(info, image);
33624 image->dx -= image->width + 8;
33625 }
33626 @@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_in
33627 image->dy += image->height + 8;
33628 }
33629 } else if (rotate == FB_ROTATE_CCW) {
33630 - for (x = 0; x < num && image->dy >= 0; x++) {
33631 + for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
33632 info->fbops->fb_imageblit(info, image);
33633 image->dy -= image->height + 8;
33634 }
33635 @@ -939,6 +939,8 @@ fb_set_var(struct fb_info *info, struct
33636 int flags = info->flags;
33637 int ret = 0;
33638
33639 + pax_track_stack();
33640 +
33641 if (var->activate & FB_ACTIVATE_INV_MODE) {
33642 struct fb_videomode mode1, mode2;
33643
33644 @@ -1064,6 +1066,8 @@ static long do_fb_ioctl(struct fb_info *
33645 void __user *argp = (void __user *)arg;
33646 long ret = 0;
33647
33648 + pax_track_stack();
33649 +
33650 switch (cmd) {
33651 case FBIOGET_VSCREENINFO:
33652 if (!lock_fb_info(info))
33653 @@ -1143,7 +1147,7 @@ static long do_fb_ioctl(struct fb_info *
33654 return -EFAULT;
33655 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
33656 return -EINVAL;
33657 - if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
33658 + if (con2fb.framebuffer >= FB_MAX)
33659 return -EINVAL;
33660 if (!registered_fb[con2fb.framebuffer])
33661 request_module("fb%d", con2fb.framebuffer);
33662 diff -urNp linux-2.6.39.4/drivers/video/i810/i810_accel.c linux-2.6.39.4/drivers/video/i810/i810_accel.c
33663 --- linux-2.6.39.4/drivers/video/i810/i810_accel.c 2011-05-19 00:06:34.000000000 -0400
33664 +++ linux-2.6.39.4/drivers/video/i810/i810_accel.c 2011-08-05 19:44:37.000000000 -0400
33665 @@ -73,6 +73,7 @@ static inline int wait_for_space(struct
33666 }
33667 }
33668 printk("ringbuffer lockup!!!\n");
33669 + printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
33670 i810_report_error(mmio);
33671 par->dev_flags |= LOCKUP;
33672 info->pixmap.scan_align = 1;
33673 diff -urNp linux-2.6.39.4/drivers/video/udlfb.c linux-2.6.39.4/drivers/video/udlfb.c
33674 --- linux-2.6.39.4/drivers/video/udlfb.c 2011-05-19 00:06:34.000000000 -0400
33675 +++ linux-2.6.39.4/drivers/video/udlfb.c 2011-08-05 19:44:37.000000000 -0400
33676 @@ -584,11 +584,11 @@ int dlfb_handle_damage(struct dlfb_data
33677 dlfb_urb_completion(urb);
33678
33679 error:
33680 - atomic_add(bytes_sent, &dev->bytes_sent);
33681 - atomic_add(bytes_identical, &dev->bytes_identical);
33682 - atomic_add(width*height*2, &dev->bytes_rendered);
33683 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33684 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33685 + atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
33686 end_cycles = get_cycles();
33687 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
33688 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33689 >> 10)), /* Kcycles */
33690 &dev->cpu_kcycles_used);
33691
33692 @@ -709,11 +709,11 @@ static void dlfb_dpy_deferred_io(struct
33693 dlfb_urb_completion(urb);
33694
33695 error:
33696 - atomic_add(bytes_sent, &dev->bytes_sent);
33697 - atomic_add(bytes_identical, &dev->bytes_identical);
33698 - atomic_add(bytes_rendered, &dev->bytes_rendered);
33699 + atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
33700 + atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
33701 + atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
33702 end_cycles = get_cycles();
33703 - atomic_add(((unsigned int) ((end_cycles - start_cycles)
33704 + atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
33705 >> 10)), /* Kcycles */
33706 &dev->cpu_kcycles_used);
33707 }
33708 @@ -1301,7 +1301,7 @@ static ssize_t metrics_bytes_rendered_sh
33709 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33710 struct dlfb_data *dev = fb_info->par;
33711 return snprintf(buf, PAGE_SIZE, "%u\n",
33712 - atomic_read(&dev->bytes_rendered));
33713 + atomic_read_unchecked(&dev->bytes_rendered));
33714 }
33715
33716 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
33717 @@ -1309,7 +1309,7 @@ static ssize_t metrics_bytes_identical_s
33718 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33719 struct dlfb_data *dev = fb_info->par;
33720 return snprintf(buf, PAGE_SIZE, "%u\n",
33721 - atomic_read(&dev->bytes_identical));
33722 + atomic_read_unchecked(&dev->bytes_identical));
33723 }
33724
33725 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
33726 @@ -1317,7 +1317,7 @@ static ssize_t metrics_bytes_sent_show(s
33727 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33728 struct dlfb_data *dev = fb_info->par;
33729 return snprintf(buf, PAGE_SIZE, "%u\n",
33730 - atomic_read(&dev->bytes_sent));
33731 + atomic_read_unchecked(&dev->bytes_sent));
33732 }
33733
33734 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
33735 @@ -1325,7 +1325,7 @@ static ssize_t metrics_cpu_kcycles_used_
33736 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33737 struct dlfb_data *dev = fb_info->par;
33738 return snprintf(buf, PAGE_SIZE, "%u\n",
33739 - atomic_read(&dev->cpu_kcycles_used));
33740 + atomic_read_unchecked(&dev->cpu_kcycles_used));
33741 }
33742
33743 static ssize_t edid_show(
33744 @@ -1382,10 +1382,10 @@ static ssize_t metrics_reset_store(struc
33745 struct fb_info *fb_info = dev_get_drvdata(fbdev);
33746 struct dlfb_data *dev = fb_info->par;
33747
33748 - atomic_set(&dev->bytes_rendered, 0);
33749 - atomic_set(&dev->bytes_identical, 0);
33750 - atomic_set(&dev->bytes_sent, 0);
33751 - atomic_set(&dev->cpu_kcycles_used, 0);
33752 + atomic_set_unchecked(&dev->bytes_rendered, 0);
33753 + atomic_set_unchecked(&dev->bytes_identical, 0);
33754 + atomic_set_unchecked(&dev->bytes_sent, 0);
33755 + atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
33756
33757 return count;
33758 }
33759 diff -urNp linux-2.6.39.4/drivers/video/uvesafb.c linux-2.6.39.4/drivers/video/uvesafb.c
33760 --- linux-2.6.39.4/drivers/video/uvesafb.c 2011-05-19 00:06:34.000000000 -0400
33761 +++ linux-2.6.39.4/drivers/video/uvesafb.c 2011-08-05 20:34:06.000000000 -0400
33762 @@ -19,6 +19,7 @@
33763 #include <linux/io.h>
33764 #include <linux/mutex.h>
33765 #include <linux/slab.h>
33766 +#include <linux/moduleloader.h>
33767 #include <video/edid.h>
33768 #include <video/uvesafb.h>
33769 #ifdef CONFIG_X86
33770 @@ -121,7 +122,7 @@ static int uvesafb_helper_start(void)
33771 NULL,
33772 };
33773
33774 - return call_usermodehelper(v86d_path, argv, envp, 1);
33775 + return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
33776 }
33777
33778 /*
33779 @@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi(
33780 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
33781 par->pmi_setpal = par->ypan = 0;
33782 } else {
33783 +
33784 +#ifdef CONFIG_PAX_KERNEXEC
33785 +#ifdef CONFIG_MODULES
33786 + par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
33787 +#endif
33788 + if (!par->pmi_code) {
33789 + par->pmi_setpal = par->ypan = 0;
33790 + return 0;
33791 + }
33792 +#endif
33793 +
33794 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
33795 + task->t.regs.edi);
33796 +
33797 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33798 + pax_open_kernel();
33799 + memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
33800 + pax_close_kernel();
33801 +
33802 + par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
33803 + par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
33804 +#else
33805 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
33806 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
33807 +#endif
33808 +
33809 printk(KERN_INFO "uvesafb: protected mode interface info at "
33810 "%04x:%04x\n",
33811 (u16)task->t.regs.es, (u16)task->t.regs.edi);
33812 @@ -1821,6 +1844,11 @@ out:
33813 if (par->vbe_modes)
33814 kfree(par->vbe_modes);
33815
33816 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33817 + if (par->pmi_code)
33818 + module_free_exec(NULL, par->pmi_code);
33819 +#endif
33820 +
33821 framebuffer_release(info);
33822 return err;
33823 }
33824 @@ -1847,6 +1875,12 @@ static int uvesafb_remove(struct platfor
33825 kfree(par->vbe_state_orig);
33826 if (par->vbe_state_saved)
33827 kfree(par->vbe_state_saved);
33828 +
33829 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33830 + if (par->pmi_code)
33831 + module_free_exec(NULL, par->pmi_code);
33832 +#endif
33833 +
33834 }
33835
33836 framebuffer_release(info);
33837 diff -urNp linux-2.6.39.4/drivers/video/vesafb.c linux-2.6.39.4/drivers/video/vesafb.c
33838 --- linux-2.6.39.4/drivers/video/vesafb.c 2011-05-19 00:06:34.000000000 -0400
33839 +++ linux-2.6.39.4/drivers/video/vesafb.c 2011-08-05 20:34:06.000000000 -0400
33840 @@ -9,6 +9,7 @@
33841 */
33842
33843 #include <linux/module.h>
33844 +#include <linux/moduleloader.h>
33845 #include <linux/kernel.h>
33846 #include <linux/errno.h>
33847 #include <linux/string.h>
33848 @@ -52,8 +53,8 @@ static int vram_remap __initdata; /*
33849 static int vram_total __initdata; /* Set total amount of memory */
33850 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
33851 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
33852 -static void (*pmi_start)(void) __read_mostly;
33853 -static void (*pmi_pal) (void) __read_mostly;
33854 +static void (*pmi_start)(void) __read_only;
33855 +static void (*pmi_pal) (void) __read_only;
33856 static int depth __read_mostly;
33857 static int vga_compat __read_mostly;
33858 /* --------------------------------------------------------------------- */
33859 @@ -232,6 +233,7 @@ static int __init vesafb_probe(struct pl
33860 unsigned int size_vmode;
33861 unsigned int size_remap;
33862 unsigned int size_total;
33863 + void *pmi_code = NULL;
33864
33865 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
33866 return -ENODEV;
33867 @@ -274,10 +276,6 @@ static int __init vesafb_probe(struct pl
33868 size_remap = size_total;
33869 vesafb_fix.smem_len = size_remap;
33870
33871 -#ifndef __i386__
33872 - screen_info.vesapm_seg = 0;
33873 -#endif
33874 -
33875 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
33876 printk(KERN_WARNING
33877 "vesafb: cannot reserve video memory at 0x%lx\n",
33878 @@ -306,9 +304,21 @@ static int __init vesafb_probe(struct pl
33879 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
33880 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
33881
33882 +#ifdef __i386__
33883 +
33884 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33885 + pmi_code = module_alloc_exec(screen_info.vesapm_size);
33886 + if (!pmi_code)
33887 +#elif !defined(CONFIG_PAX_KERNEXEC)
33888 + if (0)
33889 +#endif
33890 +
33891 +#endif
33892 + screen_info.vesapm_seg = 0;
33893 +
33894 if (screen_info.vesapm_seg) {
33895 - printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
33896 - screen_info.vesapm_seg,screen_info.vesapm_off);
33897 + printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
33898 + screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
33899 }
33900
33901 if (screen_info.vesapm_seg < 0xc000)
33902 @@ -316,9 +326,25 @@ static int __init vesafb_probe(struct pl
33903
33904 if (ypan || pmi_setpal) {
33905 unsigned short *pmi_base;
33906 +
33907 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
33908 - pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
33909 - pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
33910 +
33911 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33912 + pax_open_kernel();
33913 + memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
33914 +#else
33915 + pmi_code = pmi_base;
33916 +#endif
33917 +
33918 + pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
33919 + pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
33920 +
33921 +#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33922 + pmi_start = ktva_ktla(pmi_start);
33923 + pmi_pal = ktva_ktla(pmi_pal);
33924 + pax_close_kernel();
33925 +#endif
33926 +
33927 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
33928 if (pmi_base[3]) {
33929 printk(KERN_INFO "vesafb: pmi: ports = ");
33930 @@ -487,6 +513,11 @@ static int __init vesafb_probe(struct pl
33931 info->node, info->fix.id);
33932 return 0;
33933 err:
33934 +
33935 +#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
33936 + module_free_exec(NULL, pmi_code);
33937 +#endif
33938 +
33939 if (info->screen_base)
33940 iounmap(info->screen_base);
33941 framebuffer_release(info);
33942 diff -urNp linux-2.6.39.4/drivers/virtio/virtio_balloon.c linux-2.6.39.4/drivers/virtio/virtio_balloon.c
33943 --- linux-2.6.39.4/drivers/virtio/virtio_balloon.c 2011-05-19 00:06:34.000000000 -0400
33944 +++ linux-2.6.39.4/drivers/virtio/virtio_balloon.c 2011-08-05 19:44:37.000000000 -0400
33945 @@ -176,6 +176,8 @@ static void update_balloon_stats(struct
33946 struct sysinfo i;
33947 int idx = 0;
33948
33949 + pax_track_stack();
33950 +
33951 all_vm_events(events);
33952 si_meminfo(&i);
33953
33954 diff -urNp linux-2.6.39.4/fs/9p/vfs_inode.c linux-2.6.39.4/fs/9p/vfs_inode.c
33955 --- linux-2.6.39.4/fs/9p/vfs_inode.c 2011-05-19 00:06:34.000000000 -0400
33956 +++ linux-2.6.39.4/fs/9p/vfs_inode.c 2011-08-05 19:44:37.000000000 -0400
33957 @@ -1210,7 +1210,7 @@ static void *v9fs_vfs_follow_link(struct
33958 void
33959 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
33960 {
33961 - char *s = nd_get_link(nd);
33962 + const char *s = nd_get_link(nd);
33963
33964 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
33965 IS_ERR(s) ? "<error>" : s);
33966 diff -urNp linux-2.6.39.4/fs/aio.c linux-2.6.39.4/fs/aio.c
33967 --- linux-2.6.39.4/fs/aio.c 2011-05-19 00:06:34.000000000 -0400
33968 +++ linux-2.6.39.4/fs/aio.c 2011-08-05 19:44:37.000000000 -0400
33969 @@ -119,7 +119,7 @@ static int aio_setup_ring(struct kioctx
33970 size += sizeof(struct io_event) * nr_events;
33971 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
33972
33973 - if (nr_pages < 0)
33974 + if (nr_pages <= 0)
33975 return -EINVAL;
33976
33977 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
33978 @@ -1088,6 +1088,8 @@ static int read_events(struct kioctx *ct
33979 struct aio_timeout to;
33980 int retry = 0;
33981
33982 + pax_track_stack();
33983 +
33984 /* needed to zero any padding within an entry (there shouldn't be
33985 * any, but C is fun!
33986 */
33987 @@ -1381,22 +1383,27 @@ static ssize_t aio_fsync(struct kiocb *i
33988 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
33989 {
33990 ssize_t ret;
33991 + struct iovec iovstack;
33992
33993 #ifdef CONFIG_COMPAT
33994 if (compat)
33995 ret = compat_rw_copy_check_uvector(type,
33996 (struct compat_iovec __user *)kiocb->ki_buf,
33997 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
33998 + kiocb->ki_nbytes, 1, &iovstack,
33999 &kiocb->ki_iovec);
34000 else
34001 #endif
34002 ret = rw_copy_check_uvector(type,
34003 (struct iovec __user *)kiocb->ki_buf,
34004 - kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
34005 + kiocb->ki_nbytes, 1, &iovstack,
34006 &kiocb->ki_iovec);
34007 if (ret < 0)
34008 goto out;
34009
34010 + if (kiocb->ki_iovec == &iovstack) {
34011 + kiocb->ki_inline_vec = iovstack;
34012 + kiocb->ki_iovec = &kiocb->ki_inline_vec;
34013 + }
34014 kiocb->ki_nr_segs = kiocb->ki_nbytes;
34015 kiocb->ki_cur_seg = 0;
34016 /* ki_nbytes/left now reflect bytes instead of segs */
34017 diff -urNp linux-2.6.39.4/fs/attr.c linux-2.6.39.4/fs/attr.c
34018 --- linux-2.6.39.4/fs/attr.c 2011-05-19 00:06:34.000000000 -0400
34019 +++ linux-2.6.39.4/fs/attr.c 2011-08-05 19:44:37.000000000 -0400
34020 @@ -98,6 +98,7 @@ int inode_newsize_ok(const struct inode
34021 unsigned long limit;
34022
34023 limit = rlimit(RLIMIT_FSIZE);
34024 + gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
34025 if (limit != RLIM_INFINITY && offset > limit)
34026 goto out_sig;
34027 if (offset > inode->i_sb->s_maxbytes)
34028 diff -urNp linux-2.6.39.4/fs/befs/linuxvfs.c linux-2.6.39.4/fs/befs/linuxvfs.c
34029 --- linux-2.6.39.4/fs/befs/linuxvfs.c 2011-05-19 00:06:34.000000000 -0400
34030 +++ linux-2.6.39.4/fs/befs/linuxvfs.c 2011-08-05 19:44:37.000000000 -0400
34031 @@ -498,7 +498,7 @@ static void befs_put_link(struct dentry
34032 {
34033 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
34034 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
34035 - char *link = nd_get_link(nd);
34036 + const char *link = nd_get_link(nd);
34037 if (!IS_ERR(link))
34038 kfree(link);
34039 }
34040 diff -urNp linux-2.6.39.4/fs/binfmt_aout.c linux-2.6.39.4/fs/binfmt_aout.c
34041 --- linux-2.6.39.4/fs/binfmt_aout.c 2011-05-19 00:06:34.000000000 -0400
34042 +++ linux-2.6.39.4/fs/binfmt_aout.c 2011-08-05 19:44:37.000000000 -0400
34043 @@ -16,6 +16,7 @@
34044 #include <linux/string.h>
34045 #include <linux/fs.h>
34046 #include <linux/file.h>
34047 +#include <linux/security.h>
34048 #include <linux/stat.h>
34049 #include <linux/fcntl.h>
34050 #include <linux/ptrace.h>
34051 @@ -86,6 +87,8 @@ static int aout_core_dump(struct coredum
34052 #endif
34053 # define START_STACK(u) ((void __user *)u.start_stack)
34054
34055 + memset(&dump, 0, sizeof(dump));
34056 +
34057 fs = get_fs();
34058 set_fs(KERNEL_DS);
34059 has_dumped = 1;
34060 @@ -97,10 +100,12 @@ static int aout_core_dump(struct coredum
34061
34062 /* If the size of the dump file exceeds the rlimit, then see what would happen
34063 if we wrote the stack, but not the data area. */
34064 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
34065 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
34066 dump.u_dsize = 0;
34067
34068 /* Make sure we have enough room to write the stack and data areas. */
34069 + gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
34070 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
34071 dump.u_ssize = 0;
34072
34073 @@ -234,6 +239,8 @@ static int load_aout_binary(struct linux
34074 rlim = rlimit(RLIMIT_DATA);
34075 if (rlim >= RLIM_INFINITY)
34076 rlim = ~0;
34077 +
34078 + gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
34079 if (ex.a_data + ex.a_bss > rlim)
34080 return -ENOMEM;
34081
34082 @@ -262,6 +269,27 @@ static int load_aout_binary(struct linux
34083 install_exec_creds(bprm);
34084 current->flags &= ~PF_FORKNOEXEC;
34085
34086 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
34087 + current->mm->pax_flags = 0UL;
34088 +#endif
34089 +
34090 +#ifdef CONFIG_PAX_PAGEEXEC
34091 + if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
34092 + current->mm->pax_flags |= MF_PAX_PAGEEXEC;
34093 +
34094 +#ifdef CONFIG_PAX_EMUTRAMP
34095 + if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
34096 + current->mm->pax_flags |= MF_PAX_EMUTRAMP;
34097 +#endif
34098 +
34099 +#ifdef CONFIG_PAX_MPROTECT
34100 + if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
34101 + current->mm->pax_flags |= MF_PAX_MPROTECT;
34102 +#endif
34103 +
34104 + }
34105 +#endif
34106 +
34107 if (N_MAGIC(ex) == OMAGIC) {
34108 unsigned long text_addr, map_size;
34109 loff_t pos;
34110 @@ -334,7 +362,7 @@ static int load_aout_binary(struct linux
34111
34112 down_write(&current->mm->mmap_sem);
34113 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
34114 - PROT_READ | PROT_WRITE | PROT_EXEC,
34115 + PROT_READ | PROT_WRITE,
34116 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
34117 fd_offset + ex.a_text);
34118 up_write(&current->mm->mmap_sem);
34119 diff -urNp linux-2.6.39.4/fs/binfmt_elf.c linux-2.6.39.4/fs/binfmt_elf.c
34120 --- linux-2.6.39.4/fs/binfmt_elf.c 2011-05-19 00:06:34.000000000 -0400
34121 +++ linux-2.6.39.4/fs/binfmt_elf.c 2011-08-05 19:44:37.000000000 -0400
34122 @@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump
34123 #define elf_core_dump NULL
34124 #endif
34125
34126 +#ifdef CONFIG_PAX_MPROTECT
34127 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
34128 +#endif
34129 +
34130 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
34131 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
34132 #else
34133 @@ -70,6 +74,11 @@ static struct linux_binfmt elf_format =
34134 .load_binary = load_elf_binary,
34135 .load_shlib = load_elf_library,
34136 .core_dump = elf_core_dump,
34137 +
34138 +#ifdef CONFIG_PAX_MPROTECT
34139 + .handle_mprotect= elf_handle_mprotect,
34140 +#endif
34141 +
34142 .min_coredump = ELF_EXEC_PAGESIZE,
34143 };
34144
34145 @@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
34146
34147 static int set_brk(unsigned long start, unsigned long end)
34148 {
34149 + unsigned long e = end;
34150 +
34151 start = ELF_PAGEALIGN(start);
34152 end = ELF_PAGEALIGN(end);
34153 if (end > start) {
34154 @@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
34155 if (BAD_ADDR(addr))
34156 return addr;
34157 }
34158 - current->mm->start_brk = current->mm->brk = end;
34159 + current->mm->start_brk = current->mm->brk = e;
34160 return 0;
34161 }
34162
34163 @@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
34164 elf_addr_t __user *u_rand_bytes;
34165 const char *k_platform = ELF_PLATFORM;
34166 const char *k_base_platform = ELF_BASE_PLATFORM;
34167 - unsigned char k_rand_bytes[16];
34168 + u32 k_rand_bytes[4];
34169 int items;
34170 elf_addr_t *elf_info;
34171 int ei_index = 0;
34172 const struct cred *cred = current_cred();
34173 struct vm_area_struct *vma;
34174 + unsigned long saved_auxv[AT_VECTOR_SIZE];
34175 +
34176 + pax_track_stack();
34177
34178 /*
34179 * In some cases (e.g. Hyper-Threading), we want to avoid L1
34180 @@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
34181 * Generate 16 random bytes for userspace PRNG seeding.
34182 */
34183 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
34184 - u_rand_bytes = (elf_addr_t __user *)
34185 - STACK_ALLOC(p, sizeof(k_rand_bytes));
34186 + srandom32(k_rand_bytes[0] ^ random32());
34187 + srandom32(k_rand_bytes[1] ^ random32());
34188 + srandom32(k_rand_bytes[2] ^ random32());
34189 + srandom32(k_rand_bytes[3] ^ random32());
34190 + p = STACK_ROUND(p, sizeof(k_rand_bytes));
34191 + u_rand_bytes = (elf_addr_t __user *) p;
34192 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
34193 return -EFAULT;
34194
34195 @@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
34196 return -EFAULT;
34197 current->mm->env_end = p;
34198
34199 + memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
34200 +
34201 /* Put the elf_info on the stack in the right place. */
34202 sp = (elf_addr_t __user *)envp + 1;
34203 - if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
34204 + if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
34205 return -EFAULT;
34206 return 0;
34207 }
34208 @@ -381,10 +401,10 @@ static unsigned long load_elf_interp(str
34209 {
34210 struct elf_phdr *elf_phdata;
34211 struct elf_phdr *eppnt;
34212 - unsigned long load_addr = 0;
34213 + unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
34214 int load_addr_set = 0;
34215 unsigned long last_bss = 0, elf_bss = 0;
34216 - unsigned long error = ~0UL;
34217 + unsigned long error = -EINVAL;
34218 unsigned long total_size;
34219 int retval, i, size;
34220
34221 @@ -430,6 +450,11 @@ static unsigned long load_elf_interp(str
34222 goto out_close;
34223 }
34224
34225 +#ifdef CONFIG_PAX_SEGMEXEC
34226 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
34227 + pax_task_size = SEGMEXEC_TASK_SIZE;
34228 +#endif
34229 +
34230 eppnt = elf_phdata;
34231 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
34232 if (eppnt->p_type == PT_LOAD) {
34233 @@ -473,8 +498,8 @@ static unsigned long load_elf_interp(str
34234 k = load_addr + eppnt->p_vaddr;
34235 if (BAD_ADDR(k) ||
34236 eppnt->p_filesz > eppnt->p_memsz ||
34237 - eppnt->p_memsz > TASK_SIZE ||
34238 - TASK_SIZE - eppnt->p_memsz < k) {
34239 + eppnt->p_memsz > pax_task_size ||
34240 + pax_task_size - eppnt->p_memsz < k) {
34241 error = -ENOMEM;
34242 goto out_close;
34243 }
34244 @@ -528,6 +553,193 @@ out:
34245 return error;
34246 }
34247
34248 +#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
34249 +static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
34250 +{
34251 + unsigned long pax_flags = 0UL;
34252 +
34253 +#ifdef CONFIG_PAX_PAGEEXEC
34254 + if (elf_phdata->p_flags & PF_PAGEEXEC)
34255 + pax_flags |= MF_PAX_PAGEEXEC;
34256 +#endif
34257 +
34258 +#ifdef CONFIG_PAX_SEGMEXEC
34259 + if (elf_phdata->p_flags & PF_SEGMEXEC)
34260 + pax_flags |= MF_PAX_SEGMEXEC;
34261 +#endif
34262 +
34263 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34264 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34265 + if ((__supported_pte_mask & _PAGE_NX))
34266 + pax_flags &= ~MF_PAX_SEGMEXEC;
34267 + else
34268 + pax_flags &= ~MF_PAX_PAGEEXEC;
34269 + }
34270 +#endif
34271 +
34272 +#ifdef CONFIG_PAX_EMUTRAMP
34273 + if (elf_phdata->p_flags & PF_EMUTRAMP)
34274 + pax_flags |= MF_PAX_EMUTRAMP;
34275 +#endif
34276 +
34277 +#ifdef CONFIG_PAX_MPROTECT
34278 + if (elf_phdata->p_flags & PF_MPROTECT)
34279 + pax_flags |= MF_PAX_MPROTECT;
34280 +#endif
34281 +
34282 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34283 + if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
34284 + pax_flags |= MF_PAX_RANDMMAP;
34285 +#endif
34286 +
34287 + return pax_flags;
34288 +}
34289 +#endif
34290 +
34291 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34292 +static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
34293 +{
34294 + unsigned long pax_flags = 0UL;
34295 +
34296 +#ifdef CONFIG_PAX_PAGEEXEC
34297 + if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
34298 + pax_flags |= MF_PAX_PAGEEXEC;
34299 +#endif
34300 +
34301 +#ifdef CONFIG_PAX_SEGMEXEC
34302 + if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
34303 + pax_flags |= MF_PAX_SEGMEXEC;
34304 +#endif
34305 +
34306 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34307 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34308 + if ((__supported_pte_mask & _PAGE_NX))
34309 + pax_flags &= ~MF_PAX_SEGMEXEC;
34310 + else
34311 + pax_flags &= ~MF_PAX_PAGEEXEC;
34312 + }
34313 +#endif
34314 +
34315 +#ifdef CONFIG_PAX_EMUTRAMP
34316 + if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
34317 + pax_flags |= MF_PAX_EMUTRAMP;
34318 +#endif
34319 +
34320 +#ifdef CONFIG_PAX_MPROTECT
34321 + if (!(elf_phdata->p_flags & PF_NOMPROTECT))
34322 + pax_flags |= MF_PAX_MPROTECT;
34323 +#endif
34324 +
34325 +#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
34326 + if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
34327 + pax_flags |= MF_PAX_RANDMMAP;
34328 +#endif
34329 +
34330 + return pax_flags;
34331 +}
34332 +#endif
34333 +
34334 +#ifdef CONFIG_PAX_EI_PAX
34335 +static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
34336 +{
34337 + unsigned long pax_flags = 0UL;
34338 +
34339 +#ifdef CONFIG_PAX_PAGEEXEC
34340 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
34341 + pax_flags |= MF_PAX_PAGEEXEC;
34342 +#endif
34343 +
34344 +#ifdef CONFIG_PAX_SEGMEXEC
34345 + if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
34346 + pax_flags |= MF_PAX_SEGMEXEC;
34347 +#endif
34348 +
34349 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
34350 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34351 + if ((__supported_pte_mask & _PAGE_NX))
34352 + pax_flags &= ~MF_PAX_SEGMEXEC;
34353 + else
34354 + pax_flags &= ~MF_PAX_PAGEEXEC;
34355 + }
34356 +#endif
34357 +
34358 +#ifdef CONFIG_PAX_EMUTRAMP
34359 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
34360 + pax_flags |= MF_PAX_EMUTRAMP;
34361 +#endif
34362 +
34363 +#ifdef CONFIG_PAX_MPROTECT
34364 + if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
34365 + pax_flags |= MF_PAX_MPROTECT;
34366 +#endif
34367 +
34368 +#ifdef CONFIG_PAX_ASLR
34369 + if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
34370 + pax_flags |= MF_PAX_RANDMMAP;
34371 +#endif
34372 +
34373 + return pax_flags;
34374 +}
34375 +#endif
34376 +
34377 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34378 +static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
34379 +{
34380 + unsigned long pax_flags = 0UL;
34381 +
34382 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34383 + unsigned long i;
34384 + int found_flags = 0;
34385 +#endif
34386 +
34387 +#ifdef CONFIG_PAX_EI_PAX
34388 + pax_flags = pax_parse_ei_pax(elf_ex);
34389 +#endif
34390 +
34391 +#ifdef CONFIG_PAX_PT_PAX_FLAGS
34392 + for (i = 0UL; i < elf_ex->e_phnum; i++)
34393 + if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
34394 + if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
34395 + ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
34396 + ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
34397 + ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
34398 + ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
34399 + return -EINVAL;
34400 +
34401 +#ifdef CONFIG_PAX_SOFTMODE
34402 + if (pax_softmode)
34403 + pax_flags = pax_parse_softmode(&elf_phdata[i]);
34404 + else
34405 +#endif
34406 +
34407 + pax_flags = pax_parse_hardmode(&elf_phdata[i]);
34408 + found_flags = 1;
34409 + break;
34410 + }
34411 +#endif
34412 +
34413 +#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
34414 + if (found_flags == 0) {
34415 + struct elf_phdr phdr;
34416 + memset(&phdr, 0, sizeof(phdr));
34417 + phdr.p_flags = PF_NOEMUTRAMP;
34418 +#ifdef CONFIG_PAX_SOFTMODE
34419 + if (pax_softmode)
34420 + pax_flags = pax_parse_softmode(&phdr);
34421 + else
34422 +#endif
34423 + pax_flags = pax_parse_hardmode(&phdr);
34424 + }
34425 +#endif
34426 +
34427 + if (0 > pax_check_flags(&pax_flags))
34428 + return -EINVAL;
34429 +
34430 + current->mm->pax_flags = pax_flags;
34431 + return 0;
34432 +}
34433 +#endif
34434 +
34435 /*
34436 * These are the functions used to load ELF style executables and shared
34437 * libraries. There is no binary dependent code anywhere else.
34438 @@ -544,6 +756,11 @@ static unsigned long randomize_stack_top
34439 {
34440 unsigned int random_variable = 0;
34441
34442 +#ifdef CONFIG_PAX_RANDUSTACK
34443 + if (randomize_va_space)
34444 + return stack_top - current->mm->delta_stack;
34445 +#endif
34446 +
34447 if ((current->flags & PF_RANDOMIZE) &&
34448 !(current->personality & ADDR_NO_RANDOMIZE)) {
34449 random_variable = get_random_int() & STACK_RND_MASK;
34450 @@ -562,7 +779,7 @@ static int load_elf_binary(struct linux_
34451 unsigned long load_addr = 0, load_bias = 0;
34452 int load_addr_set = 0;
34453 char * elf_interpreter = NULL;
34454 - unsigned long error;
34455 + unsigned long error = 0;
34456 struct elf_phdr *elf_ppnt, *elf_phdata;
34457 unsigned long elf_bss, elf_brk;
34458 int retval, i;
34459 @@ -572,11 +789,11 @@ static int load_elf_binary(struct linux_
34460 unsigned long start_code, end_code, start_data, end_data;
34461 unsigned long reloc_func_desc __maybe_unused = 0;
34462 int executable_stack = EXSTACK_DEFAULT;
34463 - unsigned long def_flags = 0;
34464 struct {
34465 struct elfhdr elf_ex;
34466 struct elfhdr interp_elf_ex;
34467 } *loc;
34468 + unsigned long pax_task_size = TASK_SIZE;
34469
34470 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
34471 if (!loc) {
34472 @@ -714,11 +931,81 @@ static int load_elf_binary(struct linux_
34473
34474 /* OK, This is the point of no return */
34475 current->flags &= ~PF_FORKNOEXEC;
34476 - current->mm->def_flags = def_flags;
34477 +
34478 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
34479 + current->mm->pax_flags = 0UL;
34480 +#endif
34481 +
34482 +#ifdef CONFIG_PAX_DLRESOLVE
34483 + current->mm->call_dl_resolve = 0UL;
34484 +#endif
34485 +
34486 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
34487 + current->mm->call_syscall = 0UL;
34488 +#endif
34489 +
34490 +#ifdef CONFIG_PAX_ASLR
34491 + current->mm->delta_mmap = 0UL;
34492 + current->mm->delta_stack = 0UL;
34493 +#endif
34494 +
34495 + current->mm->def_flags = 0;
34496 +
34497 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
34498 + if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
34499 + send_sig(SIGKILL, current, 0);
34500 + goto out_free_dentry;
34501 + }
34502 +#endif
34503 +
34504 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
34505 + pax_set_initial_flags(bprm);
34506 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
34507 + if (pax_set_initial_flags_func)
34508 + (pax_set_initial_flags_func)(bprm);
34509 +#endif
34510 +
34511 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
34512 + if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
34513 + current->mm->context.user_cs_limit = PAGE_SIZE;
34514 + current->mm->def_flags |= VM_PAGEEXEC;
34515 + }
34516 +#endif
34517 +
34518 +#ifdef CONFIG_PAX_SEGMEXEC
34519 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
34520 + current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
34521 + current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
34522 + pax_task_size = SEGMEXEC_TASK_SIZE;
34523 + current->mm->def_flags |= VM_NOHUGEPAGE;
34524 + }
34525 +#endif
34526 +
34527 +#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
34528 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34529 + set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
34530 + put_cpu();
34531 + }
34532 +#endif
34533
34534 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
34535 may depend on the personality. */
34536 SET_PERSONALITY(loc->elf_ex);
34537 +
34538 +#ifdef CONFIG_PAX_ASLR
34539 + if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
34540 + current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
34541 + current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
34542 + }
34543 +#endif
34544 +
34545 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
34546 + if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
34547 + executable_stack = EXSTACK_DISABLE_X;
34548 + current->personality &= ~READ_IMPLIES_EXEC;
34549 + } else
34550 +#endif
34551 +
34552 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
34553 current->personality |= READ_IMPLIES_EXEC;
34554
34555 @@ -800,6 +1087,20 @@ static int load_elf_binary(struct linux_
34556 #else
34557 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
34558 #endif
34559 +
34560 +#ifdef CONFIG_PAX_RANDMMAP
34561 + /* PaX: randomize base address at the default exe base if requested */
34562 + if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
34563 +#ifdef CONFIG_SPARC64
34564 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
34565 +#else
34566 + load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
34567 +#endif
34568 + load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
34569 + elf_flags |= MAP_FIXED;
34570 + }
34571 +#endif
34572 +
34573 }
34574
34575 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
34576 @@ -832,9 +1133,9 @@ static int load_elf_binary(struct linux_
34577 * allowed task size. Note that p_filesz must always be
34578 * <= p_memsz so it is only necessary to check p_memsz.
34579 */
34580 - if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34581 - elf_ppnt->p_memsz > TASK_SIZE ||
34582 - TASK_SIZE - elf_ppnt->p_memsz < k) {
34583 + if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
34584 + elf_ppnt->p_memsz > pax_task_size ||
34585 + pax_task_size - elf_ppnt->p_memsz < k) {
34586 /* set_brk can never work. Avoid overflows. */
34587 send_sig(SIGKILL, current, 0);
34588 retval = -EINVAL;
34589 @@ -862,6 +1163,11 @@ static int load_elf_binary(struct linux_
34590 start_data += load_bias;
34591 end_data += load_bias;
34592
34593 +#ifdef CONFIG_PAX_RANDMMAP
34594 + if (current->mm->pax_flags & MF_PAX_RANDMMAP)
34595 + elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
34596 +#endif
34597 +
34598 /* Calling set_brk effectively mmaps the pages that we need
34599 * for the bss and break sections. We must do this before
34600 * mapping in the interpreter, to make sure it doesn't wind
34601 @@ -873,9 +1179,11 @@ static int load_elf_binary(struct linux_
34602 goto out_free_dentry;
34603 }
34604 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
34605 - send_sig(SIGSEGV, current, 0);
34606 - retval = -EFAULT; /* Nobody gets to see this, but.. */
34607 - goto out_free_dentry;
34608 + /*
34609 + * This bss-zeroing can fail if the ELF
34610 + * file specifies odd protections. So
34611 + * we don't check the return value
34612 + */
34613 }
34614
34615 if (elf_interpreter) {
34616 @@ -1090,7 +1398,7 @@ out:
34617 * Decide what to dump of a segment, part, all or none.
34618 */
34619 static unsigned long vma_dump_size(struct vm_area_struct *vma,
34620 - unsigned long mm_flags)
34621 + unsigned long mm_flags, long signr)
34622 {
34623 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
34624
34625 @@ -1124,7 +1432,7 @@ static unsigned long vma_dump_size(struc
34626 if (vma->vm_file == NULL)
34627 return 0;
34628
34629 - if (FILTER(MAPPED_PRIVATE))
34630 + if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
34631 goto whole;
34632
34633 /*
34634 @@ -1346,9 +1654,9 @@ static void fill_auxv_note(struct memelf
34635 {
34636 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
34637 int i = 0;
34638 - do
34639 + do {
34640 i += 2;
34641 - while (auxv[i - 2] != AT_NULL);
34642 + } while (auxv[i - 2] != AT_NULL);
34643 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
34644 }
34645
34646 @@ -1854,14 +2162,14 @@ static void fill_extnum_info(struct elfh
34647 }
34648
34649 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
34650 - unsigned long mm_flags)
34651 + struct coredump_params *cprm)
34652 {
34653 struct vm_area_struct *vma;
34654 size_t size = 0;
34655
34656 for (vma = first_vma(current, gate_vma); vma != NULL;
34657 vma = next_vma(vma, gate_vma))
34658 - size += vma_dump_size(vma, mm_flags);
34659 + size += vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34660 return size;
34661 }
34662
34663 @@ -1955,7 +2263,7 @@ static int elf_core_dump(struct coredump
34664
34665 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
34666
34667 - offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
34668 + offset += elf_core_vma_data_size(gate_vma, cprm);
34669 offset += elf_core_extra_data_size();
34670 e_shoff = offset;
34671
34672 @@ -1969,10 +2277,12 @@ static int elf_core_dump(struct coredump
34673 offset = dataoff;
34674
34675 size += sizeof(*elf);
34676 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34677 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
34678 goto end_coredump;
34679
34680 size += sizeof(*phdr4note);
34681 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34682 if (size > cprm->limit
34683 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
34684 goto end_coredump;
34685 @@ -1986,7 +2296,7 @@ static int elf_core_dump(struct coredump
34686 phdr.p_offset = offset;
34687 phdr.p_vaddr = vma->vm_start;
34688 phdr.p_paddr = 0;
34689 - phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
34690 + phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34691 phdr.p_memsz = vma->vm_end - vma->vm_start;
34692 offset += phdr.p_filesz;
34693 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
34694 @@ -1997,6 +2307,7 @@ static int elf_core_dump(struct coredump
34695 phdr.p_align = ELF_EXEC_PAGESIZE;
34696
34697 size += sizeof(phdr);
34698 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34699 if (size > cprm->limit
34700 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
34701 goto end_coredump;
34702 @@ -2021,7 +2332,7 @@ static int elf_core_dump(struct coredump
34703 unsigned long addr;
34704 unsigned long end;
34705
34706 - end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
34707 + end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr);
34708
34709 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
34710 struct page *page;
34711 @@ -2030,6 +2341,7 @@ static int elf_core_dump(struct coredump
34712 page = get_dump_page(addr);
34713 if (page) {
34714 void *kaddr = kmap(page);
34715 + gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
34716 stop = ((size += PAGE_SIZE) > cprm->limit) ||
34717 !dump_write(cprm->file, kaddr,
34718 PAGE_SIZE);
34719 @@ -2047,6 +2359,7 @@ static int elf_core_dump(struct coredump
34720
34721 if (e_phnum == PN_XNUM) {
34722 size += sizeof(*shdr4extnum);
34723 + gr_learn_resource(current, RLIMIT_CORE, size, 1);
34724 if (size > cprm->limit
34725 || !dump_write(cprm->file, shdr4extnum,
34726 sizeof(*shdr4extnum)))
34727 @@ -2067,6 +2380,97 @@ out:
34728
34729 #endif /* CONFIG_ELF_CORE */
34730
34731 +#ifdef CONFIG_PAX_MPROTECT
34732 +/* PaX: non-PIC ELF libraries need relocations on their executable segments
34733 + * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
34734 + * we'll remove VM_MAYWRITE for good on RELRO segments.
34735 + *
34736 + * The checks favour ld-linux.so behaviour which operates on a per ELF segment
34737 + * basis because we want to allow the common case and not the special ones.
34738 + */
34739 +static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
34740 +{
34741 + struct elfhdr elf_h;
34742 + struct elf_phdr elf_p;
34743 + unsigned long i;
34744 + unsigned long oldflags;
34745 + bool is_textrel_rw, is_textrel_rx, is_relro;
34746 +
34747 + if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
34748 + return;
34749 +
34750 + oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
34751 + newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
34752 +
34753 +#ifdef CONFIG_PAX_ELFRELOCS
34754 + /* possible TEXTREL */
34755 + is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
34756 + is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
34757 +#else
34758 + is_textrel_rw = false;
34759 + is_textrel_rx = false;
34760 +#endif
34761 +
34762 + /* possible RELRO */
34763 + is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
34764 +
34765 + if (!is_textrel_rw && !is_textrel_rx && !is_relro)
34766 + return;
34767 +
34768 + if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
34769 + memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
34770 +
34771 +#ifdef CONFIG_PAX_ETEXECRELOCS
34772 + ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34773 +#else
34774 + ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
34775 +#endif
34776 +
34777 + (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
34778 + !elf_check_arch(&elf_h) ||
34779 + elf_h.e_phentsize != sizeof(struct elf_phdr) ||
34780 + elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
34781 + return;
34782 +
34783 + for (i = 0UL; i < elf_h.e_phnum; i++) {
34784 + if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
34785 + return;
34786 + switch (elf_p.p_type) {
34787 + case PT_DYNAMIC:
34788 + if (!is_textrel_rw && !is_textrel_rx)
34789 + continue;
34790 + i = 0UL;
34791 + while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
34792 + elf_dyn dyn;
34793 +
34794 + if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
34795 + return;
34796 + if (dyn.d_tag == DT_NULL)
34797 + return;
34798 + if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
34799 + gr_log_textrel(vma);
34800 + if (is_textrel_rw)
34801 + vma->vm_flags |= VM_MAYWRITE;
34802 + else
34803 + /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
34804 + vma->vm_flags &= ~VM_MAYWRITE;
34805 + return;
34806 + }
34807 + i++;
34808 + }
34809 + return;
34810 +
34811 + case PT_GNU_RELRO:
34812 + if (!is_relro)
34813 + continue;
34814 + if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
34815 + vma->vm_flags &= ~VM_MAYWRITE;
34816 + return;
34817 + }
34818 + }
34819 +}
34820 +#endif
34821 +
34822 static int __init init_elf_binfmt(void)
34823 {
34824 return register_binfmt(&elf_format);
34825 diff -urNp linux-2.6.39.4/fs/binfmt_flat.c linux-2.6.39.4/fs/binfmt_flat.c
34826 --- linux-2.6.39.4/fs/binfmt_flat.c 2011-05-19 00:06:34.000000000 -0400
34827 +++ linux-2.6.39.4/fs/binfmt_flat.c 2011-08-05 19:44:37.000000000 -0400
34828 @@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b
34829 realdatastart = (unsigned long) -ENOMEM;
34830 printk("Unable to allocate RAM for process data, errno %d\n",
34831 (int)-realdatastart);
34832 + down_write(&current->mm->mmap_sem);
34833 do_munmap(current->mm, textpos, text_len);
34834 + up_write(&current->mm->mmap_sem);
34835 ret = realdatastart;
34836 goto err;
34837 }
34838 @@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b
34839 }
34840 if (IS_ERR_VALUE(result)) {
34841 printk("Unable to read data+bss, errno %d\n", (int)-result);
34842 + down_write(&current->mm->mmap_sem);
34843 do_munmap(current->mm, textpos, text_len);
34844 do_munmap(current->mm, realdatastart, len);
34845 + up_write(&current->mm->mmap_sem);
34846 ret = result;
34847 goto err;
34848 }
34849 @@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b
34850 }
34851 if (IS_ERR_VALUE(result)) {
34852 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
34853 + down_write(&current->mm->mmap_sem);
34854 do_munmap(current->mm, textpos, text_len + data_len + extra +
34855 MAX_SHARED_LIBS * sizeof(unsigned long));
34856 + up_write(&current->mm->mmap_sem);
34857 ret = result;
34858 goto err;
34859 }
34860 diff -urNp linux-2.6.39.4/fs/bio.c linux-2.6.39.4/fs/bio.c
34861 --- linux-2.6.39.4/fs/bio.c 2011-05-19 00:06:34.000000000 -0400
34862 +++ linux-2.6.39.4/fs/bio.c 2011-08-05 19:44:37.000000000 -0400
34863 @@ -1233,7 +1233,7 @@ static void bio_copy_kern_endio(struct b
34864 const int read = bio_data_dir(bio) == READ;
34865 struct bio_map_data *bmd = bio->bi_private;
34866 int i;
34867 - char *p = bmd->sgvecs[0].iov_base;
34868 + char *p = (__force char *)bmd->sgvecs[0].iov_base;
34869
34870 __bio_for_each_segment(bvec, bio, i, 0) {
34871 char *addr = page_address(bvec->bv_page);
34872 diff -urNp linux-2.6.39.4/fs/block_dev.c linux-2.6.39.4/fs/block_dev.c
34873 --- linux-2.6.39.4/fs/block_dev.c 2011-07-09 09:18:51.000000000 -0400
34874 +++ linux-2.6.39.4/fs/block_dev.c 2011-08-05 19:44:37.000000000 -0400
34875 @@ -671,7 +671,7 @@ static bool bd_may_claim(struct block_de
34876 else if (bdev->bd_contains == bdev)
34877 return true; /* is a whole device which isn't held */
34878
34879 - else if (whole->bd_holder == bd_may_claim)
34880 + else if (whole->bd_holder == (void *)bd_may_claim)
34881 return true; /* is a partition of a device that is being partitioned */
34882 else if (whole->bd_holder != NULL)
34883 return false; /* is a partition of a held device */
34884 diff -urNp linux-2.6.39.4/fs/btrfs/ctree.c linux-2.6.39.4/fs/btrfs/ctree.c
34885 --- linux-2.6.39.4/fs/btrfs/ctree.c 2011-05-19 00:06:34.000000000 -0400
34886 +++ linux-2.6.39.4/fs/btrfs/ctree.c 2011-08-05 19:44:37.000000000 -0400
34887 @@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
34888 free_extent_buffer(buf);
34889 add_root_to_dirty_list(root);
34890 } else {
34891 - if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
34892 - parent_start = parent->start;
34893 - else
34894 + if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
34895 + if (parent)
34896 + parent_start = parent->start;
34897 + else
34898 + parent_start = 0;
34899 + } else
34900 parent_start = 0;
34901
34902 WARN_ON(trans->transid != btrfs_header_generation(parent));
34903 @@ -3647,7 +3650,6 @@ setup_items_for_insert(struct btrfs_tran
34904
34905 ret = 0;
34906 if (slot == 0) {
34907 - struct btrfs_disk_key disk_key;
34908 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
34909 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
34910 }
34911 diff -urNp linux-2.6.39.4/fs/btrfs/free-space-cache.c linux-2.6.39.4/fs/btrfs/free-space-cache.c
34912 --- linux-2.6.39.4/fs/btrfs/free-space-cache.c 2011-05-19 00:06:34.000000000 -0400
34913 +++ linux-2.6.39.4/fs/btrfs/free-space-cache.c 2011-08-05 19:44:37.000000000 -0400
34914 @@ -1910,8 +1910,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
34915 while(1) {
34916 if (entry->bytes < bytes ||
34917 (!entry->bitmap && entry->offset < min_start)) {
34918 - struct rb_node *node;
34919 -
34920 node = rb_next(&entry->offset_index);
34921 if (!node)
34922 break;
34923 @@ -1925,7 +1923,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
34924 cluster, entry, bytes,
34925 min_start);
34926 if (ret == 0) {
34927 - struct rb_node *node;
34928 node = rb_next(&entry->offset_index);
34929 if (!node)
34930 break;
34931 diff -urNp linux-2.6.39.4/fs/btrfs/inode.c linux-2.6.39.4/fs/btrfs/inode.c
34932 --- linux-2.6.39.4/fs/btrfs/inode.c 2011-05-19 00:06:34.000000000 -0400
34933 +++ linux-2.6.39.4/fs/btrfs/inode.c 2011-08-05 20:34:06.000000000 -0400
34934 @@ -6947,7 +6947,7 @@ fail:
34935 return -ENOMEM;
34936 }
34937
34938 -static int btrfs_getattr(struct vfsmount *mnt,
34939 +int btrfs_getattr(struct vfsmount *mnt,
34940 struct dentry *dentry, struct kstat *stat)
34941 {
34942 struct inode *inode = dentry->d_inode;
34943 @@ -6959,6 +6959,14 @@ static int btrfs_getattr(struct vfsmount
34944 return 0;
34945 }
34946
34947 +EXPORT_SYMBOL(btrfs_getattr);
34948 +
34949 +dev_t get_btrfs_dev_from_inode(struct inode *inode)
34950 +{
34951 + return BTRFS_I(inode)->root->anon_super.s_dev;
34952 +}
34953 +EXPORT_SYMBOL(get_btrfs_dev_from_inode);
34954 +
34955 /*
34956 * If a file is moved, it will inherit the cow and compression flags of the new
34957 * directory.
34958 diff -urNp linux-2.6.39.4/fs/btrfs/ioctl.c linux-2.6.39.4/fs/btrfs/ioctl.c
34959 --- linux-2.6.39.4/fs/btrfs/ioctl.c 2011-05-19 00:06:34.000000000 -0400
34960 +++ linux-2.6.39.4/fs/btrfs/ioctl.c 2011-08-05 19:44:37.000000000 -0400
34961 @@ -2361,9 +2361,12 @@ long btrfs_ioctl_space_info(struct btrfs
34962 for (i = 0; i < num_types; i++) {
34963 struct btrfs_space_info *tmp;
34964
34965 + /* Don't copy in more than we allocated */
34966 if (!slot_count)
34967 break;
34968
34969 + slot_count--;
34970 +
34971 info = NULL;
34972 rcu_read_lock();
34973 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
34974 @@ -2385,10 +2388,7 @@ long btrfs_ioctl_space_info(struct btrfs
34975 memcpy(dest, &space, sizeof(space));
34976 dest++;
34977 space_args.total_spaces++;
34978 - slot_count--;
34979 }
34980 - if (!slot_count)
34981 - break;
34982 }
34983 up_read(&info->groups_sem);
34984 }
34985 diff -urNp linux-2.6.39.4/fs/btrfs/relocation.c linux-2.6.39.4/fs/btrfs/relocation.c
34986 --- linux-2.6.39.4/fs/btrfs/relocation.c 2011-05-19 00:06:34.000000000 -0400
34987 +++ linux-2.6.39.4/fs/btrfs/relocation.c 2011-08-05 19:44:37.000000000 -0400
34988 @@ -1239,7 +1239,7 @@ static int __update_reloc_root(struct bt
34989 }
34990 spin_unlock(&rc->reloc_root_tree.lock);
34991
34992 - BUG_ON((struct btrfs_root *)node->data != root);
34993 + BUG_ON(!node || (struct btrfs_root *)node->data != root);
34994
34995 if (!del) {
34996 spin_lock(&rc->reloc_root_tree.lock);
34997 diff -urNp linux-2.6.39.4/fs/cachefiles/bind.c linux-2.6.39.4/fs/cachefiles/bind.c
34998 --- linux-2.6.39.4/fs/cachefiles/bind.c 2011-05-19 00:06:34.000000000 -0400
34999 +++ linux-2.6.39.4/fs/cachefiles/bind.c 2011-08-05 19:44:37.000000000 -0400
35000 @@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
35001 args);
35002
35003 /* start by checking things over */
35004 - ASSERT(cache->fstop_percent >= 0 &&
35005 - cache->fstop_percent < cache->fcull_percent &&
35006 + ASSERT(cache->fstop_percent < cache->fcull_percent &&
35007 cache->fcull_percent < cache->frun_percent &&
35008 cache->frun_percent < 100);
35009
35010 - ASSERT(cache->bstop_percent >= 0 &&
35011 - cache->bstop_percent < cache->bcull_percent &&
35012 + ASSERT(cache->bstop_percent < cache->bcull_percent &&
35013 cache->bcull_percent < cache->brun_percent &&
35014 cache->brun_percent < 100);
35015
35016 diff -urNp linux-2.6.39.4/fs/cachefiles/daemon.c linux-2.6.39.4/fs/cachefiles/daemon.c
35017 --- linux-2.6.39.4/fs/cachefiles/daemon.c 2011-05-19 00:06:34.000000000 -0400
35018 +++ linux-2.6.39.4/fs/cachefiles/daemon.c 2011-08-05 19:44:37.000000000 -0400
35019 @@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(st
35020 if (n > buflen)
35021 return -EMSGSIZE;
35022
35023 - if (copy_to_user(_buffer, buffer, n) != 0)
35024 + if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
35025 return -EFAULT;
35026
35027 return n;
35028 @@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(s
35029 if (test_bit(CACHEFILES_DEAD, &cache->flags))
35030 return -EIO;
35031
35032 - if (datalen < 0 || datalen > PAGE_SIZE - 1)
35033 + if (datalen > PAGE_SIZE - 1)
35034 return -EOPNOTSUPP;
35035
35036 /* drag the command string into the kernel so we can parse it */
35037 @@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struc
35038 if (args[0] != '%' || args[1] != '\0')
35039 return -EINVAL;
35040
35041 - if (fstop < 0 || fstop >= cache->fcull_percent)
35042 + if (fstop >= cache->fcull_percent)
35043 return cachefiles_daemon_range_error(cache, args);
35044
35045 cache->fstop_percent = fstop;
35046 @@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struc
35047 if (args[0] != '%' || args[1] != '\0')
35048 return -EINVAL;
35049
35050 - if (bstop < 0 || bstop >= cache->bcull_percent)
35051 + if (bstop >= cache->bcull_percent)
35052 return cachefiles_daemon_range_error(cache, args);
35053
35054 cache->bstop_percent = bstop;
35055 diff -urNp linux-2.6.39.4/fs/cachefiles/internal.h linux-2.6.39.4/fs/cachefiles/internal.h
35056 --- linux-2.6.39.4/fs/cachefiles/internal.h 2011-05-19 00:06:34.000000000 -0400
35057 +++ linux-2.6.39.4/fs/cachefiles/internal.h 2011-08-05 19:44:37.000000000 -0400
35058 @@ -57,7 +57,7 @@ struct cachefiles_cache {
35059 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
35060 struct rb_root active_nodes; /* active nodes (can't be culled) */
35061 rwlock_t active_lock; /* lock for active_nodes */
35062 - atomic_t gravecounter; /* graveyard uniquifier */
35063 + atomic_unchecked_t gravecounter; /* graveyard uniquifier */
35064 unsigned frun_percent; /* when to stop culling (% files) */
35065 unsigned fcull_percent; /* when to start culling (% files) */
35066 unsigned fstop_percent; /* when to stop allocating (% files) */
35067 @@ -169,19 +169,19 @@ extern int cachefiles_check_in_use(struc
35068 * proc.c
35069 */
35070 #ifdef CONFIG_CACHEFILES_HISTOGRAM
35071 -extern atomic_t cachefiles_lookup_histogram[HZ];
35072 -extern atomic_t cachefiles_mkdir_histogram[HZ];
35073 -extern atomic_t cachefiles_create_histogram[HZ];
35074 +extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
35075 +extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
35076 +extern atomic_unchecked_t cachefiles_create_histogram[HZ];
35077
35078 extern int __init cachefiles_proc_init(void);
35079 extern void cachefiles_proc_cleanup(void);
35080 static inline
35081 -void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
35082 +void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
35083 {
35084 unsigned long jif = jiffies - start_jif;
35085 if (jif >= HZ)
35086 jif = HZ - 1;
35087 - atomic_inc(&histogram[jif]);
35088 + atomic_inc_unchecked(&histogram[jif]);
35089 }
35090
35091 #else
35092 diff -urNp linux-2.6.39.4/fs/cachefiles/namei.c linux-2.6.39.4/fs/cachefiles/namei.c
35093 --- linux-2.6.39.4/fs/cachefiles/namei.c 2011-05-19 00:06:34.000000000 -0400
35094 +++ linux-2.6.39.4/fs/cachefiles/namei.c 2011-08-05 19:44:37.000000000 -0400
35095 @@ -318,7 +318,7 @@ try_again:
35096 /* first step is to make up a grave dentry in the graveyard */
35097 sprintf(nbuffer, "%08x%08x",
35098 (uint32_t) get_seconds(),
35099 - (uint32_t) atomic_inc_return(&cache->gravecounter));
35100 + (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
35101
35102 /* do the multiway lock magic */
35103 trap = lock_rename(cache->graveyard, dir);
35104 diff -urNp linux-2.6.39.4/fs/cachefiles/proc.c linux-2.6.39.4/fs/cachefiles/proc.c
35105 --- linux-2.6.39.4/fs/cachefiles/proc.c 2011-05-19 00:06:34.000000000 -0400
35106 +++ linux-2.6.39.4/fs/cachefiles/proc.c 2011-08-05 19:44:37.000000000 -0400
35107 @@ -14,9 +14,9 @@
35108 #include <linux/seq_file.h>
35109 #include "internal.h"
35110
35111 -atomic_t cachefiles_lookup_histogram[HZ];
35112 -atomic_t cachefiles_mkdir_histogram[HZ];
35113 -atomic_t cachefiles_create_histogram[HZ];
35114 +atomic_unchecked_t cachefiles_lookup_histogram[HZ];
35115 +atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
35116 +atomic_unchecked_t cachefiles_create_histogram[HZ];
35117
35118 /*
35119 * display the latency histogram
35120 @@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
35121 return 0;
35122 default:
35123 index = (unsigned long) v - 3;
35124 - x = atomic_read(&cachefiles_lookup_histogram[index]);
35125 - y = atomic_read(&cachefiles_mkdir_histogram[index]);
35126 - z = atomic_read(&cachefiles_create_histogram[index]);
35127 + x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
35128 + y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
35129 + z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
35130 if (x == 0 && y == 0 && z == 0)
35131 return 0;
35132
35133 diff -urNp linux-2.6.39.4/fs/cachefiles/rdwr.c linux-2.6.39.4/fs/cachefiles/rdwr.c
35134 --- linux-2.6.39.4/fs/cachefiles/rdwr.c 2011-05-19 00:06:34.000000000 -0400
35135 +++ linux-2.6.39.4/fs/cachefiles/rdwr.c 2011-08-05 19:44:37.000000000 -0400
35136 @@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache
35137 old_fs = get_fs();
35138 set_fs(KERNEL_DS);
35139 ret = file->f_op->write(
35140 - file, (const void __user *) data, len, &pos);
35141 + file, (__force const void __user *) data, len, &pos);
35142 set_fs(old_fs);
35143 kunmap(page);
35144 if (ret != len)
35145 diff -urNp linux-2.6.39.4/fs/ceph/dir.c linux-2.6.39.4/fs/ceph/dir.c
35146 --- linux-2.6.39.4/fs/ceph/dir.c 2011-05-19 00:06:34.000000000 -0400
35147 +++ linux-2.6.39.4/fs/ceph/dir.c 2011-08-05 19:44:37.000000000 -0400
35148 @@ -226,7 +226,7 @@ static int ceph_readdir(struct file *fil
35149 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
35150 struct ceph_mds_client *mdsc = fsc->mdsc;
35151 unsigned frag = fpos_frag(filp->f_pos);
35152 - int off = fpos_off(filp->f_pos);
35153 + unsigned int off = fpos_off(filp->f_pos);
35154 int err;
35155 u32 ftype;
35156 struct ceph_mds_reply_info_parsed *rinfo;
35157 @@ -360,7 +360,7 @@ more:
35158 rinfo = &fi->last_readdir->r_reply_info;
35159 dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
35160 rinfo->dir_nr, off, fi->offset);
35161 - while (off - fi->offset >= 0 && off - fi->offset < rinfo->dir_nr) {
35162 + while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
35163 u64 pos = ceph_make_fpos(frag, off);
35164 struct ceph_mds_reply_inode *in =
35165 rinfo->dir_in[off - fi->offset].in;
35166 diff -urNp linux-2.6.39.4/fs/cifs/cifs_debug.c linux-2.6.39.4/fs/cifs/cifs_debug.c
35167 --- linux-2.6.39.4/fs/cifs/cifs_debug.c 2011-05-19 00:06:34.000000000 -0400
35168 +++ linux-2.6.39.4/fs/cifs/cifs_debug.c 2011-08-05 19:44:37.000000000 -0400
35169 @@ -279,25 +279,25 @@ static ssize_t cifs_stats_proc_write(str
35170 tcon = list_entry(tmp3,
35171 struct cifsTconInfo,
35172 tcon_list);
35173 - atomic_set(&tcon->num_smbs_sent, 0);
35174 - atomic_set(&tcon->num_writes, 0);
35175 - atomic_set(&tcon->num_reads, 0);
35176 - atomic_set(&tcon->num_oplock_brks, 0);
35177 - atomic_set(&tcon->num_opens, 0);
35178 - atomic_set(&tcon->num_posixopens, 0);
35179 - atomic_set(&tcon->num_posixmkdirs, 0);
35180 - atomic_set(&tcon->num_closes, 0);
35181 - atomic_set(&tcon->num_deletes, 0);
35182 - atomic_set(&tcon->num_mkdirs, 0);
35183 - atomic_set(&tcon->num_rmdirs, 0);
35184 - atomic_set(&tcon->num_renames, 0);
35185 - atomic_set(&tcon->num_t2renames, 0);
35186 - atomic_set(&tcon->num_ffirst, 0);
35187 - atomic_set(&tcon->num_fnext, 0);
35188 - atomic_set(&tcon->num_fclose, 0);
35189 - atomic_set(&tcon->num_hardlinks, 0);
35190 - atomic_set(&tcon->num_symlinks, 0);
35191 - atomic_set(&tcon->num_locks, 0);
35192 + atomic_set_unchecked(&tcon->num_smbs_sent, 0);
35193 + atomic_set_unchecked(&tcon->num_writes, 0);
35194 + atomic_set_unchecked(&tcon->num_reads, 0);
35195 + atomic_set_unchecked(&tcon->num_oplock_brks, 0);
35196 + atomic_set_unchecked(&tcon->num_opens, 0);
35197 + atomic_set_unchecked(&tcon->num_posixopens, 0);
35198 + atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
35199 + atomic_set_unchecked(&tcon->num_closes, 0);
35200 + atomic_set_unchecked(&tcon->num_deletes, 0);
35201 + atomic_set_unchecked(&tcon->num_mkdirs, 0);
35202 + atomic_set_unchecked(&tcon->num_rmdirs, 0);
35203 + atomic_set_unchecked(&tcon->num_renames, 0);
35204 + atomic_set_unchecked(&tcon->num_t2renames, 0);
35205 + atomic_set_unchecked(&tcon->num_ffirst, 0);
35206 + atomic_set_unchecked(&tcon->num_fnext, 0);
35207 + atomic_set_unchecked(&tcon->num_fclose, 0);
35208 + atomic_set_unchecked(&tcon->num_hardlinks, 0);
35209 + atomic_set_unchecked(&tcon->num_symlinks, 0);
35210 + atomic_set_unchecked(&tcon->num_locks, 0);
35211 }
35212 }
35213 }
35214 @@ -357,41 +357,41 @@ static int cifs_stats_proc_show(struct s
35215 if (tcon->need_reconnect)
35216 seq_puts(m, "\tDISCONNECTED ");
35217 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
35218 - atomic_read(&tcon->num_smbs_sent),
35219 - atomic_read(&tcon->num_oplock_brks));
35220 + atomic_read_unchecked(&tcon->num_smbs_sent),
35221 + atomic_read_unchecked(&tcon->num_oplock_brks));
35222 seq_printf(m, "\nReads: %d Bytes: %lld",
35223 - atomic_read(&tcon->num_reads),
35224 + atomic_read_unchecked(&tcon->num_reads),
35225 (long long)(tcon->bytes_read));
35226 seq_printf(m, "\nWrites: %d Bytes: %lld",
35227 - atomic_read(&tcon->num_writes),
35228 + atomic_read_unchecked(&tcon->num_writes),
35229 (long long)(tcon->bytes_written));
35230 seq_printf(m, "\nFlushes: %d",
35231 - atomic_read(&tcon->num_flushes));
35232 + atomic_read_unchecked(&tcon->num_flushes));
35233 seq_printf(m, "\nLocks: %d HardLinks: %d "
35234 "Symlinks: %d",
35235 - atomic_read(&tcon->num_locks),
35236 - atomic_read(&tcon->num_hardlinks),
35237 - atomic_read(&tcon->num_symlinks));
35238 + atomic_read_unchecked(&tcon->num_locks),
35239 + atomic_read_unchecked(&tcon->num_hardlinks),
35240 + atomic_read_unchecked(&tcon->num_symlinks));
35241 seq_printf(m, "\nOpens: %d Closes: %d "
35242 "Deletes: %d",
35243 - atomic_read(&tcon->num_opens),
35244 - atomic_read(&tcon->num_closes),
35245 - atomic_read(&tcon->num_deletes));
35246 + atomic_read_unchecked(&tcon->num_opens),
35247 + atomic_read_unchecked(&tcon->num_closes),
35248 + atomic_read_unchecked(&tcon->num_deletes));
35249 seq_printf(m, "\nPosix Opens: %d "
35250 "Posix Mkdirs: %d",
35251 - atomic_read(&tcon->num_posixopens),
35252 - atomic_read(&tcon->num_posixmkdirs));
35253 + atomic_read_unchecked(&tcon->num_posixopens),
35254 + atomic_read_unchecked(&tcon->num_posixmkdirs));
35255 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
35256 - atomic_read(&tcon->num_mkdirs),
35257 - atomic_read(&tcon->num_rmdirs));
35258 + atomic_read_unchecked(&tcon->num_mkdirs),
35259 + atomic_read_unchecked(&tcon->num_rmdirs));
35260 seq_printf(m, "\nRenames: %d T2 Renames %d",
35261 - atomic_read(&tcon->num_renames),
35262 - atomic_read(&tcon->num_t2renames));
35263 + atomic_read_unchecked(&tcon->num_renames),
35264 + atomic_read_unchecked(&tcon->num_t2renames));
35265 seq_printf(m, "\nFindFirst: %d FNext %d "
35266 "FClose %d",
35267 - atomic_read(&tcon->num_ffirst),
35268 - atomic_read(&tcon->num_fnext),
35269 - atomic_read(&tcon->num_fclose));
35270 + atomic_read_unchecked(&tcon->num_ffirst),
35271 + atomic_read_unchecked(&tcon->num_fnext),
35272 + atomic_read_unchecked(&tcon->num_fclose));
35273 }
35274 }
35275 }
35276 diff -urNp linux-2.6.39.4/fs/cifs/cifsglob.h linux-2.6.39.4/fs/cifs/cifsglob.h
35277 --- linux-2.6.39.4/fs/cifs/cifsglob.h 2011-05-19 00:06:34.000000000 -0400
35278 +++ linux-2.6.39.4/fs/cifs/cifsglob.h 2011-08-05 19:44:37.000000000 -0400
35279 @@ -305,28 +305,28 @@ struct cifsTconInfo {
35280 __u16 Flags; /* optional support bits */
35281 enum statusEnum tidStatus;
35282 #ifdef CONFIG_CIFS_STATS
35283 - atomic_t num_smbs_sent;
35284 - atomic_t num_writes;
35285 - atomic_t num_reads;
35286 - atomic_t num_flushes;
35287 - atomic_t num_oplock_brks;
35288 - atomic_t num_opens;
35289 - atomic_t num_closes;
35290 - atomic_t num_deletes;
35291 - atomic_t num_mkdirs;
35292 - atomic_t num_posixopens;
35293 - atomic_t num_posixmkdirs;
35294 - atomic_t num_rmdirs;
35295 - atomic_t num_renames;
35296 - atomic_t num_t2renames;
35297 - atomic_t num_ffirst;
35298 - atomic_t num_fnext;
35299 - atomic_t num_fclose;
35300 - atomic_t num_hardlinks;
35301 - atomic_t num_symlinks;
35302 - atomic_t num_locks;
35303 - atomic_t num_acl_get;
35304 - atomic_t num_acl_set;
35305 + atomic_unchecked_t num_smbs_sent;
35306 + atomic_unchecked_t num_writes;
35307 + atomic_unchecked_t num_reads;
35308 + atomic_unchecked_t num_flushes;
35309 + atomic_unchecked_t num_oplock_brks;
35310 + atomic_unchecked_t num_opens;
35311 + atomic_unchecked_t num_closes;
35312 + atomic_unchecked_t num_deletes;
35313 + atomic_unchecked_t num_mkdirs;
35314 + atomic_unchecked_t num_posixopens;
35315 + atomic_unchecked_t num_posixmkdirs;
35316 + atomic_unchecked_t num_rmdirs;
35317 + atomic_unchecked_t num_renames;
35318 + atomic_unchecked_t num_t2renames;
35319 + atomic_unchecked_t num_ffirst;
35320 + atomic_unchecked_t num_fnext;
35321 + atomic_unchecked_t num_fclose;
35322 + atomic_unchecked_t num_hardlinks;
35323 + atomic_unchecked_t num_symlinks;
35324 + atomic_unchecked_t num_locks;
35325 + atomic_unchecked_t num_acl_get;
35326 + atomic_unchecked_t num_acl_set;
35327 #ifdef CONFIG_CIFS_STATS2
35328 unsigned long long time_writes;
35329 unsigned long long time_reads;
35330 @@ -509,7 +509,7 @@ static inline char CIFS_DIR_SEP(const st
35331 }
35332
35333 #ifdef CONFIG_CIFS_STATS
35334 -#define cifs_stats_inc atomic_inc
35335 +#define cifs_stats_inc atomic_inc_unchecked
35336
35337 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
35338 unsigned int bytes)
35339 diff -urNp linux-2.6.39.4/fs/cifs/link.c linux-2.6.39.4/fs/cifs/link.c
35340 --- linux-2.6.39.4/fs/cifs/link.c 2011-05-19 00:06:34.000000000 -0400
35341 +++ linux-2.6.39.4/fs/cifs/link.c 2011-08-05 19:44:37.000000000 -0400
35342 @@ -577,7 +577,7 @@ symlink_exit:
35343
35344 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
35345 {
35346 - char *p = nd_get_link(nd);
35347 + const char *p = nd_get_link(nd);
35348 if (!IS_ERR(p))
35349 kfree(p);
35350 }
35351 diff -urNp linux-2.6.39.4/fs/coda/cache.c linux-2.6.39.4/fs/coda/cache.c
35352 --- linux-2.6.39.4/fs/coda/cache.c 2011-05-19 00:06:34.000000000 -0400
35353 +++ linux-2.6.39.4/fs/coda/cache.c 2011-08-05 19:44:37.000000000 -0400
35354 @@ -24,7 +24,7 @@
35355 #include "coda_linux.h"
35356 #include "coda_cache.h"
35357
35358 -static atomic_t permission_epoch = ATOMIC_INIT(0);
35359 +static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
35360
35361 /* replace or extend an acl cache hit */
35362 void coda_cache_enter(struct inode *inode, int mask)
35363 @@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inod
35364 struct coda_inode_info *cii = ITOC(inode);
35365
35366 spin_lock(&cii->c_lock);
35367 - cii->c_cached_epoch = atomic_read(&permission_epoch);
35368 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
35369 if (cii->c_uid != current_fsuid()) {
35370 cii->c_uid = current_fsuid();
35371 cii->c_cached_perm = mask;
35372 @@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode
35373 {
35374 struct coda_inode_info *cii = ITOC(inode);
35375 spin_lock(&cii->c_lock);
35376 - cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
35377 + cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
35378 spin_unlock(&cii->c_lock);
35379 }
35380
35381 /* remove all acl caches */
35382 void coda_cache_clear_all(struct super_block *sb)
35383 {
35384 - atomic_inc(&permission_epoch);
35385 + atomic_inc_unchecked(&permission_epoch);
35386 }
35387
35388
35389 @@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode
35390 spin_lock(&cii->c_lock);
35391 hit = (mask & cii->c_cached_perm) == mask &&
35392 cii->c_uid == current_fsuid() &&
35393 - cii->c_cached_epoch == atomic_read(&permission_epoch);
35394 + cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
35395 spin_unlock(&cii->c_lock);
35396
35397 return hit;
35398 diff -urNp linux-2.6.39.4/fs/compat_binfmt_elf.c linux-2.6.39.4/fs/compat_binfmt_elf.c
35399 --- linux-2.6.39.4/fs/compat_binfmt_elf.c 2011-05-19 00:06:34.000000000 -0400
35400 +++ linux-2.6.39.4/fs/compat_binfmt_elf.c 2011-08-05 19:44:37.000000000 -0400
35401 @@ -30,11 +30,13 @@
35402 #undef elf_phdr
35403 #undef elf_shdr
35404 #undef elf_note
35405 +#undef elf_dyn
35406 #undef elf_addr_t
35407 #define elfhdr elf32_hdr
35408 #define elf_phdr elf32_phdr
35409 #define elf_shdr elf32_shdr
35410 #define elf_note elf32_note
35411 +#define elf_dyn Elf32_Dyn
35412 #define elf_addr_t Elf32_Addr
35413
35414 /*
35415 diff -urNp linux-2.6.39.4/fs/compat.c linux-2.6.39.4/fs/compat.c
35416 --- linux-2.6.39.4/fs/compat.c 2011-05-19 00:06:34.000000000 -0400
35417 +++ linux-2.6.39.4/fs/compat.c 2011-08-05 19:44:37.000000000 -0400
35418 @@ -566,7 +566,7 @@ ssize_t compat_rw_copy_check_uvector(int
35419 goto out;
35420
35421 ret = -EINVAL;
35422 - if (nr_segs > UIO_MAXIOV || nr_segs < 0)
35423 + if (nr_segs > UIO_MAXIOV)
35424 goto out;
35425 if (nr_segs > fast_segs) {
35426 ret = -ENOMEM;
35427 @@ -848,6 +848,7 @@ struct compat_old_linux_dirent {
35428
35429 struct compat_readdir_callback {
35430 struct compat_old_linux_dirent __user *dirent;
35431 + struct file * file;
35432 int result;
35433 };
35434
35435 @@ -865,6 +866,10 @@ static int compat_fillonedir(void *__buf
35436 buf->result = -EOVERFLOW;
35437 return -EOVERFLOW;
35438 }
35439 +
35440 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35441 + return 0;
35442 +
35443 buf->result++;
35444 dirent = buf->dirent;
35445 if (!access_ok(VERIFY_WRITE, dirent,
35446 @@ -897,6 +902,7 @@ asmlinkage long compat_sys_old_readdir(u
35447
35448 buf.result = 0;
35449 buf.dirent = dirent;
35450 + buf.file = file;
35451
35452 error = vfs_readdir(file, compat_fillonedir, &buf);
35453 if (buf.result)
35454 @@ -917,6 +923,7 @@ struct compat_linux_dirent {
35455 struct compat_getdents_callback {
35456 struct compat_linux_dirent __user *current_dir;
35457 struct compat_linux_dirent __user *previous;
35458 + struct file * file;
35459 int count;
35460 int error;
35461 };
35462 @@ -938,6 +945,10 @@ static int compat_filldir(void *__buf, c
35463 buf->error = -EOVERFLOW;
35464 return -EOVERFLOW;
35465 }
35466 +
35467 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35468 + return 0;
35469 +
35470 dirent = buf->previous;
35471 if (dirent) {
35472 if (__put_user(offset, &dirent->d_off))
35473 @@ -985,6 +996,7 @@ asmlinkage long compat_sys_getdents(unsi
35474 buf.previous = NULL;
35475 buf.count = count;
35476 buf.error = 0;
35477 + buf.file = file;
35478
35479 error = vfs_readdir(file, compat_filldir, &buf);
35480 if (error >= 0)
35481 @@ -1006,6 +1018,7 @@ out:
35482 struct compat_getdents_callback64 {
35483 struct linux_dirent64 __user *current_dir;
35484 struct linux_dirent64 __user *previous;
35485 + struct file * file;
35486 int count;
35487 int error;
35488 };
35489 @@ -1022,6 +1035,10 @@ static int compat_filldir64(void * __buf
35490 buf->error = -EINVAL; /* only used if we fail.. */
35491 if (reclen > buf->count)
35492 return -EINVAL;
35493 +
35494 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
35495 + return 0;
35496 +
35497 dirent = buf->previous;
35498
35499 if (dirent) {
35500 @@ -1073,6 +1090,7 @@ asmlinkage long compat_sys_getdents64(un
35501 buf.previous = NULL;
35502 buf.count = count;
35503 buf.error = 0;
35504 + buf.file = file;
35505
35506 error = vfs_readdir(file, compat_filldir64, &buf);
35507 if (error >= 0)
35508 @@ -1436,6 +1454,11 @@ int compat_do_execve(char * filename,
35509 compat_uptr_t __user *envp,
35510 struct pt_regs * regs)
35511 {
35512 +#ifdef CONFIG_GRKERNSEC
35513 + struct file *old_exec_file;
35514 + struct acl_subject_label *old_acl;
35515 + struct rlimit old_rlim[RLIM_NLIMITS];
35516 +#endif
35517 struct linux_binprm *bprm;
35518 struct file *file;
35519 struct files_struct *displaced;
35520 @@ -1472,6 +1495,19 @@ int compat_do_execve(char * filename,
35521 bprm->filename = filename;
35522 bprm->interp = filename;
35523
35524 + if (gr_process_user_ban()) {
35525 + retval = -EPERM;
35526 + goto out_file;
35527 + }
35528 +
35529 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
35530 + retval = -EAGAIN;
35531 + if (gr_handle_nproc())
35532 + goto out_file;
35533 + retval = -EACCES;
35534 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
35535 + goto out_file;
35536 +
35537 retval = bprm_mm_init(bprm);
35538 if (retval)
35539 goto out_file;
35540 @@ -1501,9 +1537,40 @@ int compat_do_execve(char * filename,
35541 if (retval < 0)
35542 goto out;
35543
35544 + if (!gr_tpe_allow(file)) {
35545 + retval = -EACCES;
35546 + goto out;
35547 + }
35548 +
35549 + if (gr_check_crash_exec(file)) {
35550 + retval = -EACCES;
35551 + goto out;
35552 + }
35553 +
35554 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
35555 +
35556 + gr_handle_exec_args_compat(bprm, argv);
35557 +
35558 +#ifdef CONFIG_GRKERNSEC
35559 + old_acl = current->acl;
35560 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
35561 + old_exec_file = current->exec_file;
35562 + get_file(file);
35563 + current->exec_file = file;
35564 +#endif
35565 +
35566 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
35567 + bprm->unsafe & LSM_UNSAFE_SHARE);
35568 + if (retval < 0)
35569 + goto out_fail;
35570 +
35571 retval = search_binary_handler(bprm, regs);
35572 if (retval < 0)
35573 - goto out;
35574 + goto out_fail;
35575 +#ifdef CONFIG_GRKERNSEC
35576 + if (old_exec_file)
35577 + fput(old_exec_file);
35578 +#endif
35579
35580 /* execve succeeded */
35581 current->fs->in_exec = 0;
35582 @@ -1514,6 +1581,14 @@ int compat_do_execve(char * filename,
35583 put_files_struct(displaced);
35584 return retval;
35585
35586 +out_fail:
35587 +#ifdef CONFIG_GRKERNSEC
35588 + current->acl = old_acl;
35589 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
35590 + fput(current->exec_file);
35591 + current->exec_file = old_exec_file;
35592 +#endif
35593 +
35594 out:
35595 if (bprm->mm) {
35596 acct_arg_size(bprm, 0);
35597 @@ -1681,6 +1756,8 @@ int compat_core_sys_select(int n, compat
35598 struct fdtable *fdt;
35599 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
35600
35601 + pax_track_stack();
35602 +
35603 if (n < 0)
35604 goto out_nofds;
35605
35606 diff -urNp linux-2.6.39.4/fs/compat_ioctl.c linux-2.6.39.4/fs/compat_ioctl.c
35607 --- linux-2.6.39.4/fs/compat_ioctl.c 2011-05-19 00:06:34.000000000 -0400
35608 +++ linux-2.6.39.4/fs/compat_ioctl.c 2011-08-05 19:44:37.000000000 -0400
35609 @@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsi
35610
35611 err = get_user(palp, &up->palette);
35612 err |= get_user(length, &up->length);
35613 + if (err)
35614 + return -EFAULT;
35615
35616 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
35617 err = put_user(compat_ptr(palp), &up_native->palette);
35618 @@ -1638,8 +1640,8 @@ asmlinkage long compat_sys_ioctl(unsigne
35619 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
35620 {
35621 unsigned int a, b;
35622 - a = *(unsigned int *)p;
35623 - b = *(unsigned int *)q;
35624 + a = *(const unsigned int *)p;
35625 + b = *(const unsigned int *)q;
35626 if (a > b)
35627 return 1;
35628 if (a < b)
35629 diff -urNp linux-2.6.39.4/fs/configfs/dir.c linux-2.6.39.4/fs/configfs/dir.c
35630 --- linux-2.6.39.4/fs/configfs/dir.c 2011-05-19 00:06:34.000000000 -0400
35631 +++ linux-2.6.39.4/fs/configfs/dir.c 2011-08-05 19:44:37.000000000 -0400
35632 @@ -1575,7 +1575,8 @@ static int configfs_readdir(struct file
35633 }
35634 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
35635 struct configfs_dirent *next;
35636 - const char * name;
35637 + const unsigned char * name;
35638 + char d_name[sizeof(next->s_dentry->d_iname)];
35639 int len;
35640 struct inode *inode = NULL;
35641
35642 @@ -1585,7 +1586,12 @@ static int configfs_readdir(struct file
35643 continue;
35644
35645 name = configfs_get_name(next);
35646 - len = strlen(name);
35647 + if (next->s_dentry && name == next->s_dentry->d_iname) {
35648 + len = next->s_dentry->d_name.len;
35649 + memcpy(d_name, name, len);
35650 + name = d_name;
35651 + } else
35652 + len = strlen(name);
35653
35654 /*
35655 * We'll have a dentry and an inode for
35656 diff -urNp linux-2.6.39.4/fs/dcache.c linux-2.6.39.4/fs/dcache.c
35657 --- linux-2.6.39.4/fs/dcache.c 2011-05-19 00:06:34.000000000 -0400
35658 +++ linux-2.6.39.4/fs/dcache.c 2011-08-05 19:44:37.000000000 -0400
35659 @@ -3069,7 +3069,7 @@ void __init vfs_caches_init(unsigned lon
35660 mempages -= reserve;
35661
35662 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
35663 - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
35664 + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
35665
35666 dcache_init();
35667 inode_init();
35668 diff -urNp linux-2.6.39.4/fs/ecryptfs/inode.c linux-2.6.39.4/fs/ecryptfs/inode.c
35669 --- linux-2.6.39.4/fs/ecryptfs/inode.c 2011-06-03 00:04:14.000000000 -0400
35670 +++ linux-2.6.39.4/fs/ecryptfs/inode.c 2011-08-05 19:44:37.000000000 -0400
35671 @@ -623,7 +623,7 @@ static int ecryptfs_readlink_lower(struc
35672 old_fs = get_fs();
35673 set_fs(get_ds());
35674 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
35675 - (char __user *)lower_buf,
35676 + (__force char __user *)lower_buf,
35677 lower_bufsiz);
35678 set_fs(old_fs);
35679 if (rc < 0)
35680 @@ -669,7 +669,7 @@ static void *ecryptfs_follow_link(struct
35681 }
35682 old_fs = get_fs();
35683 set_fs(get_ds());
35684 - rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
35685 + rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
35686 set_fs(old_fs);
35687 if (rc < 0) {
35688 kfree(buf);
35689 @@ -684,7 +684,7 @@ out:
35690 static void
35691 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
35692 {
35693 - char *buf = nd_get_link(nd);
35694 + const char *buf = nd_get_link(nd);
35695 if (!IS_ERR(buf)) {
35696 /* Free the char* */
35697 kfree(buf);
35698 diff -urNp linux-2.6.39.4/fs/ecryptfs/miscdev.c linux-2.6.39.4/fs/ecryptfs/miscdev.c
35699 --- linux-2.6.39.4/fs/ecryptfs/miscdev.c 2011-05-19 00:06:34.000000000 -0400
35700 +++ linux-2.6.39.4/fs/ecryptfs/miscdev.c 2011-08-05 19:44:37.000000000 -0400
35701 @@ -328,7 +328,7 @@ check_list:
35702 goto out_unlock_msg_ctx;
35703 i = 5;
35704 if (msg_ctx->msg) {
35705 - if (copy_to_user(&buf[i], packet_length, packet_length_size))
35706 + if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
35707 goto out_unlock_msg_ctx;
35708 i += packet_length_size;
35709 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
35710 diff -urNp linux-2.6.39.4/fs/exec.c linux-2.6.39.4/fs/exec.c
35711 --- linux-2.6.39.4/fs/exec.c 2011-06-25 12:55:23.000000000 -0400
35712 +++ linux-2.6.39.4/fs/exec.c 2011-08-05 19:44:37.000000000 -0400
35713 @@ -55,12 +55,24 @@
35714 #include <linux/fs_struct.h>
35715 #include <linux/pipe_fs_i.h>
35716 #include <linux/oom.h>
35717 +#include <linux/random.h>
35718 +#include <linux/seq_file.h>
35719 +
35720 +#ifdef CONFIG_PAX_REFCOUNT
35721 +#include <linux/kallsyms.h>
35722 +#include <linux/kdebug.h>
35723 +#endif
35724
35725 #include <asm/uaccess.h>
35726 #include <asm/mmu_context.h>
35727 #include <asm/tlb.h>
35728 #include "internal.h"
35729
35730 +#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
35731 +void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
35732 +EXPORT_SYMBOL(pax_set_initial_flags_func);
35733 +#endif
35734 +
35735 int core_uses_pid;
35736 char core_pattern[CORENAME_MAX_SIZE] = "core";
35737 unsigned int core_pipe_limit;
35738 @@ -70,7 +82,7 @@ struct core_name {
35739 char *corename;
35740 int used, size;
35741 };
35742 -static atomic_t call_count = ATOMIC_INIT(1);
35743 +static atomic_unchecked_t call_count = ATOMIC_INIT(1);
35744
35745 /* The maximal length of core_pattern is also specified in sysctl.c */
35746
35747 @@ -116,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __use
35748 char *tmp = getname(library);
35749 int error = PTR_ERR(tmp);
35750 static const struct open_flags uselib_flags = {
35751 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35752 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35753 .acc_mode = MAY_READ | MAY_EXEC | MAY_OPEN,
35754 .intent = LOOKUP_OPEN
35755 };
35756 @@ -190,18 +202,10 @@ struct page *get_arg_page(struct linux_b
35757 int write)
35758 {
35759 struct page *page;
35760 - int ret;
35761
35762 -#ifdef CONFIG_STACK_GROWSUP
35763 - if (write) {
35764 - ret = expand_stack_downwards(bprm->vma, pos);
35765 - if (ret < 0)
35766 - return NULL;
35767 - }
35768 -#endif
35769 - ret = get_user_pages(current, bprm->mm, pos,
35770 - 1, write, 1, &page, NULL);
35771 - if (ret <= 0)
35772 + if (0 > expand_stack_downwards(bprm->vma, pos))
35773 + return NULL;
35774 + if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
35775 return NULL;
35776
35777 if (write) {
35778 @@ -276,6 +280,11 @@ static int __bprm_mm_init(struct linux_b
35779 vma->vm_end = STACK_TOP_MAX;
35780 vma->vm_start = vma->vm_end - PAGE_SIZE;
35781 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
35782 +
35783 +#ifdef CONFIG_PAX_SEGMEXEC
35784 + vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
35785 +#endif
35786 +
35787 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
35788 INIT_LIST_HEAD(&vma->anon_vma_chain);
35789
35790 @@ -290,6 +299,12 @@ static int __bprm_mm_init(struct linux_b
35791 mm->stack_vm = mm->total_vm = 1;
35792 up_write(&mm->mmap_sem);
35793 bprm->p = vma->vm_end - sizeof(void *);
35794 +
35795 +#ifdef CONFIG_PAX_RANDUSTACK
35796 + if (randomize_va_space)
35797 + bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
35798 +#endif
35799 +
35800 return 0;
35801 err:
35802 up_write(&mm->mmap_sem);
35803 @@ -525,7 +540,7 @@ int copy_strings_kernel(int argc, const
35804 int r;
35805 mm_segment_t oldfs = get_fs();
35806 set_fs(KERNEL_DS);
35807 - r = copy_strings(argc, (const char __user *const __user *)argv, bprm);
35808 + r = copy_strings(argc, (__force const char __user *const __user *)argv, bprm);
35809 set_fs(oldfs);
35810 return r;
35811 }
35812 @@ -555,7 +570,8 @@ static int shift_arg_pages(struct vm_are
35813 unsigned long new_end = old_end - shift;
35814 struct mmu_gather *tlb;
35815
35816 - BUG_ON(new_start > new_end);
35817 + if (new_start >= new_end || new_start < mmap_min_addr)
35818 + return -ENOMEM;
35819
35820 /*
35821 * ensure there are no vmas between where we want to go
35822 @@ -564,6 +580,10 @@ static int shift_arg_pages(struct vm_are
35823 if (vma != find_vma(mm, new_start))
35824 return -EFAULT;
35825
35826 +#ifdef CONFIG_PAX_SEGMEXEC
35827 + BUG_ON(pax_find_mirror_vma(vma));
35828 +#endif
35829 +
35830 /*
35831 * cover the whole range: [new_start, old_end)
35832 */
35833 @@ -644,10 +664,6 @@ int setup_arg_pages(struct linux_binprm
35834 stack_top = arch_align_stack(stack_top);
35835 stack_top = PAGE_ALIGN(stack_top);
35836
35837 - if (unlikely(stack_top < mmap_min_addr) ||
35838 - unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
35839 - return -ENOMEM;
35840 -
35841 stack_shift = vma->vm_end - stack_top;
35842
35843 bprm->p -= stack_shift;
35844 @@ -659,8 +675,28 @@ int setup_arg_pages(struct linux_binprm
35845 bprm->exec -= stack_shift;
35846
35847 down_write(&mm->mmap_sem);
35848 +
35849 + /* Move stack pages down in memory. */
35850 + if (stack_shift) {
35851 + ret = shift_arg_pages(vma, stack_shift);
35852 + if (ret)
35853 + goto out_unlock;
35854 + }
35855 +
35856 vm_flags = VM_STACK_FLAGS;
35857
35858 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
35859 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
35860 + vm_flags &= ~VM_EXEC;
35861 +
35862 +#ifdef CONFIG_PAX_MPROTECT
35863 + if (mm->pax_flags & MF_PAX_MPROTECT)
35864 + vm_flags &= ~VM_MAYEXEC;
35865 +#endif
35866 +
35867 + }
35868 +#endif
35869 +
35870 /*
35871 * Adjust stack execute permissions; explicitly enable for
35872 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
35873 @@ -679,13 +715,6 @@ int setup_arg_pages(struct linux_binprm
35874 goto out_unlock;
35875 BUG_ON(prev != vma);
35876
35877 - /* Move stack pages down in memory. */
35878 - if (stack_shift) {
35879 - ret = shift_arg_pages(vma, stack_shift);
35880 - if (ret)
35881 - goto out_unlock;
35882 - }
35883 -
35884 /* mprotect_fixup is overkill to remove the temporary stack flags */
35885 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
35886
35887 @@ -725,7 +754,7 @@ struct file *open_exec(const char *name)
35888 struct file *file;
35889 int err;
35890 static const struct open_flags open_exec_flags = {
35891 - .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
35892 + .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC | FMODE_GREXEC,
35893 .acc_mode = MAY_EXEC | MAY_OPEN,
35894 .intent = LOOKUP_OPEN
35895 };
35896 @@ -766,7 +795,7 @@ int kernel_read(struct file *file, loff_
35897 old_fs = get_fs();
35898 set_fs(get_ds());
35899 /* The cast to a user pointer is valid due to the set_fs() */
35900 - result = vfs_read(file, (void __user *)addr, count, &pos);
35901 + result = vfs_read(file, (__force void __user *)addr, count, &pos);
35902 set_fs(old_fs);
35903 return result;
35904 }
35905 @@ -1189,7 +1218,7 @@ int check_unsafe_exec(struct linux_binpr
35906 }
35907 rcu_read_unlock();
35908
35909 - if (p->fs->users > n_fs) {
35910 + if (atomic_read(&p->fs->users) > n_fs) {
35911 bprm->unsafe |= LSM_UNSAFE_SHARE;
35912 } else {
35913 res = -EAGAIN;
35914 @@ -1381,6 +1410,11 @@ int do_execve(const char * filename,
35915 const char __user *const __user *envp,
35916 struct pt_regs * regs)
35917 {
35918 +#ifdef CONFIG_GRKERNSEC
35919 + struct file *old_exec_file;
35920 + struct acl_subject_label *old_acl;
35921 + struct rlimit old_rlim[RLIM_NLIMITS];
35922 +#endif
35923 struct linux_binprm *bprm;
35924 struct file *file;
35925 struct files_struct *displaced;
35926 @@ -1417,6 +1451,23 @@ int do_execve(const char * filename,
35927 bprm->filename = filename;
35928 bprm->interp = filename;
35929
35930 + if (gr_process_user_ban()) {
35931 + retval = -EPERM;
35932 + goto out_file;
35933 + }
35934 +
35935 + gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
35936 +
35937 + if (gr_handle_nproc()) {
35938 + retval = -EAGAIN;
35939 + goto out_file;
35940 + }
35941 +
35942 + if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
35943 + retval = -EACCES;
35944 + goto out_file;
35945 + }
35946 +
35947 retval = bprm_mm_init(bprm);
35948 if (retval)
35949 goto out_file;
35950 @@ -1446,9 +1497,40 @@ int do_execve(const char * filename,
35951 if (retval < 0)
35952 goto out;
35953
35954 + if (!gr_tpe_allow(file)) {
35955 + retval = -EACCES;
35956 + goto out;
35957 + }
35958 +
35959 + if (gr_check_crash_exec(file)) {
35960 + retval = -EACCES;
35961 + goto out;
35962 + }
35963 +
35964 + gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
35965 +
35966 + gr_handle_exec_args(bprm, argv);
35967 +
35968 +#ifdef CONFIG_GRKERNSEC
35969 + old_acl = current->acl;
35970 + memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
35971 + old_exec_file = current->exec_file;
35972 + get_file(file);
35973 + current->exec_file = file;
35974 +#endif
35975 +
35976 + retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
35977 + bprm->unsafe & LSM_UNSAFE_SHARE);
35978 + if (retval < 0)
35979 + goto out_fail;
35980 +
35981 retval = search_binary_handler(bprm,regs);
35982 if (retval < 0)
35983 - goto out;
35984 + goto out_fail;
35985 +#ifdef CONFIG_GRKERNSEC
35986 + if (old_exec_file)
35987 + fput(old_exec_file);
35988 +#endif
35989
35990 /* execve succeeded */
35991 current->fs->in_exec = 0;
35992 @@ -1459,6 +1541,14 @@ int do_execve(const char * filename,
35993 put_files_struct(displaced);
35994 return retval;
35995
35996 +out_fail:
35997 +#ifdef CONFIG_GRKERNSEC
35998 + current->acl = old_acl;
35999 + memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
36000 + fput(current->exec_file);
36001 + current->exec_file = old_exec_file;
36002 +#endif
36003 +
36004 out:
36005 if (bprm->mm) {
36006 acct_arg_size(bprm, 0);
36007 @@ -1504,7 +1594,7 @@ static int expand_corename(struct core_n
36008 {
36009 char *old_corename = cn->corename;
36010
36011 - cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
36012 + cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
36013 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
36014
36015 if (!cn->corename) {
36016 @@ -1557,7 +1647,7 @@ static int format_corename(struct core_n
36017 int pid_in_pattern = 0;
36018 int err = 0;
36019
36020 - cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
36021 + cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
36022 cn->corename = kmalloc(cn->size, GFP_KERNEL);
36023 cn->used = 0;
36024
36025 @@ -1645,6 +1735,219 @@ out:
36026 return ispipe;
36027 }
36028
36029 +int pax_check_flags(unsigned long *flags)
36030 +{
36031 + int retval = 0;
36032 +
36033 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
36034 + if (*flags & MF_PAX_SEGMEXEC)
36035 + {
36036 + *flags &= ~MF_PAX_SEGMEXEC;
36037 + retval = -EINVAL;
36038 + }
36039 +#endif
36040 +
36041 + if ((*flags & MF_PAX_PAGEEXEC)
36042 +
36043 +#ifdef CONFIG_PAX_PAGEEXEC
36044 + && (*flags & MF_PAX_SEGMEXEC)
36045 +#endif
36046 +
36047 + )
36048 + {
36049 + *flags &= ~MF_PAX_PAGEEXEC;
36050 + retval = -EINVAL;
36051 + }
36052 +
36053 + if ((*flags & MF_PAX_MPROTECT)
36054 +
36055 +#ifdef CONFIG_PAX_MPROTECT
36056 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
36057 +#endif
36058 +
36059 + )
36060 + {
36061 + *flags &= ~MF_PAX_MPROTECT;
36062 + retval = -EINVAL;
36063 + }
36064 +
36065 + if ((*flags & MF_PAX_EMUTRAMP)
36066 +
36067 +#ifdef CONFIG_PAX_EMUTRAMP
36068 + && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
36069 +#endif
36070 +
36071 + )
36072 + {
36073 + *flags &= ~MF_PAX_EMUTRAMP;
36074 + retval = -EINVAL;
36075 + }
36076 +
36077 + return retval;
36078 +}
36079 +
36080 +EXPORT_SYMBOL(pax_check_flags);
36081 +
36082 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
36083 +void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
36084 +{
36085 + struct task_struct *tsk = current;
36086 + struct mm_struct *mm = current->mm;
36087 + char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
36088 + char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
36089 + char *path_exec = NULL;
36090 + char *path_fault = NULL;
36091 + unsigned long start = 0UL, end = 0UL, offset = 0UL;
36092 +
36093 + if (buffer_exec && buffer_fault) {
36094 + struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
36095 +
36096 + down_read(&mm->mmap_sem);
36097 + vma = mm->mmap;
36098 + while (vma && (!vma_exec || !vma_fault)) {
36099 + if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
36100 + vma_exec = vma;
36101 + if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
36102 + vma_fault = vma;
36103 + vma = vma->vm_next;
36104 + }
36105 + if (vma_exec) {
36106 + path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
36107 + if (IS_ERR(path_exec))
36108 + path_exec = "<path too long>";
36109 + else {
36110 + path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
36111 + if (path_exec) {
36112 + *path_exec = 0;
36113 + path_exec = buffer_exec;
36114 + } else
36115 + path_exec = "<path too long>";
36116 + }
36117 + }
36118 + if (vma_fault) {
36119 + start = vma_fault->vm_start;
36120 + end = vma_fault->vm_end;
36121 + offset = vma_fault->vm_pgoff << PAGE_SHIFT;
36122 + if (vma_fault->vm_file) {
36123 + path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
36124 + if (IS_ERR(path_fault))
36125 + path_fault = "<path too long>";
36126 + else {
36127 + path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
36128 + if (path_fault) {
36129 + *path_fault = 0;
36130 + path_fault = buffer_fault;
36131 + } else
36132 + path_fault = "<path too long>";
36133 + }
36134 + } else
36135 + path_fault = "<anonymous mapping>";
36136 + }
36137 + up_read(&mm->mmap_sem);
36138 + }
36139 + if (tsk->signal->curr_ip)
36140 + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
36141 + else
36142 + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
36143 + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
36144 + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
36145 + task_uid(tsk), task_euid(tsk), pc, sp);
36146 + free_page((unsigned long)buffer_exec);
36147 + free_page((unsigned long)buffer_fault);
36148 + pax_report_insns(pc, sp);
36149 + do_coredump(SIGKILL, SIGKILL, regs);
36150 +}
36151 +#endif
36152 +
36153 +#ifdef CONFIG_PAX_REFCOUNT
36154 +void pax_report_refcount_overflow(struct pt_regs *regs)
36155 +{
36156 + if (current->signal->curr_ip)
36157 + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
36158 + &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
36159 + else
36160 + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
36161 + current->comm, task_pid_nr(current), current_uid(), current_euid());
36162 + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
36163 + show_regs(regs);
36164 + force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
36165 +}
36166 +#endif
36167 +
36168 +#ifdef CONFIG_PAX_USERCOPY
36169 +/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
36170 +int object_is_on_stack(const void *obj, unsigned long len)
36171 +{
36172 + const void * const stack = task_stack_page(current);
36173 + const void * const stackend = stack + THREAD_SIZE;
36174 +
36175 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
36176 + const void *frame = NULL;
36177 + const void *oldframe;
36178 +#endif
36179 +
36180 + if (obj + len < obj)
36181 + return -1;
36182 +
36183 + if (obj + len <= stack || stackend <= obj)
36184 + return 0;
36185 +
36186 + if (obj < stack || stackend < obj + len)
36187 + return -1;
36188 +
36189 +#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
36190 + oldframe = __builtin_frame_address(1);
36191 + if (oldframe)
36192 + frame = __builtin_frame_address(2);
36193 + /*
36194 + low ----------------------------------------------> high
36195 + [saved bp][saved ip][args][local vars][saved bp][saved ip]
36196 + ^----------------^
36197 + allow copies only within here
36198 + */
36199 + while (stack <= frame && frame < stackend) {
36200 + /* if obj + len extends past the last frame, this
36201 + check won't pass and the next frame will be 0,
36202 + causing us to bail out and correctly report
36203 + the copy as invalid
36204 + */
36205 + if (obj + len <= frame)
36206 + return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
36207 + oldframe = frame;
36208 + frame = *(const void * const *)frame;
36209 + }
36210 + return -1;
36211 +#else
36212 + return 1;
36213 +#endif
36214 +}
36215 +
36216 +
36217 +NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
36218 +{
36219 + if (current->signal->curr_ip)
36220 + printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
36221 + &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
36222 + else
36223 + printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
36224 + to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
36225 + dump_stack();
36226 + gr_handle_kernel_exploit();
36227 + do_group_exit(SIGKILL);
36228 +}
36229 +#endif
36230 +
36231 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
36232 +void pax_track_stack(void)
36233 +{
36234 + unsigned long sp = (unsigned long)&sp;
36235 + if (sp < current_thread_info()->lowest_stack &&
36236 + sp > (unsigned long)task_stack_page(current))
36237 + current_thread_info()->lowest_stack = sp;
36238 +}
36239 +EXPORT_SYMBOL(pax_track_stack);
36240 +#endif
36241 +
36242 static int zap_process(struct task_struct *start, int exit_code)
36243 {
36244 struct task_struct *t;
36245 @@ -1855,17 +2158,17 @@ static void wait_for_dump_helpers(struct
36246 pipe = file->f_path.dentry->d_inode->i_pipe;
36247
36248 pipe_lock(pipe);
36249 - pipe->readers++;
36250 - pipe->writers--;
36251 + atomic_inc(&pipe->readers);
36252 + atomic_dec(&pipe->writers);
36253
36254 - while ((pipe->readers > 1) && (!signal_pending(current))) {
36255 + while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
36256 wake_up_interruptible_sync(&pipe->wait);
36257 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
36258 pipe_wait(pipe);
36259 }
36260
36261 - pipe->readers--;
36262 - pipe->writers++;
36263 + atomic_dec(&pipe->readers);
36264 + atomic_inc(&pipe->writers);
36265 pipe_unlock(pipe);
36266
36267 }
36268 @@ -1926,7 +2229,7 @@ void do_coredump(long signr, int exit_co
36269 int retval = 0;
36270 int flag = 0;
36271 int ispipe;
36272 - static atomic_t core_dump_count = ATOMIC_INIT(0);
36273 + static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
36274 struct coredump_params cprm = {
36275 .signr = signr,
36276 .regs = regs,
36277 @@ -1941,6 +2244,9 @@ void do_coredump(long signr, int exit_co
36278
36279 audit_core_dumps(signr);
36280
36281 + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
36282 + gr_handle_brute_attach(current, cprm.mm_flags);
36283 +
36284 binfmt = mm->binfmt;
36285 if (!binfmt || !binfmt->core_dump)
36286 goto fail;
36287 @@ -1981,6 +2287,8 @@ void do_coredump(long signr, int exit_co
36288 goto fail_corename;
36289 }
36290
36291 + gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
36292 +
36293 if (ispipe) {
36294 int dump_count;
36295 char **helper_argv;
36296 @@ -2008,7 +2316,7 @@ void do_coredump(long signr, int exit_co
36297 }
36298 cprm.limit = RLIM_INFINITY;
36299
36300 - dump_count = atomic_inc_return(&core_dump_count);
36301 + dump_count = atomic_inc_return_unchecked(&core_dump_count);
36302 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
36303 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
36304 task_tgid_vnr(current), current->comm);
36305 @@ -2078,7 +2386,7 @@ close_fail:
36306 filp_close(cprm.file, NULL);
36307 fail_dropcount:
36308 if (ispipe)
36309 - atomic_dec(&core_dump_count);
36310 + atomic_dec_unchecked(&core_dump_count);
36311 fail_unlock:
36312 kfree(cn.corename);
36313 fail_corename:
36314 diff -urNp linux-2.6.39.4/fs/ext2/balloc.c linux-2.6.39.4/fs/ext2/balloc.c
36315 --- linux-2.6.39.4/fs/ext2/balloc.c 2011-05-19 00:06:34.000000000 -0400
36316 +++ linux-2.6.39.4/fs/ext2/balloc.c 2011-08-05 19:44:37.000000000 -0400
36317 @@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
36318
36319 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
36320 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
36321 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
36322 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
36323 sbi->s_resuid != current_fsuid() &&
36324 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
36325 return 0;
36326 diff -urNp linux-2.6.39.4/fs/ext3/balloc.c linux-2.6.39.4/fs/ext3/balloc.c
36327 --- linux-2.6.39.4/fs/ext3/balloc.c 2011-05-19 00:06:34.000000000 -0400
36328 +++ linux-2.6.39.4/fs/ext3/balloc.c 2011-08-05 19:44:37.000000000 -0400
36329 @@ -1441,7 +1441,7 @@ static int ext3_has_free_blocks(struct e
36330
36331 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
36332 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
36333 - if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
36334 + if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
36335 sbi->s_resuid != current_fsuid() &&
36336 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
36337 return 0;
36338 diff -urNp linux-2.6.39.4/fs/ext4/balloc.c linux-2.6.39.4/fs/ext4/balloc.c
36339 --- linux-2.6.39.4/fs/ext4/balloc.c 2011-05-19 00:06:34.000000000 -0400
36340 +++ linux-2.6.39.4/fs/ext4/balloc.c 2011-08-05 19:44:37.000000000 -0400
36341 @@ -522,7 +522,7 @@ static int ext4_has_free_blocks(struct e
36342 /* Hm, nope. Are (enough) root reserved blocks available? */
36343 if (sbi->s_resuid == current_fsuid() ||
36344 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
36345 - capable(CAP_SYS_RESOURCE)) {
36346 + capable_nolog(CAP_SYS_RESOURCE)) {
36347 if (free_blocks >= (nblocks + dirty_blocks))
36348 return 1;
36349 }
36350 diff -urNp linux-2.6.39.4/fs/ext4/ext4.h linux-2.6.39.4/fs/ext4/ext4.h
36351 --- linux-2.6.39.4/fs/ext4/ext4.h 2011-06-03 00:04:14.000000000 -0400
36352 +++ linux-2.6.39.4/fs/ext4/ext4.h 2011-08-05 19:44:37.000000000 -0400
36353 @@ -1166,19 +1166,19 @@ struct ext4_sb_info {
36354 unsigned long s_mb_last_start;
36355
36356 /* stats for buddy allocator */
36357 - atomic_t s_bal_reqs; /* number of reqs with len > 1 */
36358 - atomic_t s_bal_success; /* we found long enough chunks */
36359 - atomic_t s_bal_allocated; /* in blocks */
36360 - atomic_t s_bal_ex_scanned; /* total extents scanned */
36361 - atomic_t s_bal_goals; /* goal hits */
36362 - atomic_t s_bal_breaks; /* too long searches */
36363 - atomic_t s_bal_2orders; /* 2^order hits */
36364 + atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
36365 + atomic_unchecked_t s_bal_success; /* we found long enough chunks */
36366 + atomic_unchecked_t s_bal_allocated; /* in blocks */
36367 + atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
36368 + atomic_unchecked_t s_bal_goals; /* goal hits */
36369 + atomic_unchecked_t s_bal_breaks; /* too long searches */
36370 + atomic_unchecked_t s_bal_2orders; /* 2^order hits */
36371 spinlock_t s_bal_lock;
36372 unsigned long s_mb_buddies_generated;
36373 unsigned long long s_mb_generation_time;
36374 - atomic_t s_mb_lost_chunks;
36375 - atomic_t s_mb_preallocated;
36376 - atomic_t s_mb_discarded;
36377 + atomic_unchecked_t s_mb_lost_chunks;
36378 + atomic_unchecked_t s_mb_preallocated;
36379 + atomic_unchecked_t s_mb_discarded;
36380 atomic_t s_lock_busy;
36381
36382 /* locality groups */
36383 diff -urNp linux-2.6.39.4/fs/ext4/mballoc.c linux-2.6.39.4/fs/ext4/mballoc.c
36384 --- linux-2.6.39.4/fs/ext4/mballoc.c 2011-06-03 00:04:14.000000000 -0400
36385 +++ linux-2.6.39.4/fs/ext4/mballoc.c 2011-08-05 19:44:37.000000000 -0400
36386 @@ -1853,7 +1853,7 @@ void ext4_mb_simple_scan_group(struct ex
36387 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
36388
36389 if (EXT4_SB(sb)->s_mb_stats)
36390 - atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
36391 + atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
36392
36393 break;
36394 }
36395 @@ -2147,7 +2147,7 @@ repeat:
36396 ac->ac_status = AC_STATUS_CONTINUE;
36397 ac->ac_flags |= EXT4_MB_HINT_FIRST;
36398 cr = 3;
36399 - atomic_inc(&sbi->s_mb_lost_chunks);
36400 + atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
36401 goto repeat;
36402 }
36403 }
36404 @@ -2190,6 +2190,8 @@ static int ext4_mb_seq_groups_show(struc
36405 ext4_grpblk_t counters[16];
36406 } sg;
36407
36408 + pax_track_stack();
36409 +
36410 group--;
36411 if (group == 0)
36412 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
36413 @@ -2613,25 +2615,25 @@ int ext4_mb_release(struct super_block *
36414 if (sbi->s_mb_stats) {
36415 printk(KERN_INFO
36416 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
36417 - atomic_read(&sbi->s_bal_allocated),
36418 - atomic_read(&sbi->s_bal_reqs),
36419 - atomic_read(&sbi->s_bal_success));
36420 + atomic_read_unchecked(&sbi->s_bal_allocated),
36421 + atomic_read_unchecked(&sbi->s_bal_reqs),
36422 + atomic_read_unchecked(&sbi->s_bal_success));
36423 printk(KERN_INFO
36424 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
36425 "%u 2^N hits, %u breaks, %u lost\n",
36426 - atomic_read(&sbi->s_bal_ex_scanned),
36427 - atomic_read(&sbi->s_bal_goals),
36428 - atomic_read(&sbi->s_bal_2orders),
36429 - atomic_read(&sbi->s_bal_breaks),
36430 - atomic_read(&sbi->s_mb_lost_chunks));
36431 + atomic_read_unchecked(&sbi->s_bal_ex_scanned),
36432 + atomic_read_unchecked(&sbi->s_bal_goals),
36433 + atomic_read_unchecked(&sbi->s_bal_2orders),
36434 + atomic_read_unchecked(&sbi->s_bal_breaks),
36435 + atomic_read_unchecked(&sbi->s_mb_lost_chunks));
36436 printk(KERN_INFO
36437 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
36438 sbi->s_mb_buddies_generated++,
36439 sbi->s_mb_generation_time);
36440 printk(KERN_INFO
36441 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
36442 - atomic_read(&sbi->s_mb_preallocated),
36443 - atomic_read(&sbi->s_mb_discarded));
36444 + atomic_read_unchecked(&sbi->s_mb_preallocated),
36445 + atomic_read_unchecked(&sbi->s_mb_discarded));
36446 }
36447
36448 free_percpu(sbi->s_locality_groups);
36449 @@ -3107,16 +3109,16 @@ static void ext4_mb_collect_stats(struct
36450 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
36451
36452 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
36453 - atomic_inc(&sbi->s_bal_reqs);
36454 - atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36455 + atomic_inc_unchecked(&sbi->s_bal_reqs);
36456 + atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
36457 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
36458 - atomic_inc(&sbi->s_bal_success);
36459 - atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
36460 + atomic_inc_unchecked(&sbi->s_bal_success);
36461 + atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
36462 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
36463 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
36464 - atomic_inc(&sbi->s_bal_goals);
36465 + atomic_inc_unchecked(&sbi->s_bal_goals);
36466 if (ac->ac_found > sbi->s_mb_max_to_scan)
36467 - atomic_inc(&sbi->s_bal_breaks);
36468 + atomic_inc_unchecked(&sbi->s_bal_breaks);
36469 }
36470
36471 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
36472 @@ -3514,7 +3516,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
36473 trace_ext4_mb_new_inode_pa(ac, pa);
36474
36475 ext4_mb_use_inode_pa(ac, pa);
36476 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36477 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36478
36479 ei = EXT4_I(ac->ac_inode);
36480 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36481 @@ -3574,7 +3576,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
36482 trace_ext4_mb_new_group_pa(ac, pa);
36483
36484 ext4_mb_use_group_pa(ac, pa);
36485 - atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36486 + atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
36487
36488 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
36489 lg = ac->ac_lg;
36490 @@ -3661,7 +3663,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
36491 * from the bitmap and continue.
36492 */
36493 }
36494 - atomic_add(free, &sbi->s_mb_discarded);
36495 + atomic_add_unchecked(free, &sbi->s_mb_discarded);
36496
36497 return err;
36498 }
36499 @@ -3679,7 +3681,7 @@ ext4_mb_release_group_pa(struct ext4_bud
36500 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
36501 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
36502 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
36503 - atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36504 + atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
36505 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
36506
36507 return 0;
36508 diff -urNp linux-2.6.39.4/fs/fcntl.c linux-2.6.39.4/fs/fcntl.c
36509 --- linux-2.6.39.4/fs/fcntl.c 2011-05-19 00:06:34.000000000 -0400
36510 +++ linux-2.6.39.4/fs/fcntl.c 2011-08-05 19:44:37.000000000 -0400
36511 @@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct
36512 if (err)
36513 return err;
36514
36515 + if (gr_handle_chroot_fowner(pid, type))
36516 + return -ENOENT;
36517 + if (gr_check_protected_task_fowner(pid, type))
36518 + return -EACCES;
36519 +
36520 f_modown(filp, pid, type, force);
36521 return 0;
36522 }
36523 @@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in
36524 switch (cmd) {
36525 case F_DUPFD:
36526 case F_DUPFD_CLOEXEC:
36527 + gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
36528 if (arg >= rlimit(RLIMIT_NOFILE))
36529 break;
36530 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
36531 @@ -835,14 +841,14 @@ static int __init fcntl_init(void)
36532 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
36533 * is defined as O_NONBLOCK on some platforms and not on others.
36534 */
36535 - BUILD_BUG_ON(19 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36536 + BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
36537 O_RDONLY | O_WRONLY | O_RDWR |
36538 O_CREAT | O_EXCL | O_NOCTTY |
36539 O_TRUNC | O_APPEND | /* O_NONBLOCK | */
36540 __O_SYNC | O_DSYNC | FASYNC |
36541 O_DIRECT | O_LARGEFILE | O_DIRECTORY |
36542 O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
36543 - __FMODE_EXEC | O_PATH
36544 + __FMODE_EXEC | O_PATH | FMODE_GREXEC
36545 ));
36546
36547 fasync_cache = kmem_cache_create("fasync_cache",
36548 diff -urNp linux-2.6.39.4/fs/fifo.c linux-2.6.39.4/fs/fifo.c
36549 --- linux-2.6.39.4/fs/fifo.c 2011-05-19 00:06:34.000000000 -0400
36550 +++ linux-2.6.39.4/fs/fifo.c 2011-08-05 19:44:37.000000000 -0400
36551 @@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode
36552 */
36553 filp->f_op = &read_pipefifo_fops;
36554 pipe->r_counter++;
36555 - if (pipe->readers++ == 0)
36556 + if (atomic_inc_return(&pipe->readers) == 1)
36557 wake_up_partner(inode);
36558
36559 - if (!pipe->writers) {
36560 + if (!atomic_read(&pipe->writers)) {
36561 if ((filp->f_flags & O_NONBLOCK)) {
36562 /* suppress POLLHUP until we have
36563 * seen a writer */
36564 @@ -81,15 +81,15 @@ static int fifo_open(struct inode *inode
36565 * errno=ENXIO when there is no process reading the FIFO.
36566 */
36567 ret = -ENXIO;
36568 - if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
36569 + if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
36570 goto err;
36571
36572 filp->f_op = &write_pipefifo_fops;
36573 pipe->w_counter++;
36574 - if (!pipe->writers++)
36575 + if (atomic_inc_return(&pipe->writers) == 1)
36576 wake_up_partner(inode);
36577
36578 - if (!pipe->readers) {
36579 + if (!atomic_read(&pipe->readers)) {
36580 wait_for_partner(inode, &pipe->r_counter);
36581 if (signal_pending(current))
36582 goto err_wr;
36583 @@ -105,11 +105,11 @@ static int fifo_open(struct inode *inode
36584 */
36585 filp->f_op = &rdwr_pipefifo_fops;
36586
36587 - pipe->readers++;
36588 - pipe->writers++;
36589 + atomic_inc(&pipe->readers);
36590 + atomic_inc(&pipe->writers);
36591 pipe->r_counter++;
36592 pipe->w_counter++;
36593 - if (pipe->readers == 1 || pipe->writers == 1)
36594 + if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
36595 wake_up_partner(inode);
36596 break;
36597
36598 @@ -123,19 +123,19 @@ static int fifo_open(struct inode *inode
36599 return 0;
36600
36601 err_rd:
36602 - if (!--pipe->readers)
36603 + if (atomic_dec_and_test(&pipe->readers))
36604 wake_up_interruptible(&pipe->wait);
36605 ret = -ERESTARTSYS;
36606 goto err;
36607
36608 err_wr:
36609 - if (!--pipe->writers)
36610 + if (atomic_dec_and_test(&pipe->writers))
36611 wake_up_interruptible(&pipe->wait);
36612 ret = -ERESTARTSYS;
36613 goto err;
36614
36615 err:
36616 - if (!pipe->readers && !pipe->writers)
36617 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
36618 free_pipe_info(inode);
36619
36620 err_nocleanup:
36621 diff -urNp linux-2.6.39.4/fs/file.c linux-2.6.39.4/fs/file.c
36622 --- linux-2.6.39.4/fs/file.c 2011-05-19 00:06:34.000000000 -0400
36623 +++ linux-2.6.39.4/fs/file.c 2011-08-05 19:44:37.000000000 -0400
36624 @@ -15,6 +15,7 @@
36625 #include <linux/slab.h>
36626 #include <linux/vmalloc.h>
36627 #include <linux/file.h>
36628 +#include <linux/security.h>
36629 #include <linux/fdtable.h>
36630 #include <linux/bitops.h>
36631 #include <linux/interrupt.h>
36632 @@ -254,6 +255,7 @@ int expand_files(struct files_struct *fi
36633 * N.B. For clone tasks sharing a files structure, this test
36634 * will limit the total number of files that can be opened.
36635 */
36636 + gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
36637 if (nr >= rlimit(RLIMIT_NOFILE))
36638 return -EMFILE;
36639
36640 diff -urNp linux-2.6.39.4/fs/filesystems.c linux-2.6.39.4/fs/filesystems.c
36641 --- linux-2.6.39.4/fs/filesystems.c 2011-05-19 00:06:34.000000000 -0400
36642 +++ linux-2.6.39.4/fs/filesystems.c 2011-08-05 19:44:37.000000000 -0400
36643 @@ -274,7 +274,12 @@ struct file_system_type *get_fs_type(con
36644 int len = dot ? dot - name : strlen(name);
36645
36646 fs = __get_fs_type(name, len);
36647 +
36648 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
36649 + if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
36650 +#else
36651 if (!fs && (request_module("%.*s", len, name) == 0))
36652 +#endif
36653 fs = __get_fs_type(name, len);
36654
36655 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
36656 diff -urNp linux-2.6.39.4/fs/fscache/cookie.c linux-2.6.39.4/fs/fscache/cookie.c
36657 --- linux-2.6.39.4/fs/fscache/cookie.c 2011-05-19 00:06:34.000000000 -0400
36658 +++ linux-2.6.39.4/fs/fscache/cookie.c 2011-08-05 19:44:37.000000000 -0400
36659 @@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
36660 parent ? (char *) parent->def->name : "<no-parent>",
36661 def->name, netfs_data);
36662
36663 - fscache_stat(&fscache_n_acquires);
36664 + fscache_stat_unchecked(&fscache_n_acquires);
36665
36666 /* if there's no parent cookie, then we don't create one here either */
36667 if (!parent) {
36668 - fscache_stat(&fscache_n_acquires_null);
36669 + fscache_stat_unchecked(&fscache_n_acquires_null);
36670 _leave(" [no parent]");
36671 return NULL;
36672 }
36673 @@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
36674 /* allocate and initialise a cookie */
36675 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
36676 if (!cookie) {
36677 - fscache_stat(&fscache_n_acquires_oom);
36678 + fscache_stat_unchecked(&fscache_n_acquires_oom);
36679 _leave(" [ENOMEM]");
36680 return NULL;
36681 }
36682 @@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
36683
36684 switch (cookie->def->type) {
36685 case FSCACHE_COOKIE_TYPE_INDEX:
36686 - fscache_stat(&fscache_n_cookie_index);
36687 + fscache_stat_unchecked(&fscache_n_cookie_index);
36688 break;
36689 case FSCACHE_COOKIE_TYPE_DATAFILE:
36690 - fscache_stat(&fscache_n_cookie_data);
36691 + fscache_stat_unchecked(&fscache_n_cookie_data);
36692 break;
36693 default:
36694 - fscache_stat(&fscache_n_cookie_special);
36695 + fscache_stat_unchecked(&fscache_n_cookie_special);
36696 break;
36697 }
36698
36699 @@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
36700 if (fscache_acquire_non_index_cookie(cookie) < 0) {
36701 atomic_dec(&parent->n_children);
36702 __fscache_cookie_put(cookie);
36703 - fscache_stat(&fscache_n_acquires_nobufs);
36704 + fscache_stat_unchecked(&fscache_n_acquires_nobufs);
36705 _leave(" = NULL");
36706 return NULL;
36707 }
36708 }
36709
36710 - fscache_stat(&fscache_n_acquires_ok);
36711 + fscache_stat_unchecked(&fscache_n_acquires_ok);
36712 _leave(" = %p", cookie);
36713 return cookie;
36714 }
36715 @@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
36716 cache = fscache_select_cache_for_object(cookie->parent);
36717 if (!cache) {
36718 up_read(&fscache_addremove_sem);
36719 - fscache_stat(&fscache_n_acquires_no_cache);
36720 + fscache_stat_unchecked(&fscache_n_acquires_no_cache);
36721 _leave(" = -ENOMEDIUM [no cache]");
36722 return -ENOMEDIUM;
36723 }
36724 @@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
36725 object = cache->ops->alloc_object(cache, cookie);
36726 fscache_stat_d(&fscache_n_cop_alloc_object);
36727 if (IS_ERR(object)) {
36728 - fscache_stat(&fscache_n_object_no_alloc);
36729 + fscache_stat_unchecked(&fscache_n_object_no_alloc);
36730 ret = PTR_ERR(object);
36731 goto error;
36732 }
36733
36734 - fscache_stat(&fscache_n_object_alloc);
36735 + fscache_stat_unchecked(&fscache_n_object_alloc);
36736
36737 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
36738
36739 @@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
36740 struct fscache_object *object;
36741 struct hlist_node *_p;
36742
36743 - fscache_stat(&fscache_n_updates);
36744 + fscache_stat_unchecked(&fscache_n_updates);
36745
36746 if (!cookie) {
36747 - fscache_stat(&fscache_n_updates_null);
36748 + fscache_stat_unchecked(&fscache_n_updates_null);
36749 _leave(" [no cookie]");
36750 return;
36751 }
36752 @@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
36753 struct fscache_object *object;
36754 unsigned long event;
36755
36756 - fscache_stat(&fscache_n_relinquishes);
36757 + fscache_stat_unchecked(&fscache_n_relinquishes);
36758 if (retire)
36759 - fscache_stat(&fscache_n_relinquishes_retire);
36760 + fscache_stat_unchecked(&fscache_n_relinquishes_retire);
36761
36762 if (!cookie) {
36763 - fscache_stat(&fscache_n_relinquishes_null);
36764 + fscache_stat_unchecked(&fscache_n_relinquishes_null);
36765 _leave(" [no cookie]");
36766 return;
36767 }
36768 @@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
36769
36770 /* wait for the cookie to finish being instantiated (or to fail) */
36771 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
36772 - fscache_stat(&fscache_n_relinquishes_waitcrt);
36773 + fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
36774 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
36775 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
36776 }
36777 diff -urNp linux-2.6.39.4/fs/fscache/internal.h linux-2.6.39.4/fs/fscache/internal.h
36778 --- linux-2.6.39.4/fs/fscache/internal.h 2011-05-19 00:06:34.000000000 -0400
36779 +++ linux-2.6.39.4/fs/fscache/internal.h 2011-08-05 19:44:37.000000000 -0400
36780 @@ -144,94 +144,94 @@ extern void fscache_proc_cleanup(void);
36781 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
36782 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
36783
36784 -extern atomic_t fscache_n_op_pend;
36785 -extern atomic_t fscache_n_op_run;
36786 -extern atomic_t fscache_n_op_enqueue;
36787 -extern atomic_t fscache_n_op_deferred_release;
36788 -extern atomic_t fscache_n_op_release;
36789 -extern atomic_t fscache_n_op_gc;
36790 -extern atomic_t fscache_n_op_cancelled;
36791 -extern atomic_t fscache_n_op_rejected;
36792 -
36793 -extern atomic_t fscache_n_attr_changed;
36794 -extern atomic_t fscache_n_attr_changed_ok;
36795 -extern atomic_t fscache_n_attr_changed_nobufs;
36796 -extern atomic_t fscache_n_attr_changed_nomem;
36797 -extern atomic_t fscache_n_attr_changed_calls;
36798 -
36799 -extern atomic_t fscache_n_allocs;
36800 -extern atomic_t fscache_n_allocs_ok;
36801 -extern atomic_t fscache_n_allocs_wait;
36802 -extern atomic_t fscache_n_allocs_nobufs;
36803 -extern atomic_t fscache_n_allocs_intr;
36804 -extern atomic_t fscache_n_allocs_object_dead;
36805 -extern atomic_t fscache_n_alloc_ops;
36806 -extern atomic_t fscache_n_alloc_op_waits;
36807 -
36808 -extern atomic_t fscache_n_retrievals;
36809 -extern atomic_t fscache_n_retrievals_ok;
36810 -extern atomic_t fscache_n_retrievals_wait;
36811 -extern atomic_t fscache_n_retrievals_nodata;
36812 -extern atomic_t fscache_n_retrievals_nobufs;
36813 -extern atomic_t fscache_n_retrievals_intr;
36814 -extern atomic_t fscache_n_retrievals_nomem;
36815 -extern atomic_t fscache_n_retrievals_object_dead;
36816 -extern atomic_t fscache_n_retrieval_ops;
36817 -extern atomic_t fscache_n_retrieval_op_waits;
36818 -
36819 -extern atomic_t fscache_n_stores;
36820 -extern atomic_t fscache_n_stores_ok;
36821 -extern atomic_t fscache_n_stores_again;
36822 -extern atomic_t fscache_n_stores_nobufs;
36823 -extern atomic_t fscache_n_stores_oom;
36824 -extern atomic_t fscache_n_store_ops;
36825 -extern atomic_t fscache_n_store_calls;
36826 -extern atomic_t fscache_n_store_pages;
36827 -extern atomic_t fscache_n_store_radix_deletes;
36828 -extern atomic_t fscache_n_store_pages_over_limit;
36829 -
36830 -extern atomic_t fscache_n_store_vmscan_not_storing;
36831 -extern atomic_t fscache_n_store_vmscan_gone;
36832 -extern atomic_t fscache_n_store_vmscan_busy;
36833 -extern atomic_t fscache_n_store_vmscan_cancelled;
36834 -
36835 -extern atomic_t fscache_n_marks;
36836 -extern atomic_t fscache_n_uncaches;
36837 -
36838 -extern atomic_t fscache_n_acquires;
36839 -extern atomic_t fscache_n_acquires_null;
36840 -extern atomic_t fscache_n_acquires_no_cache;
36841 -extern atomic_t fscache_n_acquires_ok;
36842 -extern atomic_t fscache_n_acquires_nobufs;
36843 -extern atomic_t fscache_n_acquires_oom;
36844 -
36845 -extern atomic_t fscache_n_updates;
36846 -extern atomic_t fscache_n_updates_null;
36847 -extern atomic_t fscache_n_updates_run;
36848 -
36849 -extern atomic_t fscache_n_relinquishes;
36850 -extern atomic_t fscache_n_relinquishes_null;
36851 -extern atomic_t fscache_n_relinquishes_waitcrt;
36852 -extern atomic_t fscache_n_relinquishes_retire;
36853 -
36854 -extern atomic_t fscache_n_cookie_index;
36855 -extern atomic_t fscache_n_cookie_data;
36856 -extern atomic_t fscache_n_cookie_special;
36857 -
36858 -extern atomic_t fscache_n_object_alloc;
36859 -extern atomic_t fscache_n_object_no_alloc;
36860 -extern atomic_t fscache_n_object_lookups;
36861 -extern atomic_t fscache_n_object_lookups_negative;
36862 -extern atomic_t fscache_n_object_lookups_positive;
36863 -extern atomic_t fscache_n_object_lookups_timed_out;
36864 -extern atomic_t fscache_n_object_created;
36865 -extern atomic_t fscache_n_object_avail;
36866 -extern atomic_t fscache_n_object_dead;
36867 -
36868 -extern atomic_t fscache_n_checkaux_none;
36869 -extern atomic_t fscache_n_checkaux_okay;
36870 -extern atomic_t fscache_n_checkaux_update;
36871 -extern atomic_t fscache_n_checkaux_obsolete;
36872 +extern atomic_unchecked_t fscache_n_op_pend;
36873 +extern atomic_unchecked_t fscache_n_op_run;
36874 +extern atomic_unchecked_t fscache_n_op_enqueue;
36875 +extern atomic_unchecked_t fscache_n_op_deferred_release;
36876 +extern atomic_unchecked_t fscache_n_op_release;
36877 +extern atomic_unchecked_t fscache_n_op_gc;
36878 +extern atomic_unchecked_t fscache_n_op_cancelled;
36879 +extern atomic_unchecked_t fscache_n_op_rejected;
36880 +
36881 +extern atomic_unchecked_t fscache_n_attr_changed;
36882 +extern atomic_unchecked_t fscache_n_attr_changed_ok;
36883 +extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
36884 +extern atomic_unchecked_t fscache_n_attr_changed_nomem;
36885 +extern atomic_unchecked_t fscache_n_attr_changed_calls;
36886 +
36887 +extern atomic_unchecked_t fscache_n_allocs;
36888 +extern atomic_unchecked_t fscache_n_allocs_ok;
36889 +extern atomic_unchecked_t fscache_n_allocs_wait;
36890 +extern atomic_unchecked_t fscache_n_allocs_nobufs;
36891 +extern atomic_unchecked_t fscache_n_allocs_intr;
36892 +extern atomic_unchecked_t fscache_n_allocs_object_dead;
36893 +extern atomic_unchecked_t fscache_n_alloc_ops;
36894 +extern atomic_unchecked_t fscache_n_alloc_op_waits;
36895 +
36896 +extern atomic_unchecked_t fscache_n_retrievals;
36897 +extern atomic_unchecked_t fscache_n_retrievals_ok;
36898 +extern atomic_unchecked_t fscache_n_retrievals_wait;
36899 +extern atomic_unchecked_t fscache_n_retrievals_nodata;
36900 +extern atomic_unchecked_t fscache_n_retrievals_nobufs;
36901 +extern atomic_unchecked_t fscache_n_retrievals_intr;
36902 +extern atomic_unchecked_t fscache_n_retrievals_nomem;
36903 +extern atomic_unchecked_t fscache_n_retrievals_object_dead;
36904 +extern atomic_unchecked_t fscache_n_retrieval_ops;
36905 +extern atomic_unchecked_t fscache_n_retrieval_op_waits;
36906 +
36907 +extern atomic_unchecked_t fscache_n_stores;
36908 +extern atomic_unchecked_t fscache_n_stores_ok;
36909 +extern atomic_unchecked_t fscache_n_stores_again;
36910 +extern atomic_unchecked_t fscache_n_stores_nobufs;
36911 +extern atomic_unchecked_t fscache_n_stores_oom;
36912 +extern atomic_unchecked_t fscache_n_store_ops;
36913 +extern atomic_unchecked_t fscache_n_store_calls;
36914 +extern atomic_unchecked_t fscache_n_store_pages;
36915 +extern atomic_unchecked_t fscache_n_store_radix_deletes;
36916 +extern atomic_unchecked_t fscache_n_store_pages_over_limit;
36917 +
36918 +extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
36919 +extern atomic_unchecked_t fscache_n_store_vmscan_gone;
36920 +extern atomic_unchecked_t fscache_n_store_vmscan_busy;
36921 +extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
36922 +
36923 +extern atomic_unchecked_t fscache_n_marks;
36924 +extern atomic_unchecked_t fscache_n_uncaches;
36925 +
36926 +extern atomic_unchecked_t fscache_n_acquires;
36927 +extern atomic_unchecked_t fscache_n_acquires_null;
36928 +extern atomic_unchecked_t fscache_n_acquires_no_cache;
36929 +extern atomic_unchecked_t fscache_n_acquires_ok;
36930 +extern atomic_unchecked_t fscache_n_acquires_nobufs;
36931 +extern atomic_unchecked_t fscache_n_acquires_oom;
36932 +
36933 +extern atomic_unchecked_t fscache_n_updates;
36934 +extern atomic_unchecked_t fscache_n_updates_null;
36935 +extern atomic_unchecked_t fscache_n_updates_run;
36936 +
36937 +extern atomic_unchecked_t fscache_n_relinquishes;
36938 +extern atomic_unchecked_t fscache_n_relinquishes_null;
36939 +extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
36940 +extern atomic_unchecked_t fscache_n_relinquishes_retire;
36941 +
36942 +extern atomic_unchecked_t fscache_n_cookie_index;
36943 +extern atomic_unchecked_t fscache_n_cookie_data;
36944 +extern atomic_unchecked_t fscache_n_cookie_special;
36945 +
36946 +extern atomic_unchecked_t fscache_n_object_alloc;
36947 +extern atomic_unchecked_t fscache_n_object_no_alloc;
36948 +extern atomic_unchecked_t fscache_n_object_lookups;
36949 +extern atomic_unchecked_t fscache_n_object_lookups_negative;
36950 +extern atomic_unchecked_t fscache_n_object_lookups_positive;
36951 +extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
36952 +extern atomic_unchecked_t fscache_n_object_created;
36953 +extern atomic_unchecked_t fscache_n_object_avail;
36954 +extern atomic_unchecked_t fscache_n_object_dead;
36955 +
36956 +extern atomic_unchecked_t fscache_n_checkaux_none;
36957 +extern atomic_unchecked_t fscache_n_checkaux_okay;
36958 +extern atomic_unchecked_t fscache_n_checkaux_update;
36959 +extern atomic_unchecked_t fscache_n_checkaux_obsolete;
36960
36961 extern atomic_t fscache_n_cop_alloc_object;
36962 extern atomic_t fscache_n_cop_lookup_object;
36963 @@ -255,6 +255,11 @@ static inline void fscache_stat(atomic_t
36964 atomic_inc(stat);
36965 }
36966
36967 +static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
36968 +{
36969 + atomic_inc_unchecked(stat);
36970 +}
36971 +
36972 static inline void fscache_stat_d(atomic_t *stat)
36973 {
36974 atomic_dec(stat);
36975 @@ -267,6 +272,7 @@ extern const struct file_operations fsca
36976
36977 #define __fscache_stat(stat) (NULL)
36978 #define fscache_stat(stat) do {} while (0)
36979 +#define fscache_stat_unchecked(stat) do {} while (0)
36980 #define fscache_stat_d(stat) do {} while (0)
36981 #endif
36982
36983 diff -urNp linux-2.6.39.4/fs/fscache/object.c linux-2.6.39.4/fs/fscache/object.c
36984 --- linux-2.6.39.4/fs/fscache/object.c 2011-05-19 00:06:34.000000000 -0400
36985 +++ linux-2.6.39.4/fs/fscache/object.c 2011-08-05 19:44:37.000000000 -0400
36986 @@ -128,7 +128,7 @@ static void fscache_object_state_machine
36987 /* update the object metadata on disk */
36988 case FSCACHE_OBJECT_UPDATING:
36989 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
36990 - fscache_stat(&fscache_n_updates_run);
36991 + fscache_stat_unchecked(&fscache_n_updates_run);
36992 fscache_stat(&fscache_n_cop_update_object);
36993 object->cache->ops->update_object(object);
36994 fscache_stat_d(&fscache_n_cop_update_object);
36995 @@ -217,7 +217,7 @@ static void fscache_object_state_machine
36996 spin_lock(&object->lock);
36997 object->state = FSCACHE_OBJECT_DEAD;
36998 spin_unlock(&object->lock);
36999 - fscache_stat(&fscache_n_object_dead);
37000 + fscache_stat_unchecked(&fscache_n_object_dead);
37001 goto terminal_transit;
37002
37003 /* handle the parent cache of this object being withdrawn from
37004 @@ -232,7 +232,7 @@ static void fscache_object_state_machine
37005 spin_lock(&object->lock);
37006 object->state = FSCACHE_OBJECT_DEAD;
37007 spin_unlock(&object->lock);
37008 - fscache_stat(&fscache_n_object_dead);
37009 + fscache_stat_unchecked(&fscache_n_object_dead);
37010 goto terminal_transit;
37011
37012 /* complain about the object being woken up once it is
37013 @@ -461,7 +461,7 @@ static void fscache_lookup_object(struct
37014 parent->cookie->def->name, cookie->def->name,
37015 object->cache->tag->name);
37016
37017 - fscache_stat(&fscache_n_object_lookups);
37018 + fscache_stat_unchecked(&fscache_n_object_lookups);
37019 fscache_stat(&fscache_n_cop_lookup_object);
37020 ret = object->cache->ops->lookup_object(object);
37021 fscache_stat_d(&fscache_n_cop_lookup_object);
37022 @@ -472,7 +472,7 @@ static void fscache_lookup_object(struct
37023 if (ret == -ETIMEDOUT) {
37024 /* probably stuck behind another object, so move this one to
37025 * the back of the queue */
37026 - fscache_stat(&fscache_n_object_lookups_timed_out);
37027 + fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
37028 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
37029 }
37030
37031 @@ -495,7 +495,7 @@ void fscache_object_lookup_negative(stru
37032
37033 spin_lock(&object->lock);
37034 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
37035 - fscache_stat(&fscache_n_object_lookups_negative);
37036 + fscache_stat_unchecked(&fscache_n_object_lookups_negative);
37037
37038 /* transit here to allow write requests to begin stacking up
37039 * and read requests to begin returning ENODATA */
37040 @@ -541,7 +541,7 @@ void fscache_obtained_object(struct fsca
37041 * result, in which case there may be data available */
37042 spin_lock(&object->lock);
37043 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
37044 - fscache_stat(&fscache_n_object_lookups_positive);
37045 + fscache_stat_unchecked(&fscache_n_object_lookups_positive);
37046
37047 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
37048
37049 @@ -555,7 +555,7 @@ void fscache_obtained_object(struct fsca
37050 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
37051 } else {
37052 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
37053 - fscache_stat(&fscache_n_object_created);
37054 + fscache_stat_unchecked(&fscache_n_object_created);
37055
37056 object->state = FSCACHE_OBJECT_AVAILABLE;
37057 spin_unlock(&object->lock);
37058 @@ -602,7 +602,7 @@ static void fscache_object_available(str
37059 fscache_enqueue_dependents(object);
37060
37061 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
37062 - fscache_stat(&fscache_n_object_avail);
37063 + fscache_stat_unchecked(&fscache_n_object_avail);
37064
37065 _leave("");
37066 }
37067 @@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
37068 enum fscache_checkaux result;
37069
37070 if (!object->cookie->def->check_aux) {
37071 - fscache_stat(&fscache_n_checkaux_none);
37072 + fscache_stat_unchecked(&fscache_n_checkaux_none);
37073 return FSCACHE_CHECKAUX_OKAY;
37074 }
37075
37076 @@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
37077 switch (result) {
37078 /* entry okay as is */
37079 case FSCACHE_CHECKAUX_OKAY:
37080 - fscache_stat(&fscache_n_checkaux_okay);
37081 + fscache_stat_unchecked(&fscache_n_checkaux_okay);
37082 break;
37083
37084 /* entry requires update */
37085 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
37086 - fscache_stat(&fscache_n_checkaux_update);
37087 + fscache_stat_unchecked(&fscache_n_checkaux_update);
37088 break;
37089
37090 /* entry requires deletion */
37091 case FSCACHE_CHECKAUX_OBSOLETE:
37092 - fscache_stat(&fscache_n_checkaux_obsolete);
37093 + fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
37094 break;
37095
37096 default:
37097 diff -urNp linux-2.6.39.4/fs/fscache/operation.c linux-2.6.39.4/fs/fscache/operation.c
37098 --- linux-2.6.39.4/fs/fscache/operation.c 2011-05-19 00:06:34.000000000 -0400
37099 +++ linux-2.6.39.4/fs/fscache/operation.c 2011-08-05 19:44:37.000000000 -0400
37100 @@ -17,7 +17,7 @@
37101 #include <linux/slab.h>
37102 #include "internal.h"
37103
37104 -atomic_t fscache_op_debug_id;
37105 +atomic_unchecked_t fscache_op_debug_id;
37106 EXPORT_SYMBOL(fscache_op_debug_id);
37107
37108 /**
37109 @@ -40,7 +40,7 @@ void fscache_enqueue_operation(struct fs
37110 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
37111 ASSERTCMP(atomic_read(&op->usage), >, 0);
37112
37113 - fscache_stat(&fscache_n_op_enqueue);
37114 + fscache_stat_unchecked(&fscache_n_op_enqueue);
37115 switch (op->flags & FSCACHE_OP_TYPE) {
37116 case FSCACHE_OP_ASYNC:
37117 _debug("queue async");
37118 @@ -73,7 +73,7 @@ static void fscache_run_op(struct fscach
37119 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
37120 if (op->processor)
37121 fscache_enqueue_operation(op);
37122 - fscache_stat(&fscache_n_op_run);
37123 + fscache_stat_unchecked(&fscache_n_op_run);
37124 }
37125
37126 /*
37127 @@ -104,11 +104,11 @@ int fscache_submit_exclusive_op(struct f
37128 if (object->n_ops > 1) {
37129 atomic_inc(&op->usage);
37130 list_add_tail(&op->pend_link, &object->pending_ops);
37131 - fscache_stat(&fscache_n_op_pend);
37132 + fscache_stat_unchecked(&fscache_n_op_pend);
37133 } else if (!list_empty(&object->pending_ops)) {
37134 atomic_inc(&op->usage);
37135 list_add_tail(&op->pend_link, &object->pending_ops);
37136 - fscache_stat(&fscache_n_op_pend);
37137 + fscache_stat_unchecked(&fscache_n_op_pend);
37138 fscache_start_operations(object);
37139 } else {
37140 ASSERTCMP(object->n_in_progress, ==, 0);
37141 @@ -124,7 +124,7 @@ int fscache_submit_exclusive_op(struct f
37142 object->n_exclusive++; /* reads and writes must wait */
37143 atomic_inc(&op->usage);
37144 list_add_tail(&op->pend_link, &object->pending_ops);
37145 - fscache_stat(&fscache_n_op_pend);
37146 + fscache_stat_unchecked(&fscache_n_op_pend);
37147 ret = 0;
37148 } else {
37149 /* not allowed to submit ops in any other state */
37150 @@ -211,11 +211,11 @@ int fscache_submit_op(struct fscache_obj
37151 if (object->n_exclusive > 0) {
37152 atomic_inc(&op->usage);
37153 list_add_tail(&op->pend_link, &object->pending_ops);
37154 - fscache_stat(&fscache_n_op_pend);
37155 + fscache_stat_unchecked(&fscache_n_op_pend);
37156 } else if (!list_empty(&object->pending_ops)) {
37157 atomic_inc(&op->usage);
37158 list_add_tail(&op->pend_link, &object->pending_ops);
37159 - fscache_stat(&fscache_n_op_pend);
37160 + fscache_stat_unchecked(&fscache_n_op_pend);
37161 fscache_start_operations(object);
37162 } else {
37163 ASSERTCMP(object->n_exclusive, ==, 0);
37164 @@ -227,12 +227,12 @@ int fscache_submit_op(struct fscache_obj
37165 object->n_ops++;
37166 atomic_inc(&op->usage);
37167 list_add_tail(&op->pend_link, &object->pending_ops);
37168 - fscache_stat(&fscache_n_op_pend);
37169 + fscache_stat_unchecked(&fscache_n_op_pend);
37170 ret = 0;
37171 } else if (object->state == FSCACHE_OBJECT_DYING ||
37172 object->state == FSCACHE_OBJECT_LC_DYING ||
37173 object->state == FSCACHE_OBJECT_WITHDRAWING) {
37174 - fscache_stat(&fscache_n_op_rejected);
37175 + fscache_stat_unchecked(&fscache_n_op_rejected);
37176 ret = -ENOBUFS;
37177 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
37178 fscache_report_unexpected_submission(object, op, ostate);
37179 @@ -302,7 +302,7 @@ int fscache_cancel_op(struct fscache_ope
37180
37181 ret = -EBUSY;
37182 if (!list_empty(&op->pend_link)) {
37183 - fscache_stat(&fscache_n_op_cancelled);
37184 + fscache_stat_unchecked(&fscache_n_op_cancelled);
37185 list_del_init(&op->pend_link);
37186 object->n_ops--;
37187 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
37188 @@ -341,7 +341,7 @@ void fscache_put_operation(struct fscach
37189 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
37190 BUG();
37191
37192 - fscache_stat(&fscache_n_op_release);
37193 + fscache_stat_unchecked(&fscache_n_op_release);
37194
37195 if (op->release) {
37196 op->release(op);
37197 @@ -358,7 +358,7 @@ void fscache_put_operation(struct fscach
37198 * lock, and defer it otherwise */
37199 if (!spin_trylock(&object->lock)) {
37200 _debug("defer put");
37201 - fscache_stat(&fscache_n_op_deferred_release);
37202 + fscache_stat_unchecked(&fscache_n_op_deferred_release);
37203
37204 cache = object->cache;
37205 spin_lock(&cache->op_gc_list_lock);
37206 @@ -420,7 +420,7 @@ void fscache_operation_gc(struct work_st
37207
37208 _debug("GC DEFERRED REL OBJ%x OP%x",
37209 object->debug_id, op->debug_id);
37210 - fscache_stat(&fscache_n_op_gc);
37211 + fscache_stat_unchecked(&fscache_n_op_gc);
37212
37213 ASSERTCMP(atomic_read(&op->usage), ==, 0);
37214
37215 diff -urNp linux-2.6.39.4/fs/fscache/page.c linux-2.6.39.4/fs/fscache/page.c
37216 --- linux-2.6.39.4/fs/fscache/page.c 2011-08-05 21:11:51.000000000 -0400
37217 +++ linux-2.6.39.4/fs/fscache/page.c 2011-08-05 21:12:20.000000000 -0400
37218 @@ -60,7 +60,7 @@ bool __fscache_maybe_release_page(struct
37219 val = radix_tree_lookup(&cookie->stores, page->index);
37220 if (!val) {
37221 rcu_read_unlock();
37222 - fscache_stat(&fscache_n_store_vmscan_not_storing);
37223 + fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
37224 __fscache_uncache_page(cookie, page);
37225 return true;
37226 }
37227 @@ -90,11 +90,11 @@ bool __fscache_maybe_release_page(struct
37228 spin_unlock(&cookie->stores_lock);
37229
37230 if (xpage) {
37231 - fscache_stat(&fscache_n_store_vmscan_cancelled);
37232 - fscache_stat(&fscache_n_store_radix_deletes);
37233 + fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
37234 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
37235 ASSERTCMP(xpage, ==, page);
37236 } else {
37237 - fscache_stat(&fscache_n_store_vmscan_gone);
37238 + fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
37239 }
37240
37241 wake_up_bit(&cookie->flags, 0);
37242 @@ -107,7 +107,7 @@ page_busy:
37243 /* we might want to wait here, but that could deadlock the allocator as
37244 * the work threads writing to the cache may all end up sleeping
37245 * on memory allocation */
37246 - fscache_stat(&fscache_n_store_vmscan_busy);
37247 + fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
37248 return false;
37249 }
37250 EXPORT_SYMBOL(__fscache_maybe_release_page);
37251 @@ -131,7 +131,7 @@ static void fscache_end_page_write(struc
37252 FSCACHE_COOKIE_STORING_TAG);
37253 if (!radix_tree_tag_get(&cookie->stores, page->index,
37254 FSCACHE_COOKIE_PENDING_TAG)) {
37255 - fscache_stat(&fscache_n_store_radix_deletes);
37256 + fscache_stat_unchecked(&fscache_n_store_radix_deletes);
37257 xpage = radix_tree_delete(&cookie->stores, page->index);
37258 }
37259 spin_unlock(&cookie->stores_lock);
37260 @@ -152,7 +152,7 @@ static void fscache_attr_changed_op(stru
37261
37262 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
37263
37264 - fscache_stat(&fscache_n_attr_changed_calls);
37265 + fscache_stat_unchecked(&fscache_n_attr_changed_calls);
37266
37267 if (fscache_object_is_active(object)) {
37268 fscache_set_op_state(op, "CallFS");
37269 @@ -179,11 +179,11 @@ int __fscache_attr_changed(struct fscach
37270
37271 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37272
37273 - fscache_stat(&fscache_n_attr_changed);
37274 + fscache_stat_unchecked(&fscache_n_attr_changed);
37275
37276 op = kzalloc(sizeof(*op), GFP_KERNEL);
37277 if (!op) {
37278 - fscache_stat(&fscache_n_attr_changed_nomem);
37279 + fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
37280 _leave(" = -ENOMEM");
37281 return -ENOMEM;
37282 }
37283 @@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
37284 if (fscache_submit_exclusive_op(object, op) < 0)
37285 goto nobufs;
37286 spin_unlock(&cookie->lock);
37287 - fscache_stat(&fscache_n_attr_changed_ok);
37288 + fscache_stat_unchecked(&fscache_n_attr_changed_ok);
37289 fscache_put_operation(op);
37290 _leave(" = 0");
37291 return 0;
37292 @@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
37293 nobufs:
37294 spin_unlock(&cookie->lock);
37295 kfree(op);
37296 - fscache_stat(&fscache_n_attr_changed_nobufs);
37297 + fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
37298 _leave(" = %d", -ENOBUFS);
37299 return -ENOBUFS;
37300 }
37301 @@ -246,7 +246,7 @@ static struct fscache_retrieval *fscache
37302 /* allocate a retrieval operation and attempt to submit it */
37303 op = kzalloc(sizeof(*op), GFP_NOIO);
37304 if (!op) {
37305 - fscache_stat(&fscache_n_retrievals_nomem);
37306 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37307 return NULL;
37308 }
37309
37310 @@ -275,13 +275,13 @@ static int fscache_wait_for_deferred_loo
37311 return 0;
37312 }
37313
37314 - fscache_stat(&fscache_n_retrievals_wait);
37315 + fscache_stat_unchecked(&fscache_n_retrievals_wait);
37316
37317 jif = jiffies;
37318 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
37319 fscache_wait_bit_interruptible,
37320 TASK_INTERRUPTIBLE) != 0) {
37321 - fscache_stat(&fscache_n_retrievals_intr);
37322 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37323 _leave(" = -ERESTARTSYS");
37324 return -ERESTARTSYS;
37325 }
37326 @@ -299,8 +299,8 @@ static int fscache_wait_for_deferred_loo
37327 */
37328 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
37329 struct fscache_retrieval *op,
37330 - atomic_t *stat_op_waits,
37331 - atomic_t *stat_object_dead)
37332 + atomic_unchecked_t *stat_op_waits,
37333 + atomic_unchecked_t *stat_object_dead)
37334 {
37335 int ret;
37336
37337 @@ -308,7 +308,7 @@ static int fscache_wait_for_retrieval_ac
37338 goto check_if_dead;
37339
37340 _debug(">>> WT");
37341 - fscache_stat(stat_op_waits);
37342 + fscache_stat_unchecked(stat_op_waits);
37343 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
37344 fscache_wait_bit_interruptible,
37345 TASK_INTERRUPTIBLE) < 0) {
37346 @@ -325,7 +325,7 @@ static int fscache_wait_for_retrieval_ac
37347
37348 check_if_dead:
37349 if (unlikely(fscache_object_is_dead(object))) {
37350 - fscache_stat(stat_object_dead);
37351 + fscache_stat_unchecked(stat_object_dead);
37352 return -ENOBUFS;
37353 }
37354 return 0;
37355 @@ -352,7 +352,7 @@ int __fscache_read_or_alloc_page(struct
37356
37357 _enter("%p,%p,,,", cookie, page);
37358
37359 - fscache_stat(&fscache_n_retrievals);
37360 + fscache_stat_unchecked(&fscache_n_retrievals);
37361
37362 if (hlist_empty(&cookie->backing_objects))
37363 goto nobufs;
37364 @@ -386,7 +386,7 @@ int __fscache_read_or_alloc_page(struct
37365 goto nobufs_unlock;
37366 spin_unlock(&cookie->lock);
37367
37368 - fscache_stat(&fscache_n_retrieval_ops);
37369 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
37370
37371 /* pin the netfs read context in case we need to do the actual netfs
37372 * read because we've encountered a cache read failure */
37373 @@ -416,15 +416,15 @@ int __fscache_read_or_alloc_page(struct
37374
37375 error:
37376 if (ret == -ENOMEM)
37377 - fscache_stat(&fscache_n_retrievals_nomem);
37378 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37379 else if (ret == -ERESTARTSYS)
37380 - fscache_stat(&fscache_n_retrievals_intr);
37381 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37382 else if (ret == -ENODATA)
37383 - fscache_stat(&fscache_n_retrievals_nodata);
37384 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37385 else if (ret < 0)
37386 - fscache_stat(&fscache_n_retrievals_nobufs);
37387 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37388 else
37389 - fscache_stat(&fscache_n_retrievals_ok);
37390 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
37391
37392 fscache_put_retrieval(op);
37393 _leave(" = %d", ret);
37394 @@ -434,7 +434,7 @@ nobufs_unlock:
37395 spin_unlock(&cookie->lock);
37396 kfree(op);
37397 nobufs:
37398 - fscache_stat(&fscache_n_retrievals_nobufs);
37399 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37400 _leave(" = -ENOBUFS");
37401 return -ENOBUFS;
37402 }
37403 @@ -472,7 +472,7 @@ int __fscache_read_or_alloc_pages(struct
37404
37405 _enter("%p,,%d,,,", cookie, *nr_pages);
37406
37407 - fscache_stat(&fscache_n_retrievals);
37408 + fscache_stat_unchecked(&fscache_n_retrievals);
37409
37410 if (hlist_empty(&cookie->backing_objects))
37411 goto nobufs;
37412 @@ -503,7 +503,7 @@ int __fscache_read_or_alloc_pages(struct
37413 goto nobufs_unlock;
37414 spin_unlock(&cookie->lock);
37415
37416 - fscache_stat(&fscache_n_retrieval_ops);
37417 + fscache_stat_unchecked(&fscache_n_retrieval_ops);
37418
37419 /* pin the netfs read context in case we need to do the actual netfs
37420 * read because we've encountered a cache read failure */
37421 @@ -533,15 +533,15 @@ int __fscache_read_or_alloc_pages(struct
37422
37423 error:
37424 if (ret == -ENOMEM)
37425 - fscache_stat(&fscache_n_retrievals_nomem);
37426 + fscache_stat_unchecked(&fscache_n_retrievals_nomem);
37427 else if (ret == -ERESTARTSYS)
37428 - fscache_stat(&fscache_n_retrievals_intr);
37429 + fscache_stat_unchecked(&fscache_n_retrievals_intr);
37430 else if (ret == -ENODATA)
37431 - fscache_stat(&fscache_n_retrievals_nodata);
37432 + fscache_stat_unchecked(&fscache_n_retrievals_nodata);
37433 else if (ret < 0)
37434 - fscache_stat(&fscache_n_retrievals_nobufs);
37435 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37436 else
37437 - fscache_stat(&fscache_n_retrievals_ok);
37438 + fscache_stat_unchecked(&fscache_n_retrievals_ok);
37439
37440 fscache_put_retrieval(op);
37441 _leave(" = %d", ret);
37442 @@ -551,7 +551,7 @@ nobufs_unlock:
37443 spin_unlock(&cookie->lock);
37444 kfree(op);
37445 nobufs:
37446 - fscache_stat(&fscache_n_retrievals_nobufs);
37447 + fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
37448 _leave(" = -ENOBUFS");
37449 return -ENOBUFS;
37450 }
37451 @@ -575,7 +575,7 @@ int __fscache_alloc_page(struct fscache_
37452
37453 _enter("%p,%p,,,", cookie, page);
37454
37455 - fscache_stat(&fscache_n_allocs);
37456 + fscache_stat_unchecked(&fscache_n_allocs);
37457
37458 if (hlist_empty(&cookie->backing_objects))
37459 goto nobufs;
37460 @@ -602,7 +602,7 @@ int __fscache_alloc_page(struct fscache_
37461 goto nobufs_unlock;
37462 spin_unlock(&cookie->lock);
37463
37464 - fscache_stat(&fscache_n_alloc_ops);
37465 + fscache_stat_unchecked(&fscache_n_alloc_ops);
37466
37467 ret = fscache_wait_for_retrieval_activation(
37468 object, op,
37469 @@ -618,11 +618,11 @@ int __fscache_alloc_page(struct fscache_
37470
37471 error:
37472 if (ret == -ERESTARTSYS)
37473 - fscache_stat(&fscache_n_allocs_intr);
37474 + fscache_stat_unchecked(&fscache_n_allocs_intr);
37475 else if (ret < 0)
37476 - fscache_stat(&fscache_n_allocs_nobufs);
37477 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37478 else
37479 - fscache_stat(&fscache_n_allocs_ok);
37480 + fscache_stat_unchecked(&fscache_n_allocs_ok);
37481
37482 fscache_put_retrieval(op);
37483 _leave(" = %d", ret);
37484 @@ -632,7 +632,7 @@ nobufs_unlock:
37485 spin_unlock(&cookie->lock);
37486 kfree(op);
37487 nobufs:
37488 - fscache_stat(&fscache_n_allocs_nobufs);
37489 + fscache_stat_unchecked(&fscache_n_allocs_nobufs);
37490 _leave(" = -ENOBUFS");
37491 return -ENOBUFS;
37492 }
37493 @@ -675,7 +675,7 @@ static void fscache_write_op(struct fsca
37494
37495 spin_lock(&cookie->stores_lock);
37496
37497 - fscache_stat(&fscache_n_store_calls);
37498 + fscache_stat_unchecked(&fscache_n_store_calls);
37499
37500 /* find a page to store */
37501 page = NULL;
37502 @@ -686,7 +686,7 @@ static void fscache_write_op(struct fsca
37503 page = results[0];
37504 _debug("gang %d [%lx]", n, page->index);
37505 if (page->index > op->store_limit) {
37506 - fscache_stat(&fscache_n_store_pages_over_limit);
37507 + fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
37508 goto superseded;
37509 }
37510
37511 @@ -699,7 +699,7 @@ static void fscache_write_op(struct fsca
37512 spin_unlock(&object->lock);
37513
37514 fscache_set_op_state(&op->op, "Store");
37515 - fscache_stat(&fscache_n_store_pages);
37516 + fscache_stat_unchecked(&fscache_n_store_pages);
37517 fscache_stat(&fscache_n_cop_write_page);
37518 ret = object->cache->ops->write_page(op, page);
37519 fscache_stat_d(&fscache_n_cop_write_page);
37520 @@ -769,7 +769,7 @@ int __fscache_write_page(struct fscache_
37521 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37522 ASSERT(PageFsCache(page));
37523
37524 - fscache_stat(&fscache_n_stores);
37525 + fscache_stat_unchecked(&fscache_n_stores);
37526
37527 op = kzalloc(sizeof(*op), GFP_NOIO);
37528 if (!op)
37529 @@ -821,7 +821,7 @@ int __fscache_write_page(struct fscache_
37530 spin_unlock(&cookie->stores_lock);
37531 spin_unlock(&object->lock);
37532
37533 - op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
37534 + op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
37535 op->store_limit = object->store_limit;
37536
37537 if (fscache_submit_op(object, &op->op) < 0)
37538 @@ -829,8 +829,8 @@ int __fscache_write_page(struct fscache_
37539
37540 spin_unlock(&cookie->lock);
37541 radix_tree_preload_end();
37542 - fscache_stat(&fscache_n_store_ops);
37543 - fscache_stat(&fscache_n_stores_ok);
37544 + fscache_stat_unchecked(&fscache_n_store_ops);
37545 + fscache_stat_unchecked(&fscache_n_stores_ok);
37546
37547 /* the work queue now carries its own ref on the object */
37548 fscache_put_operation(&op->op);
37549 @@ -838,14 +838,14 @@ int __fscache_write_page(struct fscache_
37550 return 0;
37551
37552 already_queued:
37553 - fscache_stat(&fscache_n_stores_again);
37554 + fscache_stat_unchecked(&fscache_n_stores_again);
37555 already_pending:
37556 spin_unlock(&cookie->stores_lock);
37557 spin_unlock(&object->lock);
37558 spin_unlock(&cookie->lock);
37559 radix_tree_preload_end();
37560 kfree(op);
37561 - fscache_stat(&fscache_n_stores_ok);
37562 + fscache_stat_unchecked(&fscache_n_stores_ok);
37563 _leave(" = 0");
37564 return 0;
37565
37566 @@ -864,14 +864,14 @@ nobufs:
37567 spin_unlock(&cookie->lock);
37568 radix_tree_preload_end();
37569 kfree(op);
37570 - fscache_stat(&fscache_n_stores_nobufs);
37571 + fscache_stat_unchecked(&fscache_n_stores_nobufs);
37572 _leave(" = -ENOBUFS");
37573 return -ENOBUFS;
37574
37575 nomem_free:
37576 kfree(op);
37577 nomem:
37578 - fscache_stat(&fscache_n_stores_oom);
37579 + fscache_stat_unchecked(&fscache_n_stores_oom);
37580 _leave(" = -ENOMEM");
37581 return -ENOMEM;
37582 }
37583 @@ -889,7 +889,7 @@ void __fscache_uncache_page(struct fscac
37584 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
37585 ASSERTCMP(page, !=, NULL);
37586
37587 - fscache_stat(&fscache_n_uncaches);
37588 + fscache_stat_unchecked(&fscache_n_uncaches);
37589
37590 /* cache withdrawal may beat us to it */
37591 if (!PageFsCache(page))
37592 @@ -942,7 +942,7 @@ void fscache_mark_pages_cached(struct fs
37593 unsigned long loop;
37594
37595 #ifdef CONFIG_FSCACHE_STATS
37596 - atomic_add(pagevec->nr, &fscache_n_marks);
37597 + atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
37598 #endif
37599
37600 for (loop = 0; loop < pagevec->nr; loop++) {
37601 diff -urNp linux-2.6.39.4/fs/fscache/stats.c linux-2.6.39.4/fs/fscache/stats.c
37602 --- linux-2.6.39.4/fs/fscache/stats.c 2011-05-19 00:06:34.000000000 -0400
37603 +++ linux-2.6.39.4/fs/fscache/stats.c 2011-08-05 19:44:37.000000000 -0400
37604 @@ -18,95 +18,95 @@
37605 /*
37606 * operation counters
37607 */
37608 -atomic_t fscache_n_op_pend;
37609 -atomic_t fscache_n_op_run;
37610 -atomic_t fscache_n_op_enqueue;
37611 -atomic_t fscache_n_op_requeue;
37612 -atomic_t fscache_n_op_deferred_release;
37613 -atomic_t fscache_n_op_release;
37614 -atomic_t fscache_n_op_gc;
37615 -atomic_t fscache_n_op_cancelled;
37616 -atomic_t fscache_n_op_rejected;
37617 -
37618 -atomic_t fscache_n_attr_changed;
37619 -atomic_t fscache_n_attr_changed_ok;
37620 -atomic_t fscache_n_attr_changed_nobufs;
37621 -atomic_t fscache_n_attr_changed_nomem;
37622 -atomic_t fscache_n_attr_changed_calls;
37623 -
37624 -atomic_t fscache_n_allocs;
37625 -atomic_t fscache_n_allocs_ok;
37626 -atomic_t fscache_n_allocs_wait;
37627 -atomic_t fscache_n_allocs_nobufs;
37628 -atomic_t fscache_n_allocs_intr;
37629 -atomic_t fscache_n_allocs_object_dead;
37630 -atomic_t fscache_n_alloc_ops;
37631 -atomic_t fscache_n_alloc_op_waits;
37632 -
37633 -atomic_t fscache_n_retrievals;
37634 -atomic_t fscache_n_retrievals_ok;
37635 -atomic_t fscache_n_retrievals_wait;
37636 -atomic_t fscache_n_retrievals_nodata;
37637 -atomic_t fscache_n_retrievals_nobufs;
37638 -atomic_t fscache_n_retrievals_intr;
37639 -atomic_t fscache_n_retrievals_nomem;
37640 -atomic_t fscache_n_retrievals_object_dead;
37641 -atomic_t fscache_n_retrieval_ops;
37642 -atomic_t fscache_n_retrieval_op_waits;
37643 -
37644 -atomic_t fscache_n_stores;
37645 -atomic_t fscache_n_stores_ok;
37646 -atomic_t fscache_n_stores_again;
37647 -atomic_t fscache_n_stores_nobufs;
37648 -atomic_t fscache_n_stores_oom;
37649 -atomic_t fscache_n_store_ops;
37650 -atomic_t fscache_n_store_calls;
37651 -atomic_t fscache_n_store_pages;
37652 -atomic_t fscache_n_store_radix_deletes;
37653 -atomic_t fscache_n_store_pages_over_limit;
37654 -
37655 -atomic_t fscache_n_store_vmscan_not_storing;
37656 -atomic_t fscache_n_store_vmscan_gone;
37657 -atomic_t fscache_n_store_vmscan_busy;
37658 -atomic_t fscache_n_store_vmscan_cancelled;
37659 -
37660 -atomic_t fscache_n_marks;
37661 -atomic_t fscache_n_uncaches;
37662 -
37663 -atomic_t fscache_n_acquires;
37664 -atomic_t fscache_n_acquires_null;
37665 -atomic_t fscache_n_acquires_no_cache;
37666 -atomic_t fscache_n_acquires_ok;
37667 -atomic_t fscache_n_acquires_nobufs;
37668 -atomic_t fscache_n_acquires_oom;
37669 -
37670 -atomic_t fscache_n_updates;
37671 -atomic_t fscache_n_updates_null;
37672 -atomic_t fscache_n_updates_run;
37673 -
37674 -atomic_t fscache_n_relinquishes;
37675 -atomic_t fscache_n_relinquishes_null;
37676 -atomic_t fscache_n_relinquishes_waitcrt;
37677 -atomic_t fscache_n_relinquishes_retire;
37678 -
37679 -atomic_t fscache_n_cookie_index;
37680 -atomic_t fscache_n_cookie_data;
37681 -atomic_t fscache_n_cookie_special;
37682 -
37683 -atomic_t fscache_n_object_alloc;
37684 -atomic_t fscache_n_object_no_alloc;
37685 -atomic_t fscache_n_object_lookups;
37686 -atomic_t fscache_n_object_lookups_negative;
37687 -atomic_t fscache_n_object_lookups_positive;
37688 -atomic_t fscache_n_object_lookups_timed_out;
37689 -atomic_t fscache_n_object_created;
37690 -atomic_t fscache_n_object_avail;
37691 -atomic_t fscache_n_object_dead;
37692 -
37693 -atomic_t fscache_n_checkaux_none;
37694 -atomic_t fscache_n_checkaux_okay;
37695 -atomic_t fscache_n_checkaux_update;
37696 -atomic_t fscache_n_checkaux_obsolete;
37697 +atomic_unchecked_t fscache_n_op_pend;
37698 +atomic_unchecked_t fscache_n_op_run;
37699 +atomic_unchecked_t fscache_n_op_enqueue;
37700 +atomic_unchecked_t fscache_n_op_requeue;
37701 +atomic_unchecked_t fscache_n_op_deferred_release;
37702 +atomic_unchecked_t fscache_n_op_release;
37703 +atomic_unchecked_t fscache_n_op_gc;
37704 +atomic_unchecked_t fscache_n_op_cancelled;
37705 +atomic_unchecked_t fscache_n_op_rejected;
37706 +
37707 +atomic_unchecked_t fscache_n_attr_changed;
37708 +atomic_unchecked_t fscache_n_attr_changed_ok;
37709 +atomic_unchecked_t fscache_n_attr_changed_nobufs;
37710 +atomic_unchecked_t fscache_n_attr_changed_nomem;
37711 +atomic_unchecked_t fscache_n_attr_changed_calls;
37712 +
37713 +atomic_unchecked_t fscache_n_allocs;
37714 +atomic_unchecked_t fscache_n_allocs_ok;
37715 +atomic_unchecked_t fscache_n_allocs_wait;
37716 +atomic_unchecked_t fscache_n_allocs_nobufs;
37717 +atomic_unchecked_t fscache_n_allocs_intr;
37718 +atomic_unchecked_t fscache_n_allocs_object_dead;
37719 +atomic_unchecked_t fscache_n_alloc_ops;
37720 +atomic_unchecked_t fscache_n_alloc_op_waits;
37721 +
37722 +atomic_unchecked_t fscache_n_retrievals;
37723 +atomic_unchecked_t fscache_n_retrievals_ok;
37724 +atomic_unchecked_t fscache_n_retrievals_wait;
37725 +atomic_unchecked_t fscache_n_retrievals_nodata;
37726 +atomic_unchecked_t fscache_n_retrievals_nobufs;
37727 +atomic_unchecked_t fscache_n_retrievals_intr;
37728 +atomic_unchecked_t fscache_n_retrievals_nomem;
37729 +atomic_unchecked_t fscache_n_retrievals_object_dead;
37730 +atomic_unchecked_t fscache_n_retrieval_ops;
37731 +atomic_unchecked_t fscache_n_retrieval_op_waits;
37732 +
37733 +atomic_unchecked_t fscache_n_stores;
37734 +atomic_unchecked_t fscache_n_stores_ok;
37735 +atomic_unchecked_t fscache_n_stores_again;
37736 +atomic_unchecked_t fscache_n_stores_nobufs;
37737 +atomic_unchecked_t fscache_n_stores_oom;
37738 +atomic_unchecked_t fscache_n_store_ops;
37739 +atomic_unchecked_t fscache_n_store_calls;
37740 +atomic_unchecked_t fscache_n_store_pages;
37741 +atomic_unchecked_t fscache_n_store_radix_deletes;
37742 +atomic_unchecked_t fscache_n_store_pages_over_limit;
37743 +
37744 +atomic_unchecked_t fscache_n_store_vmscan_not_storing;
37745 +atomic_unchecked_t fscache_n_store_vmscan_gone;
37746 +atomic_unchecked_t fscache_n_store_vmscan_busy;
37747 +atomic_unchecked_t fscache_n_store_vmscan_cancelled;
37748 +
37749 +atomic_unchecked_t fscache_n_marks;
37750 +atomic_unchecked_t fscache_n_uncaches;
37751 +
37752 +atomic_unchecked_t fscache_n_acquires;
37753 +atomic_unchecked_t fscache_n_acquires_null;
37754 +atomic_unchecked_t fscache_n_acquires_no_cache;
37755 +atomic_unchecked_t fscache_n_acquires_ok;
37756 +atomic_unchecked_t fscache_n_acquires_nobufs;
37757 +atomic_unchecked_t fscache_n_acquires_oom;
37758 +
37759 +atomic_unchecked_t fscache_n_updates;
37760 +atomic_unchecked_t fscache_n_updates_null;
37761 +atomic_unchecked_t fscache_n_updates_run;
37762 +
37763 +atomic_unchecked_t fscache_n_relinquishes;
37764 +atomic_unchecked_t fscache_n_relinquishes_null;
37765 +atomic_unchecked_t fscache_n_relinquishes_waitcrt;
37766 +atomic_unchecked_t fscache_n_relinquishes_retire;
37767 +
37768 +atomic_unchecked_t fscache_n_cookie_index;
37769 +atomic_unchecked_t fscache_n_cookie_data;
37770 +atomic_unchecked_t fscache_n_cookie_special;
37771 +
37772 +atomic_unchecked_t fscache_n_object_alloc;
37773 +atomic_unchecked_t fscache_n_object_no_alloc;
37774 +atomic_unchecked_t fscache_n_object_lookups;
37775 +atomic_unchecked_t fscache_n_object_lookups_negative;
37776 +atomic_unchecked_t fscache_n_object_lookups_positive;
37777 +atomic_unchecked_t fscache_n_object_lookups_timed_out;
37778 +atomic_unchecked_t fscache_n_object_created;
37779 +atomic_unchecked_t fscache_n_object_avail;
37780 +atomic_unchecked_t fscache_n_object_dead;
37781 +
37782 +atomic_unchecked_t fscache_n_checkaux_none;
37783 +atomic_unchecked_t fscache_n_checkaux_okay;
37784 +atomic_unchecked_t fscache_n_checkaux_update;
37785 +atomic_unchecked_t fscache_n_checkaux_obsolete;
37786
37787 atomic_t fscache_n_cop_alloc_object;
37788 atomic_t fscache_n_cop_lookup_object;
37789 @@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
37790 seq_puts(m, "FS-Cache statistics\n");
37791
37792 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
37793 - atomic_read(&fscache_n_cookie_index),
37794 - atomic_read(&fscache_n_cookie_data),
37795 - atomic_read(&fscache_n_cookie_special));
37796 + atomic_read_unchecked(&fscache_n_cookie_index),
37797 + atomic_read_unchecked(&fscache_n_cookie_data),
37798 + atomic_read_unchecked(&fscache_n_cookie_special));
37799
37800 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
37801 - atomic_read(&fscache_n_object_alloc),
37802 - atomic_read(&fscache_n_object_no_alloc),
37803 - atomic_read(&fscache_n_object_avail),
37804 - atomic_read(&fscache_n_object_dead));
37805 + atomic_read_unchecked(&fscache_n_object_alloc),
37806 + atomic_read_unchecked(&fscache_n_object_no_alloc),
37807 + atomic_read_unchecked(&fscache_n_object_avail),
37808 + atomic_read_unchecked(&fscache_n_object_dead));
37809 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
37810 - atomic_read(&fscache_n_checkaux_none),
37811 - atomic_read(&fscache_n_checkaux_okay),
37812 - atomic_read(&fscache_n_checkaux_update),
37813 - atomic_read(&fscache_n_checkaux_obsolete));
37814 + atomic_read_unchecked(&fscache_n_checkaux_none),
37815 + atomic_read_unchecked(&fscache_n_checkaux_okay),
37816 + atomic_read_unchecked(&fscache_n_checkaux_update),
37817 + atomic_read_unchecked(&fscache_n_checkaux_obsolete));
37818
37819 seq_printf(m, "Pages : mrk=%u unc=%u\n",
37820 - atomic_read(&fscache_n_marks),
37821 - atomic_read(&fscache_n_uncaches));
37822 + atomic_read_unchecked(&fscache_n_marks),
37823 + atomic_read_unchecked(&fscache_n_uncaches));
37824
37825 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
37826 " oom=%u\n",
37827 - atomic_read(&fscache_n_acquires),
37828 - atomic_read(&fscache_n_acquires_null),
37829 - atomic_read(&fscache_n_acquires_no_cache),
37830 - atomic_read(&fscache_n_acquires_ok),
37831 - atomic_read(&fscache_n_acquires_nobufs),
37832 - atomic_read(&fscache_n_acquires_oom));
37833 + atomic_read_unchecked(&fscache_n_acquires),
37834 + atomic_read_unchecked(&fscache_n_acquires_null),
37835 + atomic_read_unchecked(&fscache_n_acquires_no_cache),
37836 + atomic_read_unchecked(&fscache_n_acquires_ok),
37837 + atomic_read_unchecked(&fscache_n_acquires_nobufs),
37838 + atomic_read_unchecked(&fscache_n_acquires_oom));
37839
37840 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
37841 - atomic_read(&fscache_n_object_lookups),
37842 - atomic_read(&fscache_n_object_lookups_negative),
37843 - atomic_read(&fscache_n_object_lookups_positive),
37844 - atomic_read(&fscache_n_object_created),
37845 - atomic_read(&fscache_n_object_lookups_timed_out));
37846 + atomic_read_unchecked(&fscache_n_object_lookups),
37847 + atomic_read_unchecked(&fscache_n_object_lookups_negative),
37848 + atomic_read_unchecked(&fscache_n_object_lookups_positive),
37849 + atomic_read_unchecked(&fscache_n_object_created),
37850 + atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
37851
37852 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
37853 - atomic_read(&fscache_n_updates),
37854 - atomic_read(&fscache_n_updates_null),
37855 - atomic_read(&fscache_n_updates_run));
37856 + atomic_read_unchecked(&fscache_n_updates),
37857 + atomic_read_unchecked(&fscache_n_updates_null),
37858 + atomic_read_unchecked(&fscache_n_updates_run));
37859
37860 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
37861 - atomic_read(&fscache_n_relinquishes),
37862 - atomic_read(&fscache_n_relinquishes_null),
37863 - atomic_read(&fscache_n_relinquishes_waitcrt),
37864 - atomic_read(&fscache_n_relinquishes_retire));
37865 + atomic_read_unchecked(&fscache_n_relinquishes),
37866 + atomic_read_unchecked(&fscache_n_relinquishes_null),
37867 + atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
37868 + atomic_read_unchecked(&fscache_n_relinquishes_retire));
37869
37870 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
37871 - atomic_read(&fscache_n_attr_changed),
37872 - atomic_read(&fscache_n_attr_changed_ok),
37873 - atomic_read(&fscache_n_attr_changed_nobufs),
37874 - atomic_read(&fscache_n_attr_changed_nomem),
37875 - atomic_read(&fscache_n_attr_changed_calls));
37876 + atomic_read_unchecked(&fscache_n_attr_changed),
37877 + atomic_read_unchecked(&fscache_n_attr_changed_ok),
37878 + atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
37879 + atomic_read_unchecked(&fscache_n_attr_changed_nomem),
37880 + atomic_read_unchecked(&fscache_n_attr_changed_calls));
37881
37882 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
37883 - atomic_read(&fscache_n_allocs),
37884 - atomic_read(&fscache_n_allocs_ok),
37885 - atomic_read(&fscache_n_allocs_wait),
37886 - atomic_read(&fscache_n_allocs_nobufs),
37887 - atomic_read(&fscache_n_allocs_intr));
37888 + atomic_read_unchecked(&fscache_n_allocs),
37889 + atomic_read_unchecked(&fscache_n_allocs_ok),
37890 + atomic_read_unchecked(&fscache_n_allocs_wait),
37891 + atomic_read_unchecked(&fscache_n_allocs_nobufs),
37892 + atomic_read_unchecked(&fscache_n_allocs_intr));
37893 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
37894 - atomic_read(&fscache_n_alloc_ops),
37895 - atomic_read(&fscache_n_alloc_op_waits),
37896 - atomic_read(&fscache_n_allocs_object_dead));
37897 + atomic_read_unchecked(&fscache_n_alloc_ops),
37898 + atomic_read_unchecked(&fscache_n_alloc_op_waits),
37899 + atomic_read_unchecked(&fscache_n_allocs_object_dead));
37900
37901 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
37902 " int=%u oom=%u\n",
37903 - atomic_read(&fscache_n_retrievals),
37904 - atomic_read(&fscache_n_retrievals_ok),
37905 - atomic_read(&fscache_n_retrievals_wait),
37906 - atomic_read(&fscache_n_retrievals_nodata),
37907 - atomic_read(&fscache_n_retrievals_nobufs),
37908 - atomic_read(&fscache_n_retrievals_intr),
37909 - atomic_read(&fscache_n_retrievals_nomem));
37910 + atomic_read_unchecked(&fscache_n_retrievals),
37911 + atomic_read_unchecked(&fscache_n_retrievals_ok),
37912 + atomic_read_unchecked(&fscache_n_retrievals_wait),
37913 + atomic_read_unchecked(&fscache_n_retrievals_nodata),
37914 + atomic_read_unchecked(&fscache_n_retrievals_nobufs),
37915 + atomic_read_unchecked(&fscache_n_retrievals_intr),
37916 + atomic_read_unchecked(&fscache_n_retrievals_nomem));
37917 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
37918 - atomic_read(&fscache_n_retrieval_ops),
37919 - atomic_read(&fscache_n_retrieval_op_waits),
37920 - atomic_read(&fscache_n_retrievals_object_dead));
37921 + atomic_read_unchecked(&fscache_n_retrieval_ops),
37922 + atomic_read_unchecked(&fscache_n_retrieval_op_waits),
37923 + atomic_read_unchecked(&fscache_n_retrievals_object_dead));
37924
37925 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
37926 - atomic_read(&fscache_n_stores),
37927 - atomic_read(&fscache_n_stores_ok),
37928 - atomic_read(&fscache_n_stores_again),
37929 - atomic_read(&fscache_n_stores_nobufs),
37930 - atomic_read(&fscache_n_stores_oom));
37931 + atomic_read_unchecked(&fscache_n_stores),
37932 + atomic_read_unchecked(&fscache_n_stores_ok),
37933 + atomic_read_unchecked(&fscache_n_stores_again),
37934 + atomic_read_unchecked(&fscache_n_stores_nobufs),
37935 + atomic_read_unchecked(&fscache_n_stores_oom));
37936 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
37937 - atomic_read(&fscache_n_store_ops),
37938 - atomic_read(&fscache_n_store_calls),
37939 - atomic_read(&fscache_n_store_pages),
37940 - atomic_read(&fscache_n_store_radix_deletes),
37941 - atomic_read(&fscache_n_store_pages_over_limit));
37942 + atomic_read_unchecked(&fscache_n_store_ops),
37943 + atomic_read_unchecked(&fscache_n_store_calls),
37944 + atomic_read_unchecked(&fscache_n_store_pages),
37945 + atomic_read_unchecked(&fscache_n_store_radix_deletes),
37946 + atomic_read_unchecked(&fscache_n_store_pages_over_limit));
37947
37948 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
37949 - atomic_read(&fscache_n_store_vmscan_not_storing),
37950 - atomic_read(&fscache_n_store_vmscan_gone),
37951 - atomic_read(&fscache_n_store_vmscan_busy),
37952 - atomic_read(&fscache_n_store_vmscan_cancelled));
37953 + atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
37954 + atomic_read_unchecked(&fscache_n_store_vmscan_gone),
37955 + atomic_read_unchecked(&fscache_n_store_vmscan_busy),
37956 + atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
37957
37958 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
37959 - atomic_read(&fscache_n_op_pend),
37960 - atomic_read(&fscache_n_op_run),
37961 - atomic_read(&fscache_n_op_enqueue),
37962 - atomic_read(&fscache_n_op_cancelled),
37963 - atomic_read(&fscache_n_op_rejected));
37964 + atomic_read_unchecked(&fscache_n_op_pend),
37965 + atomic_read_unchecked(&fscache_n_op_run),
37966 + atomic_read_unchecked(&fscache_n_op_enqueue),
37967 + atomic_read_unchecked(&fscache_n_op_cancelled),
37968 + atomic_read_unchecked(&fscache_n_op_rejected));
37969 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
37970 - atomic_read(&fscache_n_op_deferred_release),
37971 - atomic_read(&fscache_n_op_release),
37972 - atomic_read(&fscache_n_op_gc));
37973 + atomic_read_unchecked(&fscache_n_op_deferred_release),
37974 + atomic_read_unchecked(&fscache_n_op_release),
37975 + atomic_read_unchecked(&fscache_n_op_gc));
37976
37977 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
37978 atomic_read(&fscache_n_cop_alloc_object),
37979 diff -urNp linux-2.6.39.4/fs/fs_struct.c linux-2.6.39.4/fs/fs_struct.c
37980 --- linux-2.6.39.4/fs/fs_struct.c 2011-05-19 00:06:34.000000000 -0400
37981 +++ linux-2.6.39.4/fs/fs_struct.c 2011-08-05 19:44:37.000000000 -0400
37982 @@ -4,6 +4,7 @@
37983 #include <linux/path.h>
37984 #include <linux/slab.h>
37985 #include <linux/fs_struct.h>
37986 +#include <linux/grsecurity.h>
37987 #include "internal.h"
37988
37989 static inline void path_get_longterm(struct path *path)
37990 @@ -31,6 +32,7 @@ void set_fs_root(struct fs_struct *fs, s
37991 old_root = fs->root;
37992 fs->root = *path;
37993 path_get_longterm(path);
37994 + gr_set_chroot_entries(current, path);
37995 write_seqcount_end(&fs->seq);
37996 spin_unlock(&fs->lock);
37997 if (old_root.dentry)
37998 @@ -74,6 +76,7 @@ void chroot_fs_refs(struct path *old_roo
37999 && fs->root.mnt == old_root->mnt) {
38000 path_get_longterm(new_root);
38001 fs->root = *new_root;
38002 + gr_set_chroot_entries(p, new_root);
38003 count++;
38004 }
38005 if (fs->pwd.dentry == old_root->dentry
38006 @@ -109,7 +112,8 @@ void exit_fs(struct task_struct *tsk)
38007 spin_lock(&fs->lock);
38008 write_seqcount_begin(&fs->seq);
38009 tsk->fs = NULL;
38010 - kill = !--fs->users;
38011 + gr_clear_chroot_entries(tsk);
38012 + kill = !atomic_dec_return(&fs->users);
38013 write_seqcount_end(&fs->seq);
38014 spin_unlock(&fs->lock);
38015 task_unlock(tsk);
38016 @@ -123,7 +127,7 @@ struct fs_struct *copy_fs_struct(struct
38017 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
38018 /* We don't need to lock fs - think why ;-) */
38019 if (fs) {
38020 - fs->users = 1;
38021 + atomic_set(&fs->users, 1);
38022 fs->in_exec = 0;
38023 spin_lock_init(&fs->lock);
38024 seqcount_init(&fs->seq);
38025 @@ -132,6 +136,9 @@ struct fs_struct *copy_fs_struct(struct
38026 spin_lock(&old->lock);
38027 fs->root = old->root;
38028 path_get_longterm(&fs->root);
38029 + /* instead of calling gr_set_chroot_entries here,
38030 + we call it from every caller of this function
38031 + */
38032 fs->pwd = old->pwd;
38033 path_get_longterm(&fs->pwd);
38034 spin_unlock(&old->lock);
38035 @@ -150,8 +157,9 @@ int unshare_fs_struct(void)
38036
38037 task_lock(current);
38038 spin_lock(&fs->lock);
38039 - kill = !--fs->users;
38040 + kill = !atomic_dec_return(&fs->users);
38041 current->fs = new_fs;
38042 + gr_set_chroot_entries(current, &new_fs->root);
38043 spin_unlock(&fs->lock);
38044 task_unlock(current);
38045
38046 @@ -170,7 +178,7 @@ EXPORT_SYMBOL(current_umask);
38047
38048 /* to be mentioned only in INIT_TASK */
38049 struct fs_struct init_fs = {
38050 - .users = 1,
38051 + .users = ATOMIC_INIT(1),
38052 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
38053 .seq = SEQCNT_ZERO,
38054 .umask = 0022,
38055 @@ -186,12 +194,13 @@ void daemonize_fs_struct(void)
38056 task_lock(current);
38057
38058 spin_lock(&init_fs.lock);
38059 - init_fs.users++;
38060 + atomic_inc(&init_fs.users);
38061 spin_unlock(&init_fs.lock);
38062
38063 spin_lock(&fs->lock);
38064 current->fs = &init_fs;
38065 - kill = !--fs->users;
38066 + gr_set_chroot_entries(current, &current->fs->root);
38067 + kill = !atomic_dec_return(&fs->users);
38068 spin_unlock(&fs->lock);
38069
38070 task_unlock(current);
38071 diff -urNp linux-2.6.39.4/fs/fuse/cuse.c linux-2.6.39.4/fs/fuse/cuse.c
38072 --- linux-2.6.39.4/fs/fuse/cuse.c 2011-05-19 00:06:34.000000000 -0400
38073 +++ linux-2.6.39.4/fs/fuse/cuse.c 2011-08-05 20:34:06.000000000 -0400
38074 @@ -586,10 +586,12 @@ static int __init cuse_init(void)
38075 INIT_LIST_HEAD(&cuse_conntbl[i]);
38076
38077 /* inherit and extend fuse_dev_operations */
38078 - cuse_channel_fops = fuse_dev_operations;
38079 - cuse_channel_fops.owner = THIS_MODULE;
38080 - cuse_channel_fops.open = cuse_channel_open;
38081 - cuse_channel_fops.release = cuse_channel_release;
38082 + pax_open_kernel();
38083 + memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
38084 + *(void **)&cuse_channel_fops.owner = THIS_MODULE;
38085 + *(void **)&cuse_channel_fops.open = cuse_channel_open;
38086 + *(void **)&cuse_channel_fops.release = cuse_channel_release;
38087 + pax_close_kernel();
38088
38089 cuse_class = class_create(THIS_MODULE, "cuse");
38090 if (IS_ERR(cuse_class))
38091 diff -urNp linux-2.6.39.4/fs/fuse/dev.c linux-2.6.39.4/fs/fuse/dev.c
38092 --- linux-2.6.39.4/fs/fuse/dev.c 2011-05-19 00:06:34.000000000 -0400
38093 +++ linux-2.6.39.4/fs/fuse/dev.c 2011-08-05 20:34:06.000000000 -0400
38094 @@ -1238,7 +1238,7 @@ static ssize_t fuse_dev_splice_read(stru
38095 ret = 0;
38096 pipe_lock(pipe);
38097
38098 - if (!pipe->readers) {
38099 + if (!atomic_read(&pipe->readers)) {
38100 send_sig(SIGPIPE, current, 0);
38101 if (!ret)
38102 ret = -EPIPE;
38103 diff -urNp linux-2.6.39.4/fs/fuse/dir.c linux-2.6.39.4/fs/fuse/dir.c
38104 --- linux-2.6.39.4/fs/fuse/dir.c 2011-05-19 00:06:34.000000000 -0400
38105 +++ linux-2.6.39.4/fs/fuse/dir.c 2011-08-05 19:44:37.000000000 -0400
38106 @@ -1147,7 +1147,7 @@ static char *read_link(struct dentry *de
38107 return link;
38108 }
38109
38110 -static void free_link(char *link)
38111 +static void free_link(const char *link)
38112 {
38113 if (!IS_ERR(link))
38114 free_page((unsigned long) link);
38115 diff -urNp linux-2.6.39.4/fs/gfs2/ops_inode.c linux-2.6.39.4/fs/gfs2/ops_inode.c
38116 --- linux-2.6.39.4/fs/gfs2/ops_inode.c 2011-05-19 00:06:34.000000000 -0400
38117 +++ linux-2.6.39.4/fs/gfs2/ops_inode.c 2011-08-05 19:44:37.000000000 -0400
38118 @@ -740,6 +740,8 @@ static int gfs2_rename(struct inode *odi
38119 unsigned int x;
38120 int error;
38121
38122 + pax_track_stack();
38123 +
38124 if (ndentry->d_inode) {
38125 nip = GFS2_I(ndentry->d_inode);
38126 if (ip == nip)
38127 @@ -1019,7 +1021,7 @@ out:
38128
38129 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
38130 {
38131 - char *s = nd_get_link(nd);
38132 + const char *s = nd_get_link(nd);
38133 if (!IS_ERR(s))
38134 kfree(s);
38135 }
38136 diff -urNp linux-2.6.39.4/fs/hfsplus/catalog.c linux-2.6.39.4/fs/hfsplus/catalog.c
38137 --- linux-2.6.39.4/fs/hfsplus/catalog.c 2011-05-19 00:06:34.000000000 -0400
38138 +++ linux-2.6.39.4/fs/hfsplus/catalog.c 2011-08-05 19:44:37.000000000 -0400
38139 @@ -179,6 +179,8 @@ int hfsplus_find_cat(struct super_block
38140 int err;
38141 u16 type;
38142
38143 + pax_track_stack();
38144 +
38145 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
38146 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
38147 if (err)
38148 @@ -210,6 +212,8 @@ int hfsplus_create_cat(u32 cnid, struct
38149 int entry_size;
38150 int err;
38151
38152 + pax_track_stack();
38153 +
38154 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n",
38155 str->name, cnid, inode->i_nlink);
38156 hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd);
38157 @@ -349,6 +353,8 @@ int hfsplus_rename_cat(u32 cnid,
38158 int entry_size, type;
38159 int err = 0;
38160
38161 + pax_track_stack();
38162 +
38163 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n",
38164 cnid, src_dir->i_ino, src_name->name,
38165 dst_dir->i_ino, dst_name->name);
38166 diff -urNp linux-2.6.39.4/fs/hfsplus/dir.c linux-2.6.39.4/fs/hfsplus/dir.c
38167 --- linux-2.6.39.4/fs/hfsplus/dir.c 2011-05-19 00:06:34.000000000 -0400
38168 +++ linux-2.6.39.4/fs/hfsplus/dir.c 2011-08-05 19:44:37.000000000 -0400
38169 @@ -129,6 +129,8 @@ static int hfsplus_readdir(struct file *
38170 struct hfsplus_readdir_data *rd;
38171 u16 type;
38172
38173 + pax_track_stack();
38174 +
38175 if (filp->f_pos >= inode->i_size)
38176 return 0;
38177
38178 diff -urNp linux-2.6.39.4/fs/hfsplus/inode.c linux-2.6.39.4/fs/hfsplus/inode.c
38179 --- linux-2.6.39.4/fs/hfsplus/inode.c 2011-05-19 00:06:34.000000000 -0400
38180 +++ linux-2.6.39.4/fs/hfsplus/inode.c 2011-08-05 19:44:37.000000000 -0400
38181 @@ -489,6 +489,8 @@ int hfsplus_cat_read_inode(struct inode
38182 int res = 0;
38183 u16 type;
38184
38185 + pax_track_stack();
38186 +
38187 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
38188
38189 HFSPLUS_I(inode)->linkid = 0;
38190 @@ -552,6 +554,8 @@ int hfsplus_cat_write_inode(struct inode
38191 struct hfs_find_data fd;
38192 hfsplus_cat_entry entry;
38193
38194 + pax_track_stack();
38195 +
38196 if (HFSPLUS_IS_RSRC(inode))
38197 main_inode = HFSPLUS_I(inode)->rsrc_inode;
38198
38199 diff -urNp linux-2.6.39.4/fs/hfsplus/ioctl.c linux-2.6.39.4/fs/hfsplus/ioctl.c
38200 --- linux-2.6.39.4/fs/hfsplus/ioctl.c 2011-05-19 00:06:34.000000000 -0400
38201 +++ linux-2.6.39.4/fs/hfsplus/ioctl.c 2011-08-05 19:44:37.000000000 -0400
38202 @@ -122,6 +122,8 @@ int hfsplus_setxattr(struct dentry *dent
38203 struct hfsplus_cat_file *file;
38204 int res;
38205
38206 + pax_track_stack();
38207 +
38208 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
38209 return -EOPNOTSUPP;
38210
38211 @@ -166,6 +168,8 @@ ssize_t hfsplus_getxattr(struct dentry *
38212 struct hfsplus_cat_file *file;
38213 ssize_t res = 0;
38214
38215 + pax_track_stack();
38216 +
38217 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
38218 return -EOPNOTSUPP;
38219
38220 diff -urNp linux-2.6.39.4/fs/hfsplus/super.c linux-2.6.39.4/fs/hfsplus/super.c
38221 --- linux-2.6.39.4/fs/hfsplus/super.c 2011-05-19 00:06:34.000000000 -0400
38222 +++ linux-2.6.39.4/fs/hfsplus/super.c 2011-08-05 19:44:37.000000000 -0400
38223 @@ -340,6 +340,8 @@ static int hfsplus_fill_super(struct sup
38224 struct nls_table *nls = NULL;
38225 int err;
38226
38227 + pax_track_stack();
38228 +
38229 err = -EINVAL;
38230 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
38231 if (!sbi)
38232 diff -urNp linux-2.6.39.4/fs/hugetlbfs/inode.c linux-2.6.39.4/fs/hugetlbfs/inode.c
38233 --- linux-2.6.39.4/fs/hugetlbfs/inode.c 2011-05-19 00:06:34.000000000 -0400
38234 +++ linux-2.6.39.4/fs/hugetlbfs/inode.c 2011-08-05 19:44:37.000000000 -0400
38235 @@ -914,7 +914,7 @@ static struct file_system_type hugetlbfs
38236 .kill_sb = kill_litter_super,
38237 };
38238
38239 -static struct vfsmount *hugetlbfs_vfsmount;
38240 +struct vfsmount *hugetlbfs_vfsmount;
38241
38242 static int can_do_hugetlb_shm(void)
38243 {
38244 diff -urNp linux-2.6.39.4/fs/inode.c linux-2.6.39.4/fs/inode.c
38245 --- linux-2.6.39.4/fs/inode.c 2011-05-19 00:06:34.000000000 -0400
38246 +++ linux-2.6.39.4/fs/inode.c 2011-08-05 19:44:37.000000000 -0400
38247 @@ -862,8 +862,8 @@ unsigned int get_next_ino(void)
38248
38249 #ifdef CONFIG_SMP
38250 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
38251 - static atomic_t shared_last_ino;
38252 - int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
38253 + static atomic_unchecked_t shared_last_ino;
38254 + int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
38255
38256 res = next - LAST_INO_BATCH;
38257 }
38258 diff -urNp linux-2.6.39.4/fs/jbd/checkpoint.c linux-2.6.39.4/fs/jbd/checkpoint.c
38259 --- linux-2.6.39.4/fs/jbd/checkpoint.c 2011-05-19 00:06:34.000000000 -0400
38260 +++ linux-2.6.39.4/fs/jbd/checkpoint.c 2011-08-05 19:44:37.000000000 -0400
38261 @@ -350,6 +350,8 @@ int log_do_checkpoint(journal_t *journal
38262 tid_t this_tid;
38263 int result;
38264
38265 + pax_track_stack();
38266 +
38267 jbd_debug(1, "Start checkpoint\n");
38268
38269 /*
38270 diff -urNp linux-2.6.39.4/fs/jffs2/compr_rtime.c linux-2.6.39.4/fs/jffs2/compr_rtime.c
38271 --- linux-2.6.39.4/fs/jffs2/compr_rtime.c 2011-05-19 00:06:34.000000000 -0400
38272 +++ linux-2.6.39.4/fs/jffs2/compr_rtime.c 2011-08-05 19:44:37.000000000 -0400
38273 @@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
38274 int outpos = 0;
38275 int pos=0;
38276
38277 + pax_track_stack();
38278 +
38279 memset(positions,0,sizeof(positions));
38280
38281 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
38282 @@ -78,6 +80,8 @@ static int jffs2_rtime_decompress(unsign
38283 int outpos = 0;
38284 int pos=0;
38285
38286 + pax_track_stack();
38287 +
38288 memset(positions,0,sizeof(positions));
38289
38290 while (outpos<destlen) {
38291 diff -urNp linux-2.6.39.4/fs/jffs2/compr_rubin.c linux-2.6.39.4/fs/jffs2/compr_rubin.c
38292 --- linux-2.6.39.4/fs/jffs2/compr_rubin.c 2011-05-19 00:06:34.000000000 -0400
38293 +++ linux-2.6.39.4/fs/jffs2/compr_rubin.c 2011-08-05 19:44:37.000000000 -0400
38294 @@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
38295 int ret;
38296 uint32_t mysrclen, mydstlen;
38297
38298 + pax_track_stack();
38299 +
38300 mysrclen = *sourcelen;
38301 mydstlen = *dstlen - 8;
38302
38303 diff -urNp linux-2.6.39.4/fs/jffs2/erase.c linux-2.6.39.4/fs/jffs2/erase.c
38304 --- linux-2.6.39.4/fs/jffs2/erase.c 2011-05-19 00:06:34.000000000 -0400
38305 +++ linux-2.6.39.4/fs/jffs2/erase.c 2011-08-05 19:44:37.000000000 -0400
38306 @@ -439,7 +439,8 @@ static void jffs2_mark_erased_block(stru
38307 struct jffs2_unknown_node marker = {
38308 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
38309 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
38310 - .totlen = cpu_to_je32(c->cleanmarker_size)
38311 + .totlen = cpu_to_je32(c->cleanmarker_size),
38312 + .hdr_crc = cpu_to_je32(0)
38313 };
38314
38315 jffs2_prealloc_raw_node_refs(c, jeb, 1);
38316 diff -urNp linux-2.6.39.4/fs/jffs2/wbuf.c linux-2.6.39.4/fs/jffs2/wbuf.c
38317 --- linux-2.6.39.4/fs/jffs2/wbuf.c 2011-05-19 00:06:34.000000000 -0400
38318 +++ linux-2.6.39.4/fs/jffs2/wbuf.c 2011-08-05 19:44:37.000000000 -0400
38319 @@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
38320 {
38321 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
38322 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
38323 - .totlen = constant_cpu_to_je32(8)
38324 + .totlen = constant_cpu_to_je32(8),
38325 + .hdr_crc = constant_cpu_to_je32(0)
38326 };
38327
38328 /*
38329 diff -urNp linux-2.6.39.4/fs/jffs2/xattr.c linux-2.6.39.4/fs/jffs2/xattr.c
38330 --- linux-2.6.39.4/fs/jffs2/xattr.c 2011-05-19 00:06:34.000000000 -0400
38331 +++ linux-2.6.39.4/fs/jffs2/xattr.c 2011-08-05 19:44:37.000000000 -0400
38332 @@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
38333
38334 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
38335
38336 + pax_track_stack();
38337 +
38338 /* Phase.1 : Merge same xref */
38339 for (i=0; i < XREF_TMPHASH_SIZE; i++)
38340 xref_tmphash[i] = NULL;
38341 diff -urNp linux-2.6.39.4/fs/jfs/super.c linux-2.6.39.4/fs/jfs/super.c
38342 --- linux-2.6.39.4/fs/jfs/super.c 2011-05-19 00:06:34.000000000 -0400
38343 +++ linux-2.6.39.4/fs/jfs/super.c 2011-08-05 19:44:37.000000000 -0400
38344 @@ -803,7 +803,7 @@ static int __init init_jfs_fs(void)
38345
38346 jfs_inode_cachep =
38347 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
38348 - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
38349 + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
38350 init_once);
38351 if (jfs_inode_cachep == NULL)
38352 return -ENOMEM;
38353 diff -urNp linux-2.6.39.4/fs/Kconfig.binfmt linux-2.6.39.4/fs/Kconfig.binfmt
38354 --- linux-2.6.39.4/fs/Kconfig.binfmt 2011-05-19 00:06:34.000000000 -0400
38355 +++ linux-2.6.39.4/fs/Kconfig.binfmt 2011-08-05 19:44:37.000000000 -0400
38356 @@ -86,7 +86,7 @@ config HAVE_AOUT
38357
38358 config BINFMT_AOUT
38359 tristate "Kernel support for a.out and ECOFF binaries"
38360 - depends on HAVE_AOUT
38361 + depends on HAVE_AOUT && BROKEN
38362 ---help---
38363 A.out (Assembler.OUTput) is a set of formats for libraries and
38364 executables used in the earliest versions of UNIX. Linux used
38365 diff -urNp linux-2.6.39.4/fs/libfs.c linux-2.6.39.4/fs/libfs.c
38366 --- linux-2.6.39.4/fs/libfs.c 2011-05-19 00:06:34.000000000 -0400
38367 +++ linux-2.6.39.4/fs/libfs.c 2011-08-05 19:44:37.000000000 -0400
38368 @@ -163,6 +163,9 @@ int dcache_readdir(struct file * filp, v
38369
38370 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
38371 struct dentry *next;
38372 + char d_name[sizeof(next->d_iname)];
38373 + const unsigned char *name;
38374 +
38375 next = list_entry(p, struct dentry, d_u.d_child);
38376 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
38377 if (!simple_positive(next)) {
38378 @@ -172,7 +175,12 @@ int dcache_readdir(struct file * filp, v
38379
38380 spin_unlock(&next->d_lock);
38381 spin_unlock(&dentry->d_lock);
38382 - if (filldir(dirent, next->d_name.name,
38383 + name = next->d_name.name;
38384 + if (name == next->d_iname) {
38385 + memcpy(d_name, name, next->d_name.len);
38386 + name = d_name;
38387 + }
38388 + if (filldir(dirent, name,
38389 next->d_name.len, filp->f_pos,
38390 next->d_inode->i_ino,
38391 dt_type(next->d_inode)) < 0)
38392 diff -urNp linux-2.6.39.4/fs/lockd/clntproc.c linux-2.6.39.4/fs/lockd/clntproc.c
38393 --- linux-2.6.39.4/fs/lockd/clntproc.c 2011-07-09 09:18:51.000000000 -0400
38394 +++ linux-2.6.39.4/fs/lockd/clntproc.c 2011-08-05 19:44:37.000000000 -0400
38395 @@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
38396 /*
38397 * Cookie counter for NLM requests
38398 */
38399 -static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
38400 +static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
38401
38402 void nlmclnt_next_cookie(struct nlm_cookie *c)
38403 {
38404 - u32 cookie = atomic_inc_return(&nlm_cookie);
38405 + u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
38406
38407 memcpy(c->data, &cookie, 4);
38408 c->len=4;
38409 @@ -620,6 +620,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
38410 struct nlm_rqst reqst, *req;
38411 int status;
38412
38413 + pax_track_stack();
38414 +
38415 req = &reqst;
38416 memset(req, 0, sizeof(*req));
38417 locks_init_lock(&req->a_args.lock.fl);
38418 diff -urNp linux-2.6.39.4/fs/locks.c linux-2.6.39.4/fs/locks.c
38419 --- linux-2.6.39.4/fs/locks.c 2011-07-09 09:18:51.000000000 -0400
38420 +++ linux-2.6.39.4/fs/locks.c 2011-08-05 19:44:37.000000000 -0400
38421 @@ -2043,16 +2043,16 @@ void locks_remove_flock(struct file *fil
38422 return;
38423
38424 if (filp->f_op && filp->f_op->flock) {
38425 - struct file_lock fl = {
38426 + struct file_lock flock = {
38427 .fl_pid = current->tgid,
38428 .fl_file = filp,
38429 .fl_flags = FL_FLOCK,
38430 .fl_type = F_UNLCK,
38431 .fl_end = OFFSET_MAX,
38432 };
38433 - filp->f_op->flock(filp, F_SETLKW, &fl);
38434 - if (fl.fl_ops && fl.fl_ops->fl_release_private)
38435 - fl.fl_ops->fl_release_private(&fl);
38436 + filp->f_op->flock(filp, F_SETLKW, &flock);
38437 + if (flock.fl_ops && flock.fl_ops->fl_release_private)
38438 + flock.fl_ops->fl_release_private(&flock);
38439 }
38440
38441 lock_flocks();
38442 diff -urNp linux-2.6.39.4/fs/logfs/super.c linux-2.6.39.4/fs/logfs/super.c
38443 --- linux-2.6.39.4/fs/logfs/super.c 2011-05-19 00:06:34.000000000 -0400
38444 +++ linux-2.6.39.4/fs/logfs/super.c 2011-08-05 19:44:37.000000000 -0400
38445 @@ -266,6 +266,8 @@ static int logfs_recover_sb(struct super
38446 struct logfs_disk_super _ds1, *ds1 = &_ds1;
38447 int err, valid0, valid1;
38448
38449 + pax_track_stack();
38450 +
38451 /* read first superblock */
38452 err = wbuf_read(sb, super->s_sb_ofs[0], sizeof(*ds0), ds0);
38453 if (err)
38454 diff -urNp linux-2.6.39.4/fs/namei.c linux-2.6.39.4/fs/namei.c
38455 --- linux-2.6.39.4/fs/namei.c 2011-08-05 21:11:51.000000000 -0400
38456 +++ linux-2.6.39.4/fs/namei.c 2011-08-05 21:12:20.000000000 -0400
38457 @@ -237,20 +237,30 @@ int generic_permission(struct inode *ino
38458 return ret;
38459
38460 /*
38461 - * Read/write DACs are always overridable.
38462 - * Executable DACs are overridable if at least one exec bit is set.
38463 + * Searching includes executable on directories, else just read.
38464 */
38465 - if (!(mask & MAY_EXEC) || execute_ok(inode))
38466 - if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38467 + mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38468 + if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE))) {
38469 +#ifdef CONFIG_GRKERNSEC
38470 + if (flags & IPERM_FLAG_RCU)
38471 + return -ECHILD;
38472 +#endif
38473 + if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38474 return 0;
38475 + }
38476
38477 /*
38478 - * Searching includes executable on directories, else just read.
38479 + * Read/write DACs are always overridable.
38480 + * Executable DACs are overridable if at least one exec bit is set.
38481 */
38482 - mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
38483 - if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
38484 - if (ns_capable(inode_userns(inode), CAP_DAC_READ_SEARCH))
38485 + if (!(mask & MAY_EXEC) || execute_ok(inode)) {
38486 +#ifdef CONFIG_GRKERNSEC
38487 + if (flags & IPERM_FLAG_RCU)
38488 + return -ECHILD;
38489 +#endif
38490 + if (ns_capable(inode_userns(inode), CAP_DAC_OVERRIDE))
38491 return 0;
38492 + }
38493
38494 return -EACCES;
38495 }
38496 @@ -626,6 +636,9 @@ static inline int handle_reval_path(stru
38497 struct dentry *dentry = nd->path.dentry;
38498 int status;
38499
38500 + if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
38501 + return -ENOENT;
38502 +
38503 if (likely(!(nd->flags & LOOKUP_JUMPED)))
38504 return 0;
38505
38506 @@ -671,9 +684,16 @@ static inline int exec_permission(struct
38507 if (ret == -ECHILD)
38508 return ret;
38509
38510 - if (ns_capable(ns, CAP_DAC_OVERRIDE) ||
38511 - ns_capable(ns, CAP_DAC_READ_SEARCH))
38512 + if (ns_capable_nolog(ns, CAP_DAC_OVERRIDE))
38513 goto ok;
38514 + else {
38515 +#ifdef CONFIG_GRKERNSEC
38516 + if (flags & IPERM_FLAG_RCU)
38517 + return -ECHILD;
38518 +#endif
38519 + if (ns_capable(ns, CAP_DAC_READ_SEARCH) || ns_capable(ns, CAP_DAC_OVERRIDE))
38520 + goto ok;
38521 + }
38522
38523 return ret;
38524 ok:
38525 @@ -781,11 +801,19 @@ follow_link(struct path *link, struct na
38526 return error;
38527 }
38528
38529 + if (gr_handle_follow_link(dentry->d_parent->d_inode,
38530 + dentry->d_inode, dentry, nd->path.mnt)) {
38531 + error = -EACCES;
38532 + *p = ERR_PTR(error); /* no ->put_link(), please */
38533 + path_put(&nd->path);
38534 + return error;
38535 + }
38536 +
38537 nd->last_type = LAST_BIND;
38538 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
38539 error = PTR_ERR(*p);
38540 if (!IS_ERR(*p)) {
38541 - char *s = nd_get_link(nd);
38542 + const char *s = nd_get_link(nd);
38543 error = 0;
38544 if (s)
38545 error = __vfs_follow_link(nd, s);
38546 @@ -1702,6 +1730,9 @@ static int do_path_lookup(int dfd, const
38547 retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
38548
38549 if (likely(!retval)) {
38550 + if (*name != '/' && nd->path.dentry && nd->inode && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
38551 + return -ENOENT;
38552 +
38553 if (unlikely(!audit_dummy_context())) {
38554 if (nd->path.dentry && nd->inode)
38555 audit_inode(name, nd->path.dentry);
38556 @@ -2012,6 +2043,30 @@ int vfs_create(struct inode *dir, struct
38557 return error;
38558 }
38559
38560 +/*
38561 + * Note that while the flag value (low two bits) for sys_open means:
38562 + * 00 - read-only
38563 + * 01 - write-only
38564 + * 10 - read-write
38565 + * 11 - special
38566 + * it is changed into
38567 + * 00 - no permissions needed
38568 + * 01 - read-permission
38569 + * 10 - write-permission
38570 + * 11 - read-write
38571 + * for the internal routines (ie open_namei()/follow_link() etc)
38572 + * This is more logical, and also allows the 00 "no perm needed"
38573 + * to be used for symlinks (where the permissions are checked
38574 + * later).
38575 + *
38576 +*/
38577 +static inline int open_to_namei_flags(int flag)
38578 +{
38579 + if ((flag+1) & O_ACCMODE)
38580 + flag++;
38581 + return flag;
38582 +}
38583 +
38584 static int may_open(struct path *path, int acc_mode, int flag)
38585 {
38586 struct dentry *dentry = path->dentry;
38587 @@ -2064,7 +2119,27 @@ static int may_open(struct path *path, i
38588 /*
38589 * Ensure there are no outstanding leases on the file.
38590 */
38591 - return break_lease(inode, flag);
38592 + error = break_lease(inode, flag);
38593 +
38594 + if (error)
38595 + return error;
38596 +
38597 + if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
38598 + error = -EPERM;
38599 + goto exit;
38600 + }
38601 +
38602 + if (gr_handle_rawio(inode)) {
38603 + error = -EPERM;
38604 + goto exit;
38605 + }
38606 +
38607 + if (!gr_acl_handle_open(dentry, path->mnt, open_to_namei_flags(flag))) {
38608 + error = -EACCES;
38609 + goto exit;
38610 + }
38611 +exit:
38612 + return error;
38613 }
38614
38615 static int handle_truncate(struct file *filp)
38616 @@ -2090,30 +2165,6 @@ static int handle_truncate(struct file *
38617 }
38618
38619 /*
38620 - * Note that while the flag value (low two bits) for sys_open means:
38621 - * 00 - read-only
38622 - * 01 - write-only
38623 - * 10 - read-write
38624 - * 11 - special
38625 - * it is changed into
38626 - * 00 - no permissions needed
38627 - * 01 - read-permission
38628 - * 10 - write-permission
38629 - * 11 - read-write
38630 - * for the internal routines (ie open_namei()/follow_link() etc)
38631 - * This is more logical, and also allows the 00 "no perm needed"
38632 - * to be used for symlinks (where the permissions are checked
38633 - * later).
38634 - *
38635 -*/
38636 -static inline int open_to_namei_flags(int flag)
38637 -{
38638 - if ((flag+1) & O_ACCMODE)
38639 - flag++;
38640 - return flag;
38641 -}
38642 -
38643 -/*
38644 * Handle the last step of open()
38645 */
38646 static struct file *do_last(struct nameidata *nd, struct path *path,
38647 @@ -2122,6 +2173,7 @@ static struct file *do_last(struct namei
38648 struct dentry *dir = nd->path.dentry;
38649 struct dentry *dentry;
38650 int open_flag = op->open_flag;
38651 + int flag = open_to_namei_flags(open_flag);
38652 int will_truncate = open_flag & O_TRUNC;
38653 int want_write = 0;
38654 int acc_mode = op->acc_mode;
38655 @@ -2217,6 +2269,12 @@ static struct file *do_last(struct namei
38656 /* Negative dentry, just create the file */
38657 if (!dentry->d_inode) {
38658 int mode = op->mode;
38659 +
38660 + if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, path->mnt, flag, mode)) {
38661 + error = -EACCES;
38662 + goto exit_mutex_unlock;
38663 + }
38664 +
38665 if (!IS_POSIXACL(dir->d_inode))
38666 mode &= ~current_umask();
38667 /*
38668 @@ -2240,6 +2298,8 @@ static struct file *do_last(struct namei
38669 error = vfs_create(dir->d_inode, dentry, mode, nd);
38670 if (error)
38671 goto exit_mutex_unlock;
38672 + else
38673 + gr_handle_create(path->dentry, path->mnt);
38674 mutex_unlock(&dir->d_inode->i_mutex);
38675 dput(nd->path.dentry);
38676 nd->path.dentry = dentry;
38677 @@ -2249,6 +2309,14 @@ static struct file *do_last(struct namei
38678 /*
38679 * It already exists.
38680 */
38681 +
38682 + /* only check if O_CREAT is specified, all other checks need to go
38683 + into may_open */
38684 + if (gr_handle_fifo(path->dentry, path->mnt, dir, flag, acc_mode)) {
38685 + error = -EACCES;
38686 + goto exit_mutex_unlock;
38687 + }
38688 +
38689 mutex_unlock(&dir->d_inode->i_mutex);
38690 audit_inode(pathname, path->dentry);
38691
38692 @@ -2535,6 +2603,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38693 error = may_mknod(mode);
38694 if (error)
38695 goto out_dput;
38696 +
38697 + if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
38698 + error = -EPERM;
38699 + goto out_dput;
38700 + }
38701 +
38702 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
38703 + error = -EACCES;
38704 + goto out_dput;
38705 + }
38706 +
38707 error = mnt_want_write(nd.path.mnt);
38708 if (error)
38709 goto out_dput;
38710 @@ -2555,6 +2634,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
38711 }
38712 out_drop_write:
38713 mnt_drop_write(nd.path.mnt);
38714 +
38715 + if (!error)
38716 + gr_handle_create(dentry, nd.path.mnt);
38717 out_dput:
38718 dput(dentry);
38719 out_unlock:
38720 @@ -2607,6 +2689,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38721 if (IS_ERR(dentry))
38722 goto out_unlock;
38723
38724 + if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
38725 + error = -EACCES;
38726 + goto out_dput;
38727 + }
38728 +
38729 if (!IS_POSIXACL(nd.path.dentry->d_inode))
38730 mode &= ~current_umask();
38731 error = mnt_want_write(nd.path.mnt);
38732 @@ -2618,6 +2705,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
38733 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
38734 out_drop_write:
38735 mnt_drop_write(nd.path.mnt);
38736 +
38737 + if (!error)
38738 + gr_handle_create(dentry, nd.path.mnt);
38739 +
38740 out_dput:
38741 dput(dentry);
38742 out_unlock:
38743 @@ -2697,6 +2788,8 @@ static long do_rmdir(int dfd, const char
38744 char * name;
38745 struct dentry *dentry;
38746 struct nameidata nd;
38747 + ino_t saved_ino = 0;
38748 + dev_t saved_dev = 0;
38749
38750 error = user_path_parent(dfd, pathname, &nd, &name);
38751 if (error)
38752 @@ -2721,6 +2814,19 @@ static long do_rmdir(int dfd, const char
38753 error = PTR_ERR(dentry);
38754 if (IS_ERR(dentry))
38755 goto exit2;
38756 +
38757 + if (dentry->d_inode != NULL) {
38758 + if (dentry->d_inode->i_nlink <= 1) {
38759 + saved_ino = dentry->d_inode->i_ino;
38760 + saved_dev = gr_get_dev_from_dentry(dentry);
38761 + }
38762 +
38763 + if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
38764 + error = -EACCES;
38765 + goto exit3;
38766 + }
38767 + }
38768 +
38769 error = mnt_want_write(nd.path.mnt);
38770 if (error)
38771 goto exit3;
38772 @@ -2728,6 +2834,8 @@ static long do_rmdir(int dfd, const char
38773 if (error)
38774 goto exit4;
38775 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
38776 + if (!error && (saved_dev || saved_ino))
38777 + gr_handle_delete(saved_ino, saved_dev);
38778 exit4:
38779 mnt_drop_write(nd.path.mnt);
38780 exit3:
38781 @@ -2790,6 +2898,8 @@ static long do_unlinkat(int dfd, const c
38782 struct dentry *dentry;
38783 struct nameidata nd;
38784 struct inode *inode = NULL;
38785 + ino_t saved_ino = 0;
38786 + dev_t saved_dev = 0;
38787
38788 error = user_path_parent(dfd, pathname, &nd, &name);
38789 if (error)
38790 @@ -2809,8 +2919,17 @@ static long do_unlinkat(int dfd, const c
38791 if (nd.last.name[nd.last.len])
38792 goto slashes;
38793 inode = dentry->d_inode;
38794 - if (inode)
38795 + if (inode) {
38796 ihold(inode);
38797 + if (inode->i_nlink <= 1) {
38798 + saved_ino = inode->i_ino;
38799 + saved_dev = gr_get_dev_from_dentry(dentry);
38800 + }
38801 + if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
38802 + error = -EACCES;
38803 + goto exit2;
38804 + }
38805 + }
38806 error = mnt_want_write(nd.path.mnt);
38807 if (error)
38808 goto exit2;
38809 @@ -2818,6 +2937,8 @@ static long do_unlinkat(int dfd, const c
38810 if (error)
38811 goto exit3;
38812 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
38813 + if (!error && (saved_ino || saved_dev))
38814 + gr_handle_delete(saved_ino, saved_dev);
38815 exit3:
38816 mnt_drop_write(nd.path.mnt);
38817 exit2:
38818 @@ -2895,6 +3016,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
38819 if (IS_ERR(dentry))
38820 goto out_unlock;
38821
38822 + if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
38823 + error = -EACCES;
38824 + goto out_dput;
38825 + }
38826 +
38827 error = mnt_want_write(nd.path.mnt);
38828 if (error)
38829 goto out_dput;
38830 @@ -2902,6 +3028,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
38831 if (error)
38832 goto out_drop_write;
38833 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
38834 + if (!error)
38835 + gr_handle_create(dentry, nd.path.mnt);
38836 out_drop_write:
38837 mnt_drop_write(nd.path.mnt);
38838 out_dput:
38839 @@ -3010,6 +3138,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38840 error = PTR_ERR(new_dentry);
38841 if (IS_ERR(new_dentry))
38842 goto out_unlock;
38843 +
38844 + if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
38845 + old_path.dentry->d_inode,
38846 + old_path.dentry->d_inode->i_mode, to)) {
38847 + error = -EACCES;
38848 + goto out_dput;
38849 + }
38850 +
38851 + if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
38852 + old_path.dentry, old_path.mnt, to)) {
38853 + error = -EACCES;
38854 + goto out_dput;
38855 + }
38856 +
38857 error = mnt_want_write(nd.path.mnt);
38858 if (error)
38859 goto out_dput;
38860 @@ -3017,6 +3159,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
38861 if (error)
38862 goto out_drop_write;
38863 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
38864 + if (!error)
38865 + gr_handle_create(new_dentry, nd.path.mnt);
38866 out_drop_write:
38867 mnt_drop_write(nd.path.mnt);
38868 out_dput:
38869 @@ -3194,6 +3338,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38870 char *to;
38871 int error;
38872
38873 + pax_track_stack();
38874 +
38875 error = user_path_parent(olddfd, oldname, &oldnd, &from);
38876 if (error)
38877 goto exit;
38878 @@ -3250,6 +3396,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38879 if (new_dentry == trap)
38880 goto exit5;
38881
38882 + error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
38883 + old_dentry, old_dir->d_inode, oldnd.path.mnt,
38884 + to);
38885 + if (error)
38886 + goto exit5;
38887 +
38888 error = mnt_want_write(oldnd.path.mnt);
38889 if (error)
38890 goto exit5;
38891 @@ -3259,6 +3411,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
38892 goto exit6;
38893 error = vfs_rename(old_dir->d_inode, old_dentry,
38894 new_dir->d_inode, new_dentry);
38895 + if (!error)
38896 + gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
38897 + new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
38898 exit6:
38899 mnt_drop_write(oldnd.path.mnt);
38900 exit5:
38901 @@ -3284,6 +3439,8 @@ SYSCALL_DEFINE2(rename, const char __use
38902
38903 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
38904 {
38905 + char tmpbuf[64];
38906 + const char *newlink;
38907 int len;
38908
38909 len = PTR_ERR(link);
38910 @@ -3293,7 +3450,14 @@ int vfs_readlink(struct dentry *dentry,
38911 len = strlen(link);
38912 if (len > (unsigned) buflen)
38913 len = buflen;
38914 - if (copy_to_user(buffer, link, len))
38915 +
38916 + if (len < sizeof(tmpbuf)) {
38917 + memcpy(tmpbuf, link, len);
38918 + newlink = tmpbuf;
38919 + } else
38920 + newlink = link;
38921 +
38922 + if (copy_to_user(buffer, newlink, len))
38923 len = -EFAULT;
38924 out:
38925 return len;
38926 diff -urNp linux-2.6.39.4/fs/namespace.c linux-2.6.39.4/fs/namespace.c
38927 --- linux-2.6.39.4/fs/namespace.c 2011-05-19 00:06:34.000000000 -0400
38928 +++ linux-2.6.39.4/fs/namespace.c 2011-08-05 19:44:37.000000000 -0400
38929 @@ -1328,6 +1328,9 @@ static int do_umount(struct vfsmount *mn
38930 if (!(sb->s_flags & MS_RDONLY))
38931 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
38932 up_write(&sb->s_umount);
38933 +
38934 + gr_log_remount(mnt->mnt_devname, retval);
38935 +
38936 return retval;
38937 }
38938
38939 @@ -1347,6 +1350,9 @@ static int do_umount(struct vfsmount *mn
38940 br_write_unlock(vfsmount_lock);
38941 up_write(&namespace_sem);
38942 release_mounts(&umount_list);
38943 +
38944 + gr_log_unmount(mnt->mnt_devname, retval);
38945 +
38946 return retval;
38947 }
38948
38949 @@ -2338,6 +2344,16 @@ long do_mount(char *dev_name, char *dir_
38950 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
38951 MS_STRICTATIME);
38952
38953 + if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
38954 + retval = -EPERM;
38955 + goto dput_out;
38956 + }
38957 +
38958 + if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
38959 + retval = -EPERM;
38960 + goto dput_out;
38961 + }
38962 +
38963 if (flags & MS_REMOUNT)
38964 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
38965 data_page);
38966 @@ -2352,6 +2368,9 @@ long do_mount(char *dev_name, char *dir_
38967 dev_name, data_page);
38968 dput_out:
38969 path_put(&path);
38970 +
38971 + gr_log_mount(dev_name, dir_name, retval);
38972 +
38973 return retval;
38974 }
38975
38976 @@ -2575,6 +2594,11 @@ SYSCALL_DEFINE2(pivot_root, const char _
38977 if (error)
38978 goto out2;
38979
38980 + if (gr_handle_chroot_pivot()) {
38981 + error = -EPERM;
38982 + goto out2;
38983 + }
38984 +
38985 get_fs_root(current->fs, &root);
38986 error = lock_mount(&old);
38987 if (error)
38988 diff -urNp linux-2.6.39.4/fs/ncpfs/dir.c linux-2.6.39.4/fs/ncpfs/dir.c
38989 --- linux-2.6.39.4/fs/ncpfs/dir.c 2011-05-19 00:06:34.000000000 -0400
38990 +++ linux-2.6.39.4/fs/ncpfs/dir.c 2011-08-05 19:44:37.000000000 -0400
38991 @@ -299,6 +299,8 @@ ncp_lookup_validate(struct dentry *dentr
38992 int res, val = 0, len;
38993 __u8 __name[NCP_MAXPATHLEN + 1];
38994
38995 + pax_track_stack();
38996 +
38997 if (dentry == dentry->d_sb->s_root)
38998 return 1;
38999
39000 @@ -844,6 +846,8 @@ static struct dentry *ncp_lookup(struct
39001 int error, res, len;
39002 __u8 __name[NCP_MAXPATHLEN + 1];
39003
39004 + pax_track_stack();
39005 +
39006 error = -EIO;
39007 if (!ncp_conn_valid(server))
39008 goto finished;
39009 @@ -931,6 +935,8 @@ int ncp_create_new(struct inode *dir, st
39010 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
39011 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
39012
39013 + pax_track_stack();
39014 +
39015 ncp_age_dentry(server, dentry);
39016 len = sizeof(__name);
39017 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
39018 @@ -992,6 +998,8 @@ static int ncp_mkdir(struct inode *dir,
39019 int error, len;
39020 __u8 __name[NCP_MAXPATHLEN + 1];
39021
39022 + pax_track_stack();
39023 +
39024 DPRINTK("ncp_mkdir: making %s/%s\n",
39025 dentry->d_parent->d_name.name, dentry->d_name.name);
39026
39027 @@ -1135,6 +1143,8 @@ static int ncp_rename(struct inode *old_
39028 int old_len, new_len;
39029 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
39030
39031 + pax_track_stack();
39032 +
39033 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
39034 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
39035 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
39036 diff -urNp linux-2.6.39.4/fs/ncpfs/inode.c linux-2.6.39.4/fs/ncpfs/inode.c
39037 --- linux-2.6.39.4/fs/ncpfs/inode.c 2011-05-19 00:06:34.000000000 -0400
39038 +++ linux-2.6.39.4/fs/ncpfs/inode.c 2011-08-05 19:44:37.000000000 -0400
39039 @@ -461,6 +461,8 @@ static int ncp_fill_super(struct super_b
39040 #endif
39041 struct ncp_entry_info finfo;
39042
39043 + pax_track_stack();
39044 +
39045 data.wdog_pid = NULL;
39046 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
39047 if (!server)
39048 diff -urNp linux-2.6.39.4/fs/nfs/inode.c linux-2.6.39.4/fs/nfs/inode.c
39049 --- linux-2.6.39.4/fs/nfs/inode.c 2011-07-09 09:18:51.000000000 -0400
39050 +++ linux-2.6.39.4/fs/nfs/inode.c 2011-08-05 19:44:37.000000000 -0400
39051 @@ -150,7 +150,7 @@ static void nfs_zap_caches_locked(struct
39052 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
39053 nfsi->attrtimeo_timestamp = jiffies;
39054
39055 - memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
39056 + memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
39057 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
39058 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
39059 else
39060 @@ -1000,16 +1000,16 @@ static int nfs_size_need_update(const st
39061 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
39062 }
39063
39064 -static atomic_long_t nfs_attr_generation_counter;
39065 +static atomic_long_unchecked_t nfs_attr_generation_counter;
39066
39067 static unsigned long nfs_read_attr_generation_counter(void)
39068 {
39069 - return atomic_long_read(&nfs_attr_generation_counter);
39070 + return atomic_long_read_unchecked(&nfs_attr_generation_counter);
39071 }
39072
39073 unsigned long nfs_inc_attr_generation_counter(void)
39074 {
39075 - return atomic_long_inc_return(&nfs_attr_generation_counter);
39076 + return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
39077 }
39078
39079 void nfs_fattr_init(struct nfs_fattr *fattr)
39080 diff -urNp linux-2.6.39.4/fs/nfsd/nfs4state.c linux-2.6.39.4/fs/nfsd/nfs4state.c
39081 --- linux-2.6.39.4/fs/nfsd/nfs4state.c 2011-05-19 00:06:34.000000000 -0400
39082 +++ linux-2.6.39.4/fs/nfsd/nfs4state.c 2011-08-05 19:44:37.000000000 -0400
39083 @@ -3784,6 +3784,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
39084 unsigned int strhashval;
39085 int err;
39086
39087 + pax_track_stack();
39088 +
39089 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
39090 (long long) lock->lk_offset,
39091 (long long) lock->lk_length);
39092 diff -urNp linux-2.6.39.4/fs/nfsd/nfs4xdr.c linux-2.6.39.4/fs/nfsd/nfs4xdr.c
39093 --- linux-2.6.39.4/fs/nfsd/nfs4xdr.c 2011-05-19 00:06:34.000000000 -0400
39094 +++ linux-2.6.39.4/fs/nfsd/nfs4xdr.c 2011-08-05 19:44:37.000000000 -0400
39095 @@ -1793,6 +1793,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
39096 .dentry = dentry,
39097 };
39098
39099 + pax_track_stack();
39100 +
39101 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
39102 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
39103 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
39104 diff -urNp linux-2.6.39.4/fs/nfsd/vfs.c linux-2.6.39.4/fs/nfsd/vfs.c
39105 --- linux-2.6.39.4/fs/nfsd/vfs.c 2011-07-09 09:18:51.000000000 -0400
39106 +++ linux-2.6.39.4/fs/nfsd/vfs.c 2011-08-05 19:44:37.000000000 -0400
39107 @@ -901,7 +901,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
39108 } else {
39109 oldfs = get_fs();
39110 set_fs(KERNEL_DS);
39111 - host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
39112 + host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
39113 set_fs(oldfs);
39114 }
39115
39116 @@ -1005,7 +1005,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
39117
39118 /* Write the data. */
39119 oldfs = get_fs(); set_fs(KERNEL_DS);
39120 - host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
39121 + host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
39122 set_fs(oldfs);
39123 if (host_err < 0)
39124 goto out_nfserr;
39125 @@ -1528,7 +1528,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
39126 */
39127
39128 oldfs = get_fs(); set_fs(KERNEL_DS);
39129 - host_err = inode->i_op->readlink(dentry, buf, *lenp);
39130 + host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
39131 set_fs(oldfs);
39132
39133 if (host_err < 0)
39134 diff -urNp linux-2.6.39.4/fs/notify/fanotify/fanotify_user.c linux-2.6.39.4/fs/notify/fanotify/fanotify_user.c
39135 --- linux-2.6.39.4/fs/notify/fanotify/fanotify_user.c 2011-05-19 00:06:34.000000000 -0400
39136 +++ linux-2.6.39.4/fs/notify/fanotify/fanotify_user.c 2011-08-14 11:28:46.000000000 -0400
39137 @@ -276,7 +276,8 @@ static ssize_t copy_event_to_user(struct
39138 goto out_close_fd;
39139
39140 ret = -EFAULT;
39141 - if (copy_to_user(buf, &fanotify_event_metadata,
39142 + if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
39143 + copy_to_user(buf, &fanotify_event_metadata,
39144 fanotify_event_metadata.event_len))
39145 goto out_kill_access_response;
39146
39147 diff -urNp linux-2.6.39.4/fs/notify/notification.c linux-2.6.39.4/fs/notify/notification.c
39148 --- linux-2.6.39.4/fs/notify/notification.c 2011-05-19 00:06:34.000000000 -0400
39149 +++ linux-2.6.39.4/fs/notify/notification.c 2011-08-05 19:44:37.000000000 -0400
39150 @@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
39151 * get set to 0 so it will never get 'freed'
39152 */
39153 static struct fsnotify_event *q_overflow_event;
39154 -static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
39155 +static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
39156
39157 /**
39158 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
39159 @@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
39160 */
39161 u32 fsnotify_get_cookie(void)
39162 {
39163 - return atomic_inc_return(&fsnotify_sync_cookie);
39164 + return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
39165 }
39166 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
39167
39168 diff -urNp linux-2.6.39.4/fs/ntfs/dir.c linux-2.6.39.4/fs/ntfs/dir.c
39169 --- linux-2.6.39.4/fs/ntfs/dir.c 2011-05-19 00:06:34.000000000 -0400
39170 +++ linux-2.6.39.4/fs/ntfs/dir.c 2011-08-05 19:44:37.000000000 -0400
39171 @@ -1329,7 +1329,7 @@ find_next_index_buffer:
39172 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
39173 ~(s64)(ndir->itype.index.block_size - 1)));
39174 /* Bounds checks. */
39175 - if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
39176 + if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
39177 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
39178 "inode 0x%lx or driver bug.", vdir->i_ino);
39179 goto err_out;
39180 diff -urNp linux-2.6.39.4/fs/ntfs/file.c linux-2.6.39.4/fs/ntfs/file.c
39181 --- linux-2.6.39.4/fs/ntfs/file.c 2011-05-19 00:06:34.000000000 -0400
39182 +++ linux-2.6.39.4/fs/ntfs/file.c 2011-08-05 19:44:37.000000000 -0400
39183 @@ -2222,6 +2222,6 @@ const struct inode_operations ntfs_file_
39184 #endif /* NTFS_RW */
39185 };
39186
39187 -const struct file_operations ntfs_empty_file_ops = {};
39188 +const struct file_operations ntfs_empty_file_ops __read_only;
39189
39190 -const struct inode_operations ntfs_empty_inode_ops = {};
39191 +const struct inode_operations ntfs_empty_inode_ops __read_only;
39192 diff -urNp linux-2.6.39.4/fs/ocfs2/localalloc.c linux-2.6.39.4/fs/ocfs2/localalloc.c
39193 --- linux-2.6.39.4/fs/ocfs2/localalloc.c 2011-05-19 00:06:34.000000000 -0400
39194 +++ linux-2.6.39.4/fs/ocfs2/localalloc.c 2011-08-05 19:44:37.000000000 -0400
39195 @@ -1283,7 +1283,7 @@ static int ocfs2_local_alloc_slide_windo
39196 goto bail;
39197 }
39198
39199 - atomic_inc(&osb->alloc_stats.moves);
39200 + atomic_inc_unchecked(&osb->alloc_stats.moves);
39201
39202 bail:
39203 if (handle)
39204 diff -urNp linux-2.6.39.4/fs/ocfs2/namei.c linux-2.6.39.4/fs/ocfs2/namei.c
39205 --- linux-2.6.39.4/fs/ocfs2/namei.c 2011-05-19 00:06:34.000000000 -0400
39206 +++ linux-2.6.39.4/fs/ocfs2/namei.c 2011-08-05 19:44:37.000000000 -0400
39207 @@ -1063,6 +1063,8 @@ static int ocfs2_rename(struct inode *ol
39208 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
39209 struct ocfs2_dir_lookup_result target_insert = { NULL, };
39210
39211 + pax_track_stack();
39212 +
39213 /* At some point it might be nice to break this function up a
39214 * bit. */
39215
39216 diff -urNp linux-2.6.39.4/fs/ocfs2/ocfs2.h linux-2.6.39.4/fs/ocfs2/ocfs2.h
39217 --- linux-2.6.39.4/fs/ocfs2/ocfs2.h 2011-05-19 00:06:34.000000000 -0400
39218 +++ linux-2.6.39.4/fs/ocfs2/ocfs2.h 2011-08-05 19:44:37.000000000 -0400
39219 @@ -235,11 +235,11 @@ enum ocfs2_vol_state
39220
39221 struct ocfs2_alloc_stats
39222 {
39223 - atomic_t moves;
39224 - atomic_t local_data;
39225 - atomic_t bitmap_data;
39226 - atomic_t bg_allocs;
39227 - atomic_t bg_extends;
39228 + atomic_unchecked_t moves;
39229 + atomic_unchecked_t local_data;
39230 + atomic_unchecked_t bitmap_data;
39231 + atomic_unchecked_t bg_allocs;
39232 + atomic_unchecked_t bg_extends;
39233 };
39234
39235 enum ocfs2_local_alloc_state
39236 diff -urNp linux-2.6.39.4/fs/ocfs2/suballoc.c linux-2.6.39.4/fs/ocfs2/suballoc.c
39237 --- linux-2.6.39.4/fs/ocfs2/suballoc.c 2011-05-19 00:06:34.000000000 -0400
39238 +++ linux-2.6.39.4/fs/ocfs2/suballoc.c 2011-08-05 19:44:37.000000000 -0400
39239 @@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(s
39240 mlog_errno(status);
39241 goto bail;
39242 }
39243 - atomic_inc(&osb->alloc_stats.bg_extends);
39244 + atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
39245
39246 /* You should never ask for this much metadata */
39247 BUG_ON(bits_wanted >
39248 @@ -2008,7 +2008,7 @@ int ocfs2_claim_metadata(handle_t *handl
39249 mlog_errno(status);
39250 goto bail;
39251 }
39252 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39253 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39254
39255 *suballoc_loc = res.sr_bg_blkno;
39256 *suballoc_bit_start = res.sr_bit_offset;
39257 @@ -2172,7 +2172,7 @@ int ocfs2_claim_new_inode_at_loc(handle_
39258 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
39259 res->sr_bits);
39260
39261 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39262 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39263
39264 BUG_ON(res->sr_bits != 1);
39265
39266 @@ -2214,7 +2214,7 @@ int ocfs2_claim_new_inode(handle_t *hand
39267 mlog_errno(status);
39268 goto bail;
39269 }
39270 - atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39271 + atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
39272
39273 BUG_ON(res.sr_bits != 1);
39274
39275 @@ -2318,7 +2318,7 @@ int __ocfs2_claim_clusters(handle_t *han
39276 cluster_start,
39277 num_clusters);
39278 if (!status)
39279 - atomic_inc(&osb->alloc_stats.local_data);
39280 + atomic_inc_unchecked(&osb->alloc_stats.local_data);
39281 } else {
39282 if (min_clusters > (osb->bitmap_cpg - 1)) {
39283 /* The only paths asking for contiguousness
39284 @@ -2344,7 +2344,7 @@ int __ocfs2_claim_clusters(handle_t *han
39285 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
39286 res.sr_bg_blkno,
39287 res.sr_bit_offset);
39288 - atomic_inc(&osb->alloc_stats.bitmap_data);
39289 + atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
39290 *num_clusters = res.sr_bits;
39291 }
39292 }
39293 diff -urNp linux-2.6.39.4/fs/ocfs2/super.c linux-2.6.39.4/fs/ocfs2/super.c
39294 --- linux-2.6.39.4/fs/ocfs2/super.c 2011-05-19 00:06:34.000000000 -0400
39295 +++ linux-2.6.39.4/fs/ocfs2/super.c 2011-08-05 19:44:37.000000000 -0400
39296 @@ -299,11 +299,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
39297 "%10s => GlobalAllocs: %d LocalAllocs: %d "
39298 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
39299 "Stats",
39300 - atomic_read(&osb->alloc_stats.bitmap_data),
39301 - atomic_read(&osb->alloc_stats.local_data),
39302 - atomic_read(&osb->alloc_stats.bg_allocs),
39303 - atomic_read(&osb->alloc_stats.moves),
39304 - atomic_read(&osb->alloc_stats.bg_extends));
39305 + atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
39306 + atomic_read_unchecked(&osb->alloc_stats.local_data),
39307 + atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
39308 + atomic_read_unchecked(&osb->alloc_stats.moves),
39309 + atomic_read_unchecked(&osb->alloc_stats.bg_extends));
39310
39311 out += snprintf(buf + out, len - out,
39312 "%10s => State: %u Descriptor: %llu Size: %u bits "
39313 @@ -2111,11 +2111,11 @@ static int ocfs2_initialize_super(struct
39314 spin_lock_init(&osb->osb_xattr_lock);
39315 ocfs2_init_steal_slots(osb);
39316
39317 - atomic_set(&osb->alloc_stats.moves, 0);
39318 - atomic_set(&osb->alloc_stats.local_data, 0);
39319 - atomic_set(&osb->alloc_stats.bitmap_data, 0);
39320 - atomic_set(&osb->alloc_stats.bg_allocs, 0);
39321 - atomic_set(&osb->alloc_stats.bg_extends, 0);
39322 + atomic_set_unchecked(&osb->alloc_stats.moves, 0);
39323 + atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
39324 + atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
39325 + atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
39326 + atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
39327
39328 /* Copy the blockcheck stats from the superblock probe */
39329 osb->osb_ecc_stats = *stats;
39330 diff -urNp linux-2.6.39.4/fs/ocfs2/symlink.c linux-2.6.39.4/fs/ocfs2/symlink.c
39331 --- linux-2.6.39.4/fs/ocfs2/symlink.c 2011-05-19 00:06:34.000000000 -0400
39332 +++ linux-2.6.39.4/fs/ocfs2/symlink.c 2011-08-05 19:44:37.000000000 -0400
39333 @@ -142,7 +142,7 @@ bail:
39334
39335 static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
39336 {
39337 - char *link = nd_get_link(nd);
39338 + const char *link = nd_get_link(nd);
39339 if (!IS_ERR(link))
39340 kfree(link);
39341 }
39342 diff -urNp linux-2.6.39.4/fs/open.c linux-2.6.39.4/fs/open.c
39343 --- linux-2.6.39.4/fs/open.c 2011-05-19 00:06:34.000000000 -0400
39344 +++ linux-2.6.39.4/fs/open.c 2011-08-05 19:44:37.000000000 -0400
39345 @@ -112,6 +112,10 @@ static long do_sys_truncate(const char _
39346 error = locks_verify_truncate(inode, NULL, length);
39347 if (!error)
39348 error = security_path_truncate(&path);
39349 +
39350 + if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
39351 + error = -EACCES;
39352 +
39353 if (!error)
39354 error = do_truncate(path.dentry, length, 0, NULL);
39355
39356 @@ -358,6 +362,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
39357 if (__mnt_is_readonly(path.mnt))
39358 res = -EROFS;
39359
39360 + if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
39361 + res = -EACCES;
39362 +
39363 out_path_release:
39364 path_put(&path);
39365 out:
39366 @@ -384,6 +391,8 @@ SYSCALL_DEFINE1(chdir, const char __user
39367 if (error)
39368 goto dput_and_out;
39369
39370 + gr_log_chdir(path.dentry, path.mnt);
39371 +
39372 set_fs_pwd(current->fs, &path);
39373
39374 dput_and_out:
39375 @@ -410,6 +419,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
39376 goto out_putf;
39377
39378 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
39379 +
39380 + if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
39381 + error = -EPERM;
39382 +
39383 + if (!error)
39384 + gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
39385 +
39386 if (!error)
39387 set_fs_pwd(current->fs, &file->f_path);
39388 out_putf:
39389 @@ -438,7 +454,18 @@ SYSCALL_DEFINE1(chroot, const char __use
39390 if (error)
39391 goto dput_and_out;
39392
39393 + if (gr_handle_chroot_chroot(path.dentry, path.mnt))
39394 + goto dput_and_out;
39395 +
39396 + if (gr_handle_chroot_caps(&path)) {
39397 + error = -ENOMEM;
39398 + goto dput_and_out;
39399 + }
39400 +
39401 set_fs_root(current->fs, &path);
39402 +
39403 + gr_handle_chroot_chdir(&path);
39404 +
39405 error = 0;
39406 dput_and_out:
39407 path_put(&path);
39408 @@ -466,12 +493,25 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
39409 err = mnt_want_write_file(file);
39410 if (err)
39411 goto out_putf;
39412 +
39413 mutex_lock(&inode->i_mutex);
39414 +
39415 + if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
39416 + err = -EACCES;
39417 + goto out_unlock;
39418 + }
39419 +
39420 err = security_path_chmod(dentry, file->f_vfsmnt, mode);
39421 if (err)
39422 goto out_unlock;
39423 if (mode == (mode_t) -1)
39424 mode = inode->i_mode;
39425 +
39426 + if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
39427 + err = -EACCES;
39428 + goto out_unlock;
39429 + }
39430 +
39431 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39432 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39433 err = notify_change(dentry, &newattrs);
39434 @@ -499,12 +539,25 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
39435 error = mnt_want_write(path.mnt);
39436 if (error)
39437 goto dput_and_out;
39438 +
39439 mutex_lock(&inode->i_mutex);
39440 +
39441 + if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
39442 + error = -EACCES;
39443 + goto out_unlock;
39444 + }
39445 +
39446 error = security_path_chmod(path.dentry, path.mnt, mode);
39447 if (error)
39448 goto out_unlock;
39449 if (mode == (mode_t) -1)
39450 mode = inode->i_mode;
39451 +
39452 + if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
39453 + error = -EACCES;
39454 + goto out_unlock;
39455 + }
39456 +
39457 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
39458 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
39459 error = notify_change(path.dentry, &newattrs);
39460 @@ -528,6 +581,9 @@ static int chown_common(struct path *pat
39461 int error;
39462 struct iattr newattrs;
39463
39464 + if (!gr_acl_handle_chown(path->dentry, path->mnt))
39465 + return -EACCES;
39466 +
39467 newattrs.ia_valid = ATTR_CTIME;
39468 if (user != (uid_t) -1) {
39469 newattrs.ia_valid |= ATTR_UID;
39470 @@ -998,7 +1054,10 @@ long do_sys_open(int dfd, const char __u
39471 if (!IS_ERR(tmp)) {
39472 fd = get_unused_fd_flags(flags);
39473 if (fd >= 0) {
39474 - struct file *f = do_filp_open(dfd, tmp, &op, lookup);
39475 + struct file *f;
39476 + /* don't allow to be set by userland */
39477 + flags &= ~FMODE_GREXEC;
39478 + f = do_filp_open(dfd, tmp, &op, lookup);
39479 if (IS_ERR(f)) {
39480 put_unused_fd(fd);
39481 fd = PTR_ERR(f);
39482 diff -urNp linux-2.6.39.4/fs/partitions/ldm.c linux-2.6.39.4/fs/partitions/ldm.c
39483 --- linux-2.6.39.4/fs/partitions/ldm.c 2011-06-03 00:04:14.000000000 -0400
39484 +++ linux-2.6.39.4/fs/partitions/ldm.c 2011-08-05 19:44:37.000000000 -0400
39485 @@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
39486 ldm_error ("A VBLK claims to have %d parts.", num);
39487 return false;
39488 }
39489 +
39490 if (rec >= num) {
39491 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
39492 return false;
39493 @@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
39494 goto found;
39495 }
39496
39497 - f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
39498 + f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
39499 if (!f) {
39500 ldm_crit ("Out of memory.");
39501 return false;
39502 diff -urNp linux-2.6.39.4/fs/pipe.c linux-2.6.39.4/fs/pipe.c
39503 --- linux-2.6.39.4/fs/pipe.c 2011-05-19 00:06:34.000000000 -0400
39504 +++ linux-2.6.39.4/fs/pipe.c 2011-08-05 19:44:37.000000000 -0400
39505 @@ -420,9 +420,9 @@ redo:
39506 }
39507 if (bufs) /* More to do? */
39508 continue;
39509 - if (!pipe->writers)
39510 + if (!atomic_read(&pipe->writers))
39511 break;
39512 - if (!pipe->waiting_writers) {
39513 + if (!atomic_read(&pipe->waiting_writers)) {
39514 /* syscall merging: Usually we must not sleep
39515 * if O_NONBLOCK is set, or if we got some data.
39516 * But if a writer sleeps in kernel space, then
39517 @@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str
39518 mutex_lock(&inode->i_mutex);
39519 pipe = inode->i_pipe;
39520
39521 - if (!pipe->readers) {
39522 + if (!atomic_read(&pipe->readers)) {
39523 send_sig(SIGPIPE, current, 0);
39524 ret = -EPIPE;
39525 goto out;
39526 @@ -530,7 +530,7 @@ redo1:
39527 for (;;) {
39528 int bufs;
39529
39530 - if (!pipe->readers) {
39531 + if (!atomic_read(&pipe->readers)) {
39532 send_sig(SIGPIPE, current, 0);
39533 if (!ret)
39534 ret = -EPIPE;
39535 @@ -616,9 +616,9 @@ redo2:
39536 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
39537 do_wakeup = 0;
39538 }
39539 - pipe->waiting_writers++;
39540 + atomic_inc(&pipe->waiting_writers);
39541 pipe_wait(pipe);
39542 - pipe->waiting_writers--;
39543 + atomic_dec(&pipe->waiting_writers);
39544 }
39545 out:
39546 mutex_unlock(&inode->i_mutex);
39547 @@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table
39548 mask = 0;
39549 if (filp->f_mode & FMODE_READ) {
39550 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
39551 - if (!pipe->writers && filp->f_version != pipe->w_counter)
39552 + if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
39553 mask |= POLLHUP;
39554 }
39555
39556 @@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table
39557 * Most Unices do not set POLLERR for FIFOs but on Linux they
39558 * behave exactly like pipes for poll().
39559 */
39560 - if (!pipe->readers)
39561 + if (!atomic_read(&pipe->readers))
39562 mask |= POLLERR;
39563 }
39564
39565 @@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de
39566
39567 mutex_lock(&inode->i_mutex);
39568 pipe = inode->i_pipe;
39569 - pipe->readers -= decr;
39570 - pipe->writers -= decw;
39571 + atomic_sub(decr, &pipe->readers);
39572 + atomic_sub(decw, &pipe->writers);
39573
39574 - if (!pipe->readers && !pipe->writers) {
39575 + if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
39576 free_pipe_info(inode);
39577 } else {
39578 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
39579 @@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru
39580
39581 if (inode->i_pipe) {
39582 ret = 0;
39583 - inode->i_pipe->readers++;
39584 + atomic_inc(&inode->i_pipe->readers);
39585 }
39586
39587 mutex_unlock(&inode->i_mutex);
39588 @@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str
39589
39590 if (inode->i_pipe) {
39591 ret = 0;
39592 - inode->i_pipe->writers++;
39593 + atomic_inc(&inode->i_pipe->writers);
39594 }
39595
39596 mutex_unlock(&inode->i_mutex);
39597 @@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru
39598 if (inode->i_pipe) {
39599 ret = 0;
39600 if (filp->f_mode & FMODE_READ)
39601 - inode->i_pipe->readers++;
39602 + atomic_inc(&inode->i_pipe->readers);
39603 if (filp->f_mode & FMODE_WRITE)
39604 - inode->i_pipe->writers++;
39605 + atomic_inc(&inode->i_pipe->writers);
39606 }
39607
39608 mutex_unlock(&inode->i_mutex);
39609 @@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode)
39610 inode->i_pipe = NULL;
39611 }
39612
39613 -static struct vfsmount *pipe_mnt __read_mostly;
39614 +struct vfsmount *pipe_mnt __read_mostly;
39615
39616 /*
39617 * pipefs_dname() is called from d_path().
39618 @@ -961,7 +961,8 @@ static struct inode * get_pipe_inode(voi
39619 goto fail_iput;
39620 inode->i_pipe = pipe;
39621
39622 - pipe->readers = pipe->writers = 1;
39623 + atomic_set(&pipe->readers, 1);
39624 + atomic_set(&pipe->writers, 1);
39625 inode->i_fop = &rdwr_pipefifo_fops;
39626
39627 /*
39628 diff -urNp linux-2.6.39.4/fs/proc/array.c linux-2.6.39.4/fs/proc/array.c
39629 --- linux-2.6.39.4/fs/proc/array.c 2011-05-19 00:06:34.000000000 -0400
39630 +++ linux-2.6.39.4/fs/proc/array.c 2011-08-05 19:44:37.000000000 -0400
39631 @@ -60,6 +60,7 @@
39632 #include <linux/tty.h>
39633 #include <linux/string.h>
39634 #include <linux/mman.h>
39635 +#include <linux/grsecurity.h>
39636 #include <linux/proc_fs.h>
39637 #include <linux/ioport.h>
39638 #include <linux/uaccess.h>
39639 @@ -337,6 +338,21 @@ static void task_cpus_allowed(struct seq
39640 seq_putc(m, '\n');
39641 }
39642
39643 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39644 +static inline void task_pax(struct seq_file *m, struct task_struct *p)
39645 +{
39646 + if (p->mm)
39647 + seq_printf(m, "PaX:\t%c%c%c%c%c\n",
39648 + p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
39649 + p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
39650 + p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
39651 + p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
39652 + p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
39653 + else
39654 + seq_printf(m, "PaX:\t-----\n");
39655 +}
39656 +#endif
39657 +
39658 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
39659 struct pid *pid, struct task_struct *task)
39660 {
39661 @@ -354,9 +370,24 @@ int proc_pid_status(struct seq_file *m,
39662 task_cpus_allowed(m, task);
39663 cpuset_task_status_allowed(m, task);
39664 task_context_switch_counts(m, task);
39665 +
39666 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39667 + task_pax(m, task);
39668 +#endif
39669 +
39670 +#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
39671 + task_grsec_rbac(m, task);
39672 +#endif
39673 +
39674 return 0;
39675 }
39676
39677 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39678 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39679 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
39680 + _mm->pax_flags & MF_PAX_SEGMEXEC))
39681 +#endif
39682 +
39683 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
39684 struct pid *pid, struct task_struct *task, int whole)
39685 {
39686 @@ -375,9 +406,11 @@ static int do_task_stat(struct seq_file
39687 cputime_t cutime, cstime, utime, stime;
39688 cputime_t cgtime, gtime;
39689 unsigned long rsslim = 0;
39690 - char tcomm[sizeof(task->comm)];
39691 + char tcomm[sizeof(task->comm)] = { 0 };
39692 unsigned long flags;
39693
39694 + pax_track_stack();
39695 +
39696 state = *get_task_state(task);
39697 vsize = eip = esp = 0;
39698 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
39699 @@ -449,6 +482,19 @@ static int do_task_stat(struct seq_file
39700 gtime = task->gtime;
39701 }
39702
39703 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39704 + if (PAX_RAND_FLAGS(mm)) {
39705 + eip = 0;
39706 + esp = 0;
39707 + wchan = 0;
39708 + }
39709 +#endif
39710 +#ifdef CONFIG_GRKERNSEC_HIDESYM
39711 + wchan = 0;
39712 + eip =0;
39713 + esp =0;
39714 +#endif
39715 +
39716 /* scale priority and nice values from timeslices to -20..20 */
39717 /* to make it look like a "normal" Unix priority/nice value */
39718 priority = task_prio(task);
39719 @@ -489,9 +535,15 @@ static int do_task_stat(struct seq_file
39720 vsize,
39721 mm ? get_mm_rss(mm) : 0,
39722 rsslim,
39723 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39724 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
39725 + PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
39726 + PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
39727 +#else
39728 mm ? (permitted ? mm->start_code : 1) : 0,
39729 mm ? (permitted ? mm->end_code : 1) : 0,
39730 (permitted && mm) ? mm->start_stack : 0,
39731 +#endif
39732 esp,
39733 eip,
39734 /* The signal information here is obsolete.
39735 @@ -544,3 +596,18 @@ int proc_pid_statm(struct seq_file *m, s
39736
39737 return 0;
39738 }
39739 +
39740 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
39741 +int proc_pid_ipaddr(struct task_struct *task, char *buffer)
39742 +{
39743 + u32 curr_ip = 0;
39744 + unsigned long flags;
39745 +
39746 + if (lock_task_sighand(task, &flags)) {
39747 + curr_ip = task->signal->curr_ip;
39748 + unlock_task_sighand(task, &flags);
39749 + }
39750 +
39751 + return sprintf(buffer, "%pI4\n", &curr_ip);
39752 +}
39753 +#endif
39754 diff -urNp linux-2.6.39.4/fs/proc/base.c linux-2.6.39.4/fs/proc/base.c
39755 --- linux-2.6.39.4/fs/proc/base.c 2011-08-05 21:11:51.000000000 -0400
39756 +++ linux-2.6.39.4/fs/proc/base.c 2011-08-05 21:13:18.000000000 -0400
39757 @@ -104,6 +104,22 @@ struct pid_entry {
39758 union proc_op op;
39759 };
39760
39761 +struct getdents_callback {
39762 + struct linux_dirent __user * current_dir;
39763 + struct linux_dirent __user * previous;
39764 + struct file * file;
39765 + int count;
39766 + int error;
39767 +};
39768 +
39769 +static int gr_fake_filldir(void * __buf, const char *name, int namlen,
39770 + loff_t offset, u64 ino, unsigned int d_type)
39771 +{
39772 + struct getdents_callback * buf = (struct getdents_callback *) __buf;
39773 + buf->error = -EINVAL;
39774 + return 0;
39775 +}
39776 +
39777 #define NOD(NAME, MODE, IOP, FOP, OP) { \
39778 .name = (NAME), \
39779 .len = sizeof(NAME) - 1, \
39780 @@ -206,6 +222,9 @@ static struct mm_struct *__check_mem_per
39781 if (task == current)
39782 return mm;
39783
39784 + if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
39785 + return ERR_PTR(-EPERM);
39786 +
39787 /*
39788 * If current is actively ptrace'ing, and would also be
39789 * permitted to freshly attach with ptrace now, permit it.
39790 @@ -279,6 +298,9 @@ static int proc_pid_cmdline(struct task_
39791 if (!mm->arg_end)
39792 goto out_mm; /* Shh! No looking before we're done */
39793
39794 + if (gr_acl_handle_procpidmem(task))
39795 + goto out_mm;
39796 +
39797 len = mm->arg_end - mm->arg_start;
39798
39799 if (len > PAGE_SIZE)
39800 @@ -306,12 +328,28 @@ out:
39801 return res;
39802 }
39803
39804 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39805 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
39806 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
39807 + _mm->pax_flags & MF_PAX_SEGMEXEC))
39808 +#endif
39809 +
39810 static int proc_pid_auxv(struct task_struct *task, char *buffer)
39811 {
39812 struct mm_struct *mm = mm_for_maps(task);
39813 int res = PTR_ERR(mm);
39814 if (mm && !IS_ERR(mm)) {
39815 unsigned int nwords = 0;
39816 +
39817 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
39818 + /* allow if we're currently ptracing this task */
39819 + if (PAX_RAND_FLAGS(mm) &&
39820 + (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
39821 + mmput(mm);
39822 + return res;
39823 + }
39824 +#endif
39825 +
39826 do {
39827 nwords += 2;
39828 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
39829 @@ -325,7 +363,7 @@ static int proc_pid_auxv(struct task_str
39830 }
39831
39832
39833 -#ifdef CONFIG_KALLSYMS
39834 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39835 /*
39836 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
39837 * Returns the resolved symbol. If that fails, simply return the address.
39838 @@ -364,7 +402,7 @@ static void unlock_trace(struct task_str
39839 mutex_unlock(&task->signal->cred_guard_mutex);
39840 }
39841
39842 -#ifdef CONFIG_STACKTRACE
39843 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
39844
39845 #define MAX_STACK_TRACE_DEPTH 64
39846
39847 @@ -555,7 +593,7 @@ static int proc_pid_limits(struct task_s
39848 return count;
39849 }
39850
39851 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
39852 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
39853 static int proc_pid_syscall(struct task_struct *task, char *buffer)
39854 {
39855 long nr;
39856 @@ -584,7 +622,7 @@ static int proc_pid_syscall(struct task_
39857 /************************************************************************/
39858
39859 /* permission checks */
39860 -static int proc_fd_access_allowed(struct inode *inode)
39861 +static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
39862 {
39863 struct task_struct *task;
39864 int allowed = 0;
39865 @@ -594,7 +632,10 @@ static int proc_fd_access_allowed(struct
39866 */
39867 task = get_proc_task(inode);
39868 if (task) {
39869 - allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39870 + if (log)
39871 + allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
39872 + else
39873 + allowed = ptrace_may_access(task, PTRACE_MODE_READ);
39874 put_task_struct(task);
39875 }
39876 return allowed;
39877 @@ -973,6 +1014,9 @@ static ssize_t environ_read(struct file
39878 if (!task)
39879 goto out_no_task;
39880
39881 + if (gr_acl_handle_procpidmem(task))
39882 + goto out;
39883 +
39884 ret = -ENOMEM;
39885 page = (char *)__get_free_page(GFP_TEMPORARY);
39886 if (!page)
39887 @@ -1660,7 +1704,7 @@ static void *proc_pid_follow_link(struct
39888 path_put(&nd->path);
39889
39890 /* Are we allowed to snoop on the tasks file descriptors? */
39891 - if (!proc_fd_access_allowed(inode))
39892 + if (!proc_fd_access_allowed(inode,0))
39893 goto out;
39894
39895 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
39896 @@ -1699,8 +1743,18 @@ static int proc_pid_readlink(struct dent
39897 struct path path;
39898
39899 /* Are we allowed to snoop on the tasks file descriptors? */
39900 - if (!proc_fd_access_allowed(inode))
39901 - goto out;
39902 + /* logging this is needed for learning on chromium to work properly,
39903 + but we don't want to flood the logs from 'ps' which does a readlink
39904 + on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
39905 + CAP_SYS_PTRACE as it's not necessary for its basic functionality
39906 + */
39907 + if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
39908 + if (!proc_fd_access_allowed(inode,0))
39909 + goto out;
39910 + } else {
39911 + if (!proc_fd_access_allowed(inode,1))
39912 + goto out;
39913 + }
39914
39915 error = PROC_I(inode)->op.proc_get_link(inode, &path);
39916 if (error)
39917 @@ -1766,7 +1820,11 @@ static struct inode *proc_pid_make_inode
39918 rcu_read_lock();
39919 cred = __task_cred(task);
39920 inode->i_uid = cred->euid;
39921 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39922 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39923 +#else
39924 inode->i_gid = cred->egid;
39925 +#endif
39926 rcu_read_unlock();
39927 }
39928 security_task_to_inode(task, inode);
39929 @@ -1784,6 +1842,9 @@ static int pid_getattr(struct vfsmount *
39930 struct inode *inode = dentry->d_inode;
39931 struct task_struct *task;
39932 const struct cred *cred;
39933 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39934 + const struct cred *tmpcred = current_cred();
39935 +#endif
39936
39937 generic_fillattr(inode, stat);
39938
39939 @@ -1791,13 +1852,41 @@ static int pid_getattr(struct vfsmount *
39940 stat->uid = 0;
39941 stat->gid = 0;
39942 task = pid_task(proc_pid(inode), PIDTYPE_PID);
39943 +
39944 + if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
39945 + rcu_read_unlock();
39946 + return -ENOENT;
39947 + }
39948 +
39949 if (task) {
39950 + cred = __task_cred(task);
39951 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39952 + if (!tmpcred->uid || (tmpcred->uid == cred->uid)
39953 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39954 + || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
39955 +#endif
39956 + ) {
39957 +#endif
39958 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39959 +#ifdef CONFIG_GRKERNSEC_PROC_USER
39960 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39961 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39962 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39963 +#endif
39964 task_dumpable(task)) {
39965 - cred = __task_cred(task);
39966 stat->uid = cred->euid;
39967 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39968 + stat->gid = CONFIG_GRKERNSEC_PROC_GID;
39969 +#else
39970 stat->gid = cred->egid;
39971 +#endif
39972 }
39973 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39974 + } else {
39975 + rcu_read_unlock();
39976 + return -ENOENT;
39977 + }
39978 +#endif
39979 }
39980 rcu_read_unlock();
39981 return 0;
39982 @@ -1834,11 +1923,20 @@ static int pid_revalidate(struct dentry
39983
39984 if (task) {
39985 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
39986 +#ifdef CONFIG_GRKERNSEC_PROC_USER
39987 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
39988 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
39989 + (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
39990 +#endif
39991 task_dumpable(task)) {
39992 rcu_read_lock();
39993 cred = __task_cred(task);
39994 inode->i_uid = cred->euid;
39995 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
39996 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
39997 +#else
39998 inode->i_gid = cred->egid;
39999 +#endif
40000 rcu_read_unlock();
40001 } else {
40002 inode->i_uid = 0;
40003 @@ -1959,7 +2057,8 @@ static int proc_fd_info(struct inode *in
40004 int fd = proc_fd(inode);
40005
40006 if (task) {
40007 - files = get_files_struct(task);
40008 + if (!gr_acl_handle_procpidmem(task))
40009 + files = get_files_struct(task);
40010 put_task_struct(task);
40011 }
40012 if (files) {
40013 @@ -2219,15 +2318,25 @@ static const struct file_operations proc
40014 */
40015 static int proc_fd_permission(struct inode *inode, int mask, unsigned int flags)
40016 {
40017 + struct task_struct *task;
40018 int rv;
40019
40020 if (flags & IPERM_FLAG_RCU)
40021 return -ECHILD;
40022 rv = generic_permission(inode, mask, flags, NULL);
40023 - if (rv == 0)
40024 - return 0;
40025 +
40026 if (task_pid(current) == proc_pid(inode))
40027 rv = 0;
40028 +
40029 + task = get_proc_task(inode);
40030 + if (task == NULL)
40031 + return rv;
40032 +
40033 + if (gr_acl_handle_procpidmem(task))
40034 + rv = -EACCES;
40035 +
40036 + put_task_struct(task);
40037 +
40038 return rv;
40039 }
40040
40041 @@ -2337,6 +2446,9 @@ static struct dentry *proc_pident_lookup
40042 if (!task)
40043 goto out_no_task;
40044
40045 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
40046 + goto out;
40047 +
40048 /*
40049 * Yes, it does not scale. And it should not. Don't add
40050 * new entries into /proc/<tgid>/ without very good reasons.
40051 @@ -2381,6 +2493,9 @@ static int proc_pident_readdir(struct fi
40052 if (!task)
40053 goto out_no_task;
40054
40055 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
40056 + goto out;
40057 +
40058 ret = 0;
40059 i = filp->f_pos;
40060 switch (i) {
40061 @@ -2651,7 +2766,7 @@ static void *proc_self_follow_link(struc
40062 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
40063 void *cookie)
40064 {
40065 - char *s = nd_get_link(nd);
40066 + const char *s = nd_get_link(nd);
40067 if (!IS_ERR(s))
40068 __putname(s);
40069 }
40070 @@ -2838,7 +2953,7 @@ static const struct pid_entry tgid_base_
40071 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
40072 #endif
40073 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
40074 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
40075 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
40076 INF("syscall", S_IRUGO, proc_pid_syscall),
40077 #endif
40078 INF("cmdline", S_IRUGO, proc_pid_cmdline),
40079 @@ -2863,10 +2978,10 @@ static const struct pid_entry tgid_base_
40080 #ifdef CONFIG_SECURITY
40081 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
40082 #endif
40083 -#ifdef CONFIG_KALLSYMS
40084 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40085 INF("wchan", S_IRUGO, proc_pid_wchan),
40086 #endif
40087 -#ifdef CONFIG_STACKTRACE
40088 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40089 ONE("stack", S_IRUGO, proc_pid_stack),
40090 #endif
40091 #ifdef CONFIG_SCHEDSTATS
40092 @@ -2897,6 +3012,9 @@ static const struct pid_entry tgid_base_
40093 #ifdef CONFIG_TASK_IO_ACCOUNTING
40094 INF("io", S_IRUSR, proc_tgid_io_accounting),
40095 #endif
40096 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
40097 + INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
40098 +#endif
40099 };
40100
40101 static int proc_tgid_base_readdir(struct file * filp,
40102 @@ -3022,7 +3140,14 @@ static struct dentry *proc_pid_instantia
40103 if (!inode)
40104 goto out;
40105
40106 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40107 + inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
40108 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40109 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
40110 + inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
40111 +#else
40112 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
40113 +#endif
40114 inode->i_op = &proc_tgid_base_inode_operations;
40115 inode->i_fop = &proc_tgid_base_operations;
40116 inode->i_flags|=S_IMMUTABLE;
40117 @@ -3064,7 +3189,11 @@ struct dentry *proc_pid_lookup(struct in
40118 if (!task)
40119 goto out;
40120
40121 + if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
40122 + goto out_put_task;
40123 +
40124 result = proc_pid_instantiate(dir, dentry, task, NULL);
40125 +out_put_task:
40126 put_task_struct(task);
40127 out:
40128 return result;
40129 @@ -3129,6 +3258,11 @@ int proc_pid_readdir(struct file * filp,
40130 {
40131 unsigned int nr;
40132 struct task_struct *reaper;
40133 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40134 + const struct cred *tmpcred = current_cred();
40135 + const struct cred *itercred;
40136 +#endif
40137 + filldir_t __filldir = filldir;
40138 struct tgid_iter iter;
40139 struct pid_namespace *ns;
40140
40141 @@ -3152,8 +3286,27 @@ int proc_pid_readdir(struct file * filp,
40142 for (iter = next_tgid(ns, iter);
40143 iter.task;
40144 iter.tgid += 1, iter = next_tgid(ns, iter)) {
40145 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40146 + rcu_read_lock();
40147 + itercred = __task_cred(iter.task);
40148 +#endif
40149 + if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
40150 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40151 + || (tmpcred->uid && (itercred->uid != tmpcred->uid)
40152 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
40153 + && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
40154 +#endif
40155 + )
40156 +#endif
40157 + )
40158 + __filldir = &gr_fake_filldir;
40159 + else
40160 + __filldir = filldir;
40161 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40162 + rcu_read_unlock();
40163 +#endif
40164 filp->f_pos = iter.tgid + TGID_OFFSET;
40165 - if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
40166 + if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
40167 put_task_struct(iter.task);
40168 goto out;
40169 }
40170 @@ -3180,7 +3333,7 @@ static const struct pid_entry tid_base_s
40171 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
40172 #endif
40173 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
40174 -#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
40175 +#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
40176 INF("syscall", S_IRUGO, proc_pid_syscall),
40177 #endif
40178 INF("cmdline", S_IRUGO, proc_pid_cmdline),
40179 @@ -3204,10 +3357,10 @@ static const struct pid_entry tid_base_s
40180 #ifdef CONFIG_SECURITY
40181 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
40182 #endif
40183 -#ifdef CONFIG_KALLSYMS
40184 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40185 INF("wchan", S_IRUGO, proc_pid_wchan),
40186 #endif
40187 -#ifdef CONFIG_STACKTRACE
40188 +#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
40189 ONE("stack", S_IRUGO, proc_pid_stack),
40190 #endif
40191 #ifdef CONFIG_SCHEDSTATS
40192 diff -urNp linux-2.6.39.4/fs/proc/cmdline.c linux-2.6.39.4/fs/proc/cmdline.c
40193 --- linux-2.6.39.4/fs/proc/cmdline.c 2011-05-19 00:06:34.000000000 -0400
40194 +++ linux-2.6.39.4/fs/proc/cmdline.c 2011-08-05 19:44:37.000000000 -0400
40195 @@ -23,7 +23,11 @@ static const struct file_operations cmdl
40196
40197 static int __init proc_cmdline_init(void)
40198 {
40199 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40200 + proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
40201 +#else
40202 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
40203 +#endif
40204 return 0;
40205 }
40206 module_init(proc_cmdline_init);
40207 diff -urNp linux-2.6.39.4/fs/proc/devices.c linux-2.6.39.4/fs/proc/devices.c
40208 --- linux-2.6.39.4/fs/proc/devices.c 2011-05-19 00:06:34.000000000 -0400
40209 +++ linux-2.6.39.4/fs/proc/devices.c 2011-08-05 19:44:37.000000000 -0400
40210 @@ -64,7 +64,11 @@ static const struct file_operations proc
40211
40212 static int __init proc_devices_init(void)
40213 {
40214 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40215 + proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
40216 +#else
40217 proc_create("devices", 0, NULL, &proc_devinfo_operations);
40218 +#endif
40219 return 0;
40220 }
40221 module_init(proc_devices_init);
40222 diff -urNp linux-2.6.39.4/fs/proc/inode.c linux-2.6.39.4/fs/proc/inode.c
40223 --- linux-2.6.39.4/fs/proc/inode.c 2011-05-19 00:06:34.000000000 -0400
40224 +++ linux-2.6.39.4/fs/proc/inode.c 2011-08-05 19:44:37.000000000 -0400
40225 @@ -433,7 +433,11 @@ struct inode *proc_get_inode(struct supe
40226 if (de->mode) {
40227 inode->i_mode = de->mode;
40228 inode->i_uid = de->uid;
40229 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
40230 + inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
40231 +#else
40232 inode->i_gid = de->gid;
40233 +#endif
40234 }
40235 if (de->size)
40236 inode->i_size = de->size;
40237 diff -urNp linux-2.6.39.4/fs/proc/internal.h linux-2.6.39.4/fs/proc/internal.h
40238 --- linux-2.6.39.4/fs/proc/internal.h 2011-05-19 00:06:34.000000000 -0400
40239 +++ linux-2.6.39.4/fs/proc/internal.h 2011-08-05 19:44:37.000000000 -0400
40240 @@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
40241 struct pid *pid, struct task_struct *task);
40242 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
40243 struct pid *pid, struct task_struct *task);
40244 +#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
40245 +extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
40246 +#endif
40247 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
40248
40249 extern const struct file_operations proc_maps_operations;
40250 diff -urNp linux-2.6.39.4/fs/proc/Kconfig linux-2.6.39.4/fs/proc/Kconfig
40251 --- linux-2.6.39.4/fs/proc/Kconfig 2011-05-19 00:06:34.000000000 -0400
40252 +++ linux-2.6.39.4/fs/proc/Kconfig 2011-08-05 19:44:37.000000000 -0400
40253 @@ -30,12 +30,12 @@ config PROC_FS
40254
40255 config PROC_KCORE
40256 bool "/proc/kcore support" if !ARM
40257 - depends on PROC_FS && MMU
40258 + depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
40259
40260 config PROC_VMCORE
40261 bool "/proc/vmcore support"
40262 - depends on PROC_FS && CRASH_DUMP
40263 - default y
40264 + depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
40265 + default n
40266 help
40267 Exports the dump image of crashed kernel in ELF format.
40268
40269 @@ -59,8 +59,8 @@ config PROC_SYSCTL
40270 limited in memory.
40271
40272 config PROC_PAGE_MONITOR
40273 - default y
40274 - depends on PROC_FS && MMU
40275 + default n
40276 + depends on PROC_FS && MMU && !GRKERNSEC
40277 bool "Enable /proc page monitoring" if EXPERT
40278 help
40279 Various /proc files exist to monitor process memory utilization:
40280 diff -urNp linux-2.6.39.4/fs/proc/kcore.c linux-2.6.39.4/fs/proc/kcore.c
40281 --- linux-2.6.39.4/fs/proc/kcore.c 2011-05-19 00:06:34.000000000 -0400
40282 +++ linux-2.6.39.4/fs/proc/kcore.c 2011-08-05 19:44:37.000000000 -0400
40283 @@ -321,6 +321,8 @@ static void elf_kcore_store_hdr(char *bu
40284 off_t offset = 0;
40285 struct kcore_list *m;
40286
40287 + pax_track_stack();
40288 +
40289 /* setup ELF header */
40290 elf = (struct elfhdr *) bufp;
40291 bufp += sizeof(struct elfhdr);
40292 @@ -478,9 +480,10 @@ read_kcore(struct file *file, char __use
40293 * the addresses in the elf_phdr on our list.
40294 */
40295 start = kc_offset_to_vaddr(*fpos - elf_buflen);
40296 - if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
40297 + tsz = PAGE_SIZE - (start & ~PAGE_MASK);
40298 + if (tsz > buflen)
40299 tsz = buflen;
40300 -
40301 +
40302 while (buflen) {
40303 struct kcore_list *m;
40304
40305 @@ -509,20 +512,23 @@ read_kcore(struct file *file, char __use
40306 kfree(elf_buf);
40307 } else {
40308 if (kern_addr_valid(start)) {
40309 - unsigned long n;
40310 + char *elf_buf;
40311 + mm_segment_t oldfs;
40312
40313 - n = copy_to_user(buffer, (char *)start, tsz);
40314 - /*
40315 - * We cannot distingush between fault on source
40316 - * and fault on destination. When this happens
40317 - * we clear too and hope it will trigger the
40318 - * EFAULT again.
40319 - */
40320 - if (n) {
40321 - if (clear_user(buffer + tsz - n,
40322 - n))
40323 + elf_buf = kmalloc(tsz, GFP_KERNEL);
40324 + if (!elf_buf)
40325 + return -ENOMEM;
40326 + oldfs = get_fs();
40327 + set_fs(KERNEL_DS);
40328 + if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
40329 + set_fs(oldfs);
40330 + if (copy_to_user(buffer, elf_buf, tsz)) {
40331 + kfree(elf_buf);
40332 return -EFAULT;
40333 + }
40334 }
40335 + set_fs(oldfs);
40336 + kfree(elf_buf);
40337 } else {
40338 if (clear_user(buffer, tsz))
40339 return -EFAULT;
40340 @@ -542,6 +548,9 @@ read_kcore(struct file *file, char __use
40341
40342 static int open_kcore(struct inode *inode, struct file *filp)
40343 {
40344 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
40345 + return -EPERM;
40346 +#endif
40347 if (!capable(CAP_SYS_RAWIO))
40348 return -EPERM;
40349 if (kcore_need_update)
40350 diff -urNp linux-2.6.39.4/fs/proc/meminfo.c linux-2.6.39.4/fs/proc/meminfo.c
40351 --- linux-2.6.39.4/fs/proc/meminfo.c 2011-05-19 00:06:34.000000000 -0400
40352 +++ linux-2.6.39.4/fs/proc/meminfo.c 2011-08-05 19:44:37.000000000 -0400
40353 @@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
40354 unsigned long pages[NR_LRU_LISTS];
40355 int lru;
40356
40357 + pax_track_stack();
40358 +
40359 /*
40360 * display in kilobytes.
40361 */
40362 @@ -157,7 +159,7 @@ static int meminfo_proc_show(struct seq_
40363 vmi.used >> 10,
40364 vmi.largest_chunk >> 10
40365 #ifdef CONFIG_MEMORY_FAILURE
40366 - ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
40367 + ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
40368 #endif
40369 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
40370 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
40371 diff -urNp linux-2.6.39.4/fs/proc/nommu.c linux-2.6.39.4/fs/proc/nommu.c
40372 --- linux-2.6.39.4/fs/proc/nommu.c 2011-05-19 00:06:34.000000000 -0400
40373 +++ linux-2.6.39.4/fs/proc/nommu.c 2011-08-05 19:44:37.000000000 -0400
40374 @@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_
40375 if (len < 1)
40376 len = 1;
40377 seq_printf(m, "%*c", len, ' ');
40378 - seq_path(m, &file->f_path, "");
40379 + seq_path(m, &file->f_path, "\n\\");
40380 }
40381
40382 seq_putc(m, '\n');
40383 diff -urNp linux-2.6.39.4/fs/proc/proc_net.c linux-2.6.39.4/fs/proc/proc_net.c
40384 --- linux-2.6.39.4/fs/proc/proc_net.c 2011-05-19 00:06:34.000000000 -0400
40385 +++ linux-2.6.39.4/fs/proc/proc_net.c 2011-08-05 19:44:37.000000000 -0400
40386 @@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str
40387 struct task_struct *task;
40388 struct nsproxy *ns;
40389 struct net *net = NULL;
40390 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40391 + const struct cred *cred = current_cred();
40392 +#endif
40393 +
40394 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40395 + if (cred->fsuid)
40396 + return net;
40397 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40398 + if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
40399 + return net;
40400 +#endif
40401
40402 rcu_read_lock();
40403 task = pid_task(proc_pid(dir), PIDTYPE_PID);
40404 diff -urNp linux-2.6.39.4/fs/proc/proc_sysctl.c linux-2.6.39.4/fs/proc/proc_sysctl.c
40405 --- linux-2.6.39.4/fs/proc/proc_sysctl.c 2011-05-19 00:06:34.000000000 -0400
40406 +++ linux-2.6.39.4/fs/proc/proc_sysctl.c 2011-08-05 19:44:37.000000000 -0400
40407 @@ -8,6 +8,8 @@
40408 #include <linux/namei.h>
40409 #include "internal.h"
40410
40411 +extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
40412 +
40413 static const struct dentry_operations proc_sys_dentry_operations;
40414 static const struct file_operations proc_sys_file_operations;
40415 static const struct inode_operations proc_sys_inode_operations;
40416 @@ -111,6 +113,9 @@ static struct dentry *proc_sys_lookup(st
40417 if (!p)
40418 goto out;
40419
40420 + if (gr_handle_sysctl(p, MAY_EXEC))
40421 + goto out;
40422 +
40423 err = ERR_PTR(-ENOMEM);
40424 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
40425 if (h)
40426 @@ -230,6 +235,9 @@ static int scan(struct ctl_table_header
40427 if (*pos < file->f_pos)
40428 continue;
40429
40430 + if (gr_handle_sysctl(table, 0))
40431 + continue;
40432 +
40433 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
40434 if (res)
40435 return res;
40436 @@ -358,6 +366,9 @@ static int proc_sys_getattr(struct vfsmo
40437 if (IS_ERR(head))
40438 return PTR_ERR(head);
40439
40440 + if (table && gr_handle_sysctl(table, MAY_EXEC))
40441 + return -ENOENT;
40442 +
40443 generic_fillattr(inode, stat);
40444 if (table)
40445 stat->mode = (stat->mode & S_IFMT) | table->mode;
40446 diff -urNp linux-2.6.39.4/fs/proc/root.c linux-2.6.39.4/fs/proc/root.c
40447 --- linux-2.6.39.4/fs/proc/root.c 2011-05-19 00:06:34.000000000 -0400
40448 +++ linux-2.6.39.4/fs/proc/root.c 2011-08-05 19:44:37.000000000 -0400
40449 @@ -122,7 +122,15 @@ void __init proc_root_init(void)
40450 #ifdef CONFIG_PROC_DEVICETREE
40451 proc_device_tree_init();
40452 #endif
40453 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
40454 +#ifdef CONFIG_GRKERNSEC_PROC_USER
40455 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
40456 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40457 + proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40458 +#endif
40459 +#else
40460 proc_mkdir("bus", NULL);
40461 +#endif
40462 proc_sys_init();
40463 }
40464
40465 diff -urNp linux-2.6.39.4/fs/proc/task_mmu.c linux-2.6.39.4/fs/proc/task_mmu.c
40466 --- linux-2.6.39.4/fs/proc/task_mmu.c 2011-05-19 00:06:34.000000000 -0400
40467 +++ linux-2.6.39.4/fs/proc/task_mmu.c 2011-08-05 19:44:37.000000000 -0400
40468 @@ -51,8 +51,13 @@ void task_mem(struct seq_file *m, struct
40469 "VmExe:\t%8lu kB\n"
40470 "VmLib:\t%8lu kB\n"
40471 "VmPTE:\t%8lu kB\n"
40472 - "VmSwap:\t%8lu kB\n",
40473 - hiwater_vm << (PAGE_SHIFT-10),
40474 + "VmSwap:\t%8lu kB\n"
40475 +
40476 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40477 + "CsBase:\t%8lx\nCsLim:\t%8lx\n"
40478 +#endif
40479 +
40480 + ,hiwater_vm << (PAGE_SHIFT-10),
40481 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
40482 mm->locked_vm << (PAGE_SHIFT-10),
40483 hiwater_rss << (PAGE_SHIFT-10),
40484 @@ -60,7 +65,13 @@ void task_mem(struct seq_file *m, struct
40485 data << (PAGE_SHIFT-10),
40486 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
40487 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
40488 - swap << (PAGE_SHIFT-10));
40489 + swap << (PAGE_SHIFT-10)
40490 +
40491 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40492 + , mm->context.user_cs_base, mm->context.user_cs_limit
40493 +#endif
40494 +
40495 + );
40496 }
40497
40498 unsigned long task_vsize(struct mm_struct *mm)
40499 @@ -207,6 +218,12 @@ static int do_maps_open(struct inode *in
40500 return ret;
40501 }
40502
40503 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40504 +#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
40505 + (_mm->pax_flags & MF_PAX_RANDMMAP || \
40506 + _mm->pax_flags & MF_PAX_SEGMEXEC))
40507 +#endif
40508 +
40509 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
40510 {
40511 struct mm_struct *mm = vma->vm_mm;
40512 @@ -225,13 +242,13 @@ static void show_map_vma(struct seq_file
40513 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
40514 }
40515
40516 - /* We don't show the stack guard page in /proc/maps */
40517 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40518 + start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
40519 + end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
40520 +#else
40521 start = vma->vm_start;
40522 - if (stack_guard_page_start(vma, start))
40523 - start += PAGE_SIZE;
40524 end = vma->vm_end;
40525 - if (stack_guard_page_end(vma, end))
40526 - end -= PAGE_SIZE;
40527 +#endif
40528
40529 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
40530 start,
40531 @@ -240,7 +257,11 @@ static void show_map_vma(struct seq_file
40532 flags & VM_WRITE ? 'w' : '-',
40533 flags & VM_EXEC ? 'x' : '-',
40534 flags & VM_MAYSHARE ? 's' : 'p',
40535 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40536 + PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
40537 +#else
40538 pgoff,
40539 +#endif
40540 MAJOR(dev), MINOR(dev), ino, &len);
40541
40542 /*
40543 @@ -249,7 +270,7 @@ static void show_map_vma(struct seq_file
40544 */
40545 if (file) {
40546 pad_len_spaces(m, len);
40547 - seq_path(m, &file->f_path, "\n");
40548 + seq_path(m, &file->f_path, "\n\\");
40549 } else {
40550 const char *name = arch_vma_name(vma);
40551 if (!name) {
40552 @@ -257,8 +278,9 @@ static void show_map_vma(struct seq_file
40553 if (vma->vm_start <= mm->brk &&
40554 vma->vm_end >= mm->start_brk) {
40555 name = "[heap]";
40556 - } else if (vma->vm_start <= mm->start_stack &&
40557 - vma->vm_end >= mm->start_stack) {
40558 + } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
40559 + (vma->vm_start <= mm->start_stack &&
40560 + vma->vm_end >= mm->start_stack)) {
40561 name = "[stack]";
40562 }
40563 } else {
40564 @@ -433,11 +455,16 @@ static int show_smap(struct seq_file *m,
40565 };
40566
40567 memset(&mss, 0, sizeof mss);
40568 - mss.vma = vma;
40569 - /* mmap_sem is held in m_start */
40570 - if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40571 - walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40572 -
40573 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40574 + if (!PAX_RAND_FLAGS(vma->vm_mm)) {
40575 +#endif
40576 + mss.vma = vma;
40577 + /* mmap_sem is held in m_start */
40578 + if (vma->vm_mm && !is_vm_hugetlb_page(vma))
40579 + walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
40580 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40581 + }
40582 +#endif
40583 show_map_vma(m, vma);
40584
40585 seq_printf(m,
40586 @@ -455,7 +482,11 @@ static int show_smap(struct seq_file *m,
40587 "KernelPageSize: %8lu kB\n"
40588 "MMUPageSize: %8lu kB\n"
40589 "Locked: %8lu kB\n",
40590 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
40591 + PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
40592 +#else
40593 (vma->vm_end - vma->vm_start) >> 10,
40594 +#endif
40595 mss.resident >> 10,
40596 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
40597 mss.shared_clean >> 10,
40598 diff -urNp linux-2.6.39.4/fs/proc/task_nommu.c linux-2.6.39.4/fs/proc/task_nommu.c
40599 --- linux-2.6.39.4/fs/proc/task_nommu.c 2011-05-19 00:06:34.000000000 -0400
40600 +++ linux-2.6.39.4/fs/proc/task_nommu.c 2011-08-05 19:44:37.000000000 -0400
40601 @@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct
40602 else
40603 bytes += kobjsize(mm);
40604
40605 - if (current->fs && current->fs->users > 1)
40606 + if (current->fs && atomic_read(&current->fs->users) > 1)
40607 sbytes += kobjsize(current->fs);
40608 else
40609 bytes += kobjsize(current->fs);
40610 @@ -166,7 +166,7 @@ static int nommu_vma_show(struct seq_fil
40611
40612 if (file) {
40613 pad_len_spaces(m, len);
40614 - seq_path(m, &file->f_path, "");
40615 + seq_path(m, &file->f_path, "\n\\");
40616 } else if (mm) {
40617 if (vma->vm_start <= mm->start_stack &&
40618 vma->vm_end >= mm->start_stack) {
40619 diff -urNp linux-2.6.39.4/fs/quota/netlink.c linux-2.6.39.4/fs/quota/netlink.c
40620 --- linux-2.6.39.4/fs/quota/netlink.c 2011-05-19 00:06:34.000000000 -0400
40621 +++ linux-2.6.39.4/fs/quota/netlink.c 2011-08-05 19:44:37.000000000 -0400
40622 @@ -33,7 +33,7 @@ static struct genl_family quota_genl_fam
40623 void quota_send_warning(short type, unsigned int id, dev_t dev,
40624 const char warntype)
40625 {
40626 - static atomic_t seq;
40627 + static atomic_unchecked_t seq;
40628 struct sk_buff *skb;
40629 void *msg_head;
40630 int ret;
40631 @@ -49,7 +49,7 @@ void quota_send_warning(short type, unsi
40632 "VFS: Not enough memory to send quota warning.\n");
40633 return;
40634 }
40635 - msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
40636 + msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
40637 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
40638 if (!msg_head) {
40639 printk(KERN_ERR
40640 diff -urNp linux-2.6.39.4/fs/readdir.c linux-2.6.39.4/fs/readdir.c
40641 --- linux-2.6.39.4/fs/readdir.c 2011-05-19 00:06:34.000000000 -0400
40642 +++ linux-2.6.39.4/fs/readdir.c 2011-08-05 19:44:37.000000000 -0400
40643 @@ -17,6 +17,7 @@
40644 #include <linux/security.h>
40645 #include <linux/syscalls.h>
40646 #include <linux/unistd.h>
40647 +#include <linux/namei.h>
40648
40649 #include <asm/uaccess.h>
40650
40651 @@ -67,6 +68,7 @@ struct old_linux_dirent {
40652
40653 struct readdir_callback {
40654 struct old_linux_dirent __user * dirent;
40655 + struct file * file;
40656 int result;
40657 };
40658
40659 @@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
40660 buf->result = -EOVERFLOW;
40661 return -EOVERFLOW;
40662 }
40663 +
40664 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40665 + return 0;
40666 +
40667 buf->result++;
40668 dirent = buf->dirent;
40669 if (!access_ok(VERIFY_WRITE, dirent,
40670 @@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
40671
40672 buf.result = 0;
40673 buf.dirent = dirent;
40674 + buf.file = file;
40675
40676 error = vfs_readdir(file, fillonedir, &buf);
40677 if (buf.result)
40678 @@ -142,6 +149,7 @@ struct linux_dirent {
40679 struct getdents_callback {
40680 struct linux_dirent __user * current_dir;
40681 struct linux_dirent __user * previous;
40682 + struct file * file;
40683 int count;
40684 int error;
40685 };
40686 @@ -163,6 +171,10 @@ static int filldir(void * __buf, const c
40687 buf->error = -EOVERFLOW;
40688 return -EOVERFLOW;
40689 }
40690 +
40691 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40692 + return 0;
40693 +
40694 dirent = buf->previous;
40695 if (dirent) {
40696 if (__put_user(offset, &dirent->d_off))
40697 @@ -210,6 +222,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
40698 buf.previous = NULL;
40699 buf.count = count;
40700 buf.error = 0;
40701 + buf.file = file;
40702
40703 error = vfs_readdir(file, filldir, &buf);
40704 if (error >= 0)
40705 @@ -229,6 +242,7 @@ out:
40706 struct getdents_callback64 {
40707 struct linux_dirent64 __user * current_dir;
40708 struct linux_dirent64 __user * previous;
40709 + struct file *file;
40710 int count;
40711 int error;
40712 };
40713 @@ -244,6 +258,10 @@ static int filldir64(void * __buf, const
40714 buf->error = -EINVAL; /* only used if we fail.. */
40715 if (reclen > buf->count)
40716 return -EINVAL;
40717 +
40718 + if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
40719 + return 0;
40720 +
40721 dirent = buf->previous;
40722 if (dirent) {
40723 if (__put_user(offset, &dirent->d_off))
40724 @@ -291,6 +309,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
40725
40726 buf.current_dir = dirent;
40727 buf.previous = NULL;
40728 + buf.file = file;
40729 buf.count = count;
40730 buf.error = 0;
40731
40732 diff -urNp linux-2.6.39.4/fs/reiserfs/dir.c linux-2.6.39.4/fs/reiserfs/dir.c
40733 --- linux-2.6.39.4/fs/reiserfs/dir.c 2011-05-19 00:06:34.000000000 -0400
40734 +++ linux-2.6.39.4/fs/reiserfs/dir.c 2011-08-05 19:44:37.000000000 -0400
40735 @@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
40736 struct reiserfs_dir_entry de;
40737 int ret = 0;
40738
40739 + pax_track_stack();
40740 +
40741 reiserfs_write_lock(inode->i_sb);
40742
40743 reiserfs_check_lock_depth(inode->i_sb, "readdir");
40744 diff -urNp linux-2.6.39.4/fs/reiserfs/do_balan.c linux-2.6.39.4/fs/reiserfs/do_balan.c
40745 --- linux-2.6.39.4/fs/reiserfs/do_balan.c 2011-05-19 00:06:34.000000000 -0400
40746 +++ linux-2.6.39.4/fs/reiserfs/do_balan.c 2011-08-05 19:44:37.000000000 -0400
40747 @@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb,
40748 return;
40749 }
40750
40751 - atomic_inc(&(fs_generation(tb->tb_sb)));
40752 + atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
40753 do_balance_starts(tb);
40754
40755 /* balance leaf returns 0 except if combining L R and S into
40756 diff -urNp linux-2.6.39.4/fs/reiserfs/journal.c linux-2.6.39.4/fs/reiserfs/journal.c
40757 --- linux-2.6.39.4/fs/reiserfs/journal.c 2011-05-19 00:06:34.000000000 -0400
40758 +++ linux-2.6.39.4/fs/reiserfs/journal.c 2011-08-05 19:44:37.000000000 -0400
40759 @@ -2299,6 +2299,8 @@ static struct buffer_head *reiserfs_brea
40760 struct buffer_head *bh;
40761 int i, j;
40762
40763 + pax_track_stack();
40764 +
40765 bh = __getblk(dev, block, bufsize);
40766 if (buffer_uptodate(bh))
40767 return (bh);
40768 diff -urNp linux-2.6.39.4/fs/reiserfs/namei.c linux-2.6.39.4/fs/reiserfs/namei.c
40769 --- linux-2.6.39.4/fs/reiserfs/namei.c 2011-05-19 00:06:34.000000000 -0400
40770 +++ linux-2.6.39.4/fs/reiserfs/namei.c 2011-08-05 19:44:37.000000000 -0400
40771 @@ -1225,6 +1225,8 @@ static int reiserfs_rename(struct inode
40772 unsigned long savelink = 1;
40773 struct timespec ctime;
40774
40775 + pax_track_stack();
40776 +
40777 /* three balancings: (1) old name removal, (2) new name insertion
40778 and (3) maybe "save" link insertion
40779 stat data updates: (1) old directory,
40780 diff -urNp linux-2.6.39.4/fs/reiserfs/procfs.c linux-2.6.39.4/fs/reiserfs/procfs.c
40781 --- linux-2.6.39.4/fs/reiserfs/procfs.c 2011-05-19 00:06:34.000000000 -0400
40782 +++ linux-2.6.39.4/fs/reiserfs/procfs.c 2011-08-05 19:44:37.000000000 -0400
40783 @@ -113,7 +113,7 @@ static int show_super(struct seq_file *m
40784 "SMALL_TAILS " : "NO_TAILS ",
40785 replay_only(sb) ? "REPLAY_ONLY " : "",
40786 convert_reiserfs(sb) ? "CONV " : "",
40787 - atomic_read(&r->s_generation_counter),
40788 + atomic_read_unchecked(&r->s_generation_counter),
40789 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
40790 SF(s_do_balance), SF(s_unneeded_left_neighbor),
40791 SF(s_good_search_by_key_reada), SF(s_bmaps),
40792 @@ -299,6 +299,8 @@ static int show_journal(struct seq_file
40793 struct journal_params *jp = &rs->s_v1.s_journal;
40794 char b[BDEVNAME_SIZE];
40795
40796 + pax_track_stack();
40797 +
40798 seq_printf(m, /* on-disk fields */
40799 "jp_journal_1st_block: \t%i\n"
40800 "jp_journal_dev: \t%s[%x]\n"
40801 diff -urNp linux-2.6.39.4/fs/reiserfs/stree.c linux-2.6.39.4/fs/reiserfs/stree.c
40802 --- linux-2.6.39.4/fs/reiserfs/stree.c 2011-05-19 00:06:34.000000000 -0400
40803 +++ linux-2.6.39.4/fs/reiserfs/stree.c 2011-08-05 19:44:37.000000000 -0400
40804 @@ -1196,6 +1196,8 @@ int reiserfs_delete_item(struct reiserfs
40805 int iter = 0;
40806 #endif
40807
40808 + pax_track_stack();
40809 +
40810 BUG_ON(!th->t_trans_id);
40811
40812 init_tb_struct(th, &s_del_balance, sb, path,
40813 @@ -1333,6 +1335,8 @@ void reiserfs_delete_solid_item(struct r
40814 int retval;
40815 int quota_cut_bytes = 0;
40816
40817 + pax_track_stack();
40818 +
40819 BUG_ON(!th->t_trans_id);
40820
40821 le_key2cpu_key(&cpu_key, key);
40822 @@ -1562,6 +1566,8 @@ int reiserfs_cut_from_item(struct reiser
40823 int quota_cut_bytes;
40824 loff_t tail_pos = 0;
40825
40826 + pax_track_stack();
40827 +
40828 BUG_ON(!th->t_trans_id);
40829
40830 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
40831 @@ -1957,6 +1963,8 @@ int reiserfs_paste_into_item(struct reis
40832 int retval;
40833 int fs_gen;
40834
40835 + pax_track_stack();
40836 +
40837 BUG_ON(!th->t_trans_id);
40838
40839 fs_gen = get_generation(inode->i_sb);
40840 @@ -2045,6 +2053,8 @@ int reiserfs_insert_item(struct reiserfs
40841 int fs_gen = 0;
40842 int quota_bytes = 0;
40843
40844 + pax_track_stack();
40845 +
40846 BUG_ON(!th->t_trans_id);
40847
40848 if (inode) { /* Do we count quotas for item? */
40849 diff -urNp linux-2.6.39.4/fs/reiserfs/super.c linux-2.6.39.4/fs/reiserfs/super.c
40850 --- linux-2.6.39.4/fs/reiserfs/super.c 2011-05-19 00:06:34.000000000 -0400
40851 +++ linux-2.6.39.4/fs/reiserfs/super.c 2011-08-05 19:44:37.000000000 -0400
40852 @@ -927,6 +927,8 @@ static int reiserfs_parse_options(struct
40853 {.option_name = NULL}
40854 };
40855
40856 + pax_track_stack();
40857 +
40858 *blocks = 0;
40859 if (!options || !*options)
40860 /* use default configuration: create tails, journaling on, no
40861 diff -urNp linux-2.6.39.4/fs/select.c linux-2.6.39.4/fs/select.c
40862 --- linux-2.6.39.4/fs/select.c 2011-05-19 00:06:34.000000000 -0400
40863 +++ linux-2.6.39.4/fs/select.c 2011-08-05 19:44:37.000000000 -0400
40864 @@ -20,6 +20,7 @@
40865 #include <linux/module.h>
40866 #include <linux/slab.h>
40867 #include <linux/poll.h>
40868 +#include <linux/security.h>
40869 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
40870 #include <linux/file.h>
40871 #include <linux/fdtable.h>
40872 @@ -403,6 +404,8 @@ int do_select(int n, fd_set_bits *fds, s
40873 int retval, i, timed_out = 0;
40874 unsigned long slack = 0;
40875
40876 + pax_track_stack();
40877 +
40878 rcu_read_lock();
40879 retval = max_select_fd(n, fds);
40880 rcu_read_unlock();
40881 @@ -528,6 +531,8 @@ int core_sys_select(int n, fd_set __user
40882 /* Allocate small arguments on the stack to save memory and be faster */
40883 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
40884
40885 + pax_track_stack();
40886 +
40887 ret = -EINVAL;
40888 if (n < 0)
40889 goto out_nofds;
40890 @@ -837,6 +842,9 @@ int do_sys_poll(struct pollfd __user *uf
40891 struct poll_list *walk = head;
40892 unsigned long todo = nfds;
40893
40894 + pax_track_stack();
40895 +
40896 + gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
40897 if (nfds > rlimit(RLIMIT_NOFILE))
40898 return -EINVAL;
40899
40900 diff -urNp linux-2.6.39.4/fs/seq_file.c linux-2.6.39.4/fs/seq_file.c
40901 --- linux-2.6.39.4/fs/seq_file.c 2011-05-19 00:06:34.000000000 -0400
40902 +++ linux-2.6.39.4/fs/seq_file.c 2011-08-05 20:34:06.000000000 -0400
40903 @@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
40904 return 0;
40905 }
40906 if (!m->buf) {
40907 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40908 + m->size = PAGE_SIZE;
40909 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40910 if (!m->buf)
40911 return -ENOMEM;
40912 }
40913 @@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
40914 Eoverflow:
40915 m->op->stop(m, p);
40916 kfree(m->buf);
40917 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40918 + m->size <<= 1;
40919 + m->buf = kmalloc(m->size, GFP_KERNEL);
40920 return !m->buf ? -ENOMEM : -EAGAIN;
40921 }
40922
40923 @@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
40924 m->version = file->f_version;
40925 /* grab buffer if we didn't have one */
40926 if (!m->buf) {
40927 - m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
40928 + m->size = PAGE_SIZE;
40929 + m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
40930 if (!m->buf)
40931 goto Enomem;
40932 }
40933 @@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
40934 goto Fill;
40935 m->op->stop(m, p);
40936 kfree(m->buf);
40937 - m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
40938 + m->size <<= 1;
40939 + m->buf = kmalloc(m->size, GFP_KERNEL);
40940 if (!m->buf)
40941 goto Enomem;
40942 m->count = 0;
40943 @@ -549,7 +553,7 @@ static void single_stop(struct seq_file
40944 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
40945 void *data)
40946 {
40947 - struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
40948 + seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
40949 int res = -ENOMEM;
40950
40951 if (op) {
40952 diff -urNp linux-2.6.39.4/fs/splice.c linux-2.6.39.4/fs/splice.c
40953 --- linux-2.6.39.4/fs/splice.c 2011-05-19 00:06:34.000000000 -0400
40954 +++ linux-2.6.39.4/fs/splice.c 2011-08-05 19:44:37.000000000 -0400
40955 @@ -186,7 +186,7 @@ ssize_t splice_to_pipe(struct pipe_inode
40956 pipe_lock(pipe);
40957
40958 for (;;) {
40959 - if (!pipe->readers) {
40960 + if (!atomic_read(&pipe->readers)) {
40961 send_sig(SIGPIPE, current, 0);
40962 if (!ret)
40963 ret = -EPIPE;
40964 @@ -240,9 +240,9 @@ ssize_t splice_to_pipe(struct pipe_inode
40965 do_wakeup = 0;
40966 }
40967
40968 - pipe->waiting_writers++;
40969 + atomic_inc(&pipe->waiting_writers);
40970 pipe_wait(pipe);
40971 - pipe->waiting_writers--;
40972 + atomic_dec(&pipe->waiting_writers);
40973 }
40974
40975 pipe_unlock(pipe);
40976 @@ -316,6 +316,8 @@ __generic_file_splice_read(struct file *
40977 .spd_release = spd_release_page,
40978 };
40979
40980 + pax_track_stack();
40981 +
40982 if (splice_grow_spd(pipe, &spd))
40983 return -ENOMEM;
40984
40985 @@ -556,7 +558,7 @@ static ssize_t kernel_readv(struct file
40986 old_fs = get_fs();
40987 set_fs(get_ds());
40988 /* The cast to a user pointer is valid due to the set_fs() */
40989 - res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
40990 + res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
40991 set_fs(old_fs);
40992
40993 return res;
40994 @@ -571,7 +573,7 @@ static ssize_t kernel_write(struct file
40995 old_fs = get_fs();
40996 set_fs(get_ds());
40997 /* The cast to a user pointer is valid due to the set_fs() */
40998 - res = vfs_write(file, (const char __user *)buf, count, &pos);
40999 + res = vfs_write(file, (__force const char __user *)buf, count, &pos);
41000 set_fs(old_fs);
41001
41002 return res;
41003 @@ -599,6 +601,8 @@ ssize_t default_file_splice_read(struct
41004 .spd_release = spd_release_page,
41005 };
41006
41007 + pax_track_stack();
41008 +
41009 if (splice_grow_spd(pipe, &spd))
41010 return -ENOMEM;
41011
41012 @@ -622,7 +626,7 @@ ssize_t default_file_splice_read(struct
41013 goto err;
41014
41015 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
41016 - vec[i].iov_base = (void __user *) page_address(page);
41017 + vec[i].iov_base = (__force void __user *) page_address(page);
41018 vec[i].iov_len = this_len;
41019 spd.pages[i] = page;
41020 spd.nr_pages++;
41021 @@ -842,10 +846,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
41022 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
41023 {
41024 while (!pipe->nrbufs) {
41025 - if (!pipe->writers)
41026 + if (!atomic_read(&pipe->writers))
41027 return 0;
41028
41029 - if (!pipe->waiting_writers && sd->num_spliced)
41030 + if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
41031 return 0;
41032
41033 if (sd->flags & SPLICE_F_NONBLOCK)
41034 @@ -1178,7 +1182,7 @@ ssize_t splice_direct_to_actor(struct fi
41035 * out of the pipe right after the splice_to_pipe(). So set
41036 * PIPE_READERS appropriately.
41037 */
41038 - pipe->readers = 1;
41039 + atomic_set(&pipe->readers, 1);
41040
41041 current->splice_pipe = pipe;
41042 }
41043 @@ -1615,6 +1619,8 @@ static long vmsplice_to_pipe(struct file
41044 };
41045 long ret;
41046
41047 + pax_track_stack();
41048 +
41049 pipe = get_pipe_info(file);
41050 if (!pipe)
41051 return -EBADF;
41052 @@ -1730,9 +1736,9 @@ static int ipipe_prep(struct pipe_inode_
41053 ret = -ERESTARTSYS;
41054 break;
41055 }
41056 - if (!pipe->writers)
41057 + if (!atomic_read(&pipe->writers))
41058 break;
41059 - if (!pipe->waiting_writers) {
41060 + if (!atomic_read(&pipe->waiting_writers)) {
41061 if (flags & SPLICE_F_NONBLOCK) {
41062 ret = -EAGAIN;
41063 break;
41064 @@ -1764,7 +1770,7 @@ static int opipe_prep(struct pipe_inode_
41065 pipe_lock(pipe);
41066
41067 while (pipe->nrbufs >= pipe->buffers) {
41068 - if (!pipe->readers) {
41069 + if (!atomic_read(&pipe->readers)) {
41070 send_sig(SIGPIPE, current, 0);
41071 ret = -EPIPE;
41072 break;
41073 @@ -1777,9 +1783,9 @@ static int opipe_prep(struct pipe_inode_
41074 ret = -ERESTARTSYS;
41075 break;
41076 }
41077 - pipe->waiting_writers++;
41078 + atomic_inc(&pipe->waiting_writers);
41079 pipe_wait(pipe);
41080 - pipe->waiting_writers--;
41081 + atomic_dec(&pipe->waiting_writers);
41082 }
41083
41084 pipe_unlock(pipe);
41085 @@ -1815,14 +1821,14 @@ retry:
41086 pipe_double_lock(ipipe, opipe);
41087
41088 do {
41089 - if (!opipe->readers) {
41090 + if (!atomic_read(&opipe->readers)) {
41091 send_sig(SIGPIPE, current, 0);
41092 if (!ret)
41093 ret = -EPIPE;
41094 break;
41095 }
41096
41097 - if (!ipipe->nrbufs && !ipipe->writers)
41098 + if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
41099 break;
41100
41101 /*
41102 @@ -1922,7 +1928,7 @@ static int link_pipe(struct pipe_inode_i
41103 pipe_double_lock(ipipe, opipe);
41104
41105 do {
41106 - if (!opipe->readers) {
41107 + if (!atomic_read(&opipe->readers)) {
41108 send_sig(SIGPIPE, current, 0);
41109 if (!ret)
41110 ret = -EPIPE;
41111 @@ -1967,7 +1973,7 @@ static int link_pipe(struct pipe_inode_i
41112 * return EAGAIN if we have the potential of some data in the
41113 * future, otherwise just return 0
41114 */
41115 - if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
41116 + if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
41117 ret = -EAGAIN;
41118
41119 pipe_unlock(ipipe);
41120 diff -urNp linux-2.6.39.4/fs/sysfs/file.c linux-2.6.39.4/fs/sysfs/file.c
41121 --- linux-2.6.39.4/fs/sysfs/file.c 2011-05-19 00:06:34.000000000 -0400
41122 +++ linux-2.6.39.4/fs/sysfs/file.c 2011-08-05 19:44:37.000000000 -0400
41123 @@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
41124
41125 struct sysfs_open_dirent {
41126 atomic_t refcnt;
41127 - atomic_t event;
41128 + atomic_unchecked_t event;
41129 wait_queue_head_t poll;
41130 struct list_head buffers; /* goes through sysfs_buffer.list */
41131 };
41132 @@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
41133 if (!sysfs_get_active(attr_sd))
41134 return -ENODEV;
41135
41136 - buffer->event = atomic_read(&attr_sd->s_attr.open->event);
41137 + buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
41138 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
41139
41140 sysfs_put_active(attr_sd);
41141 @@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
41142 return -ENOMEM;
41143
41144 atomic_set(&new_od->refcnt, 0);
41145 - atomic_set(&new_od->event, 1);
41146 + atomic_set_unchecked(&new_od->event, 1);
41147 init_waitqueue_head(&new_od->poll);
41148 INIT_LIST_HEAD(&new_od->buffers);
41149 goto retry;
41150 @@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
41151
41152 sysfs_put_active(attr_sd);
41153
41154 - if (buffer->event != atomic_read(&od->event))
41155 + if (buffer->event != atomic_read_unchecked(&od->event))
41156 goto trigger;
41157
41158 return DEFAULT_POLLMASK;
41159 @@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
41160
41161 od = sd->s_attr.open;
41162 if (od) {
41163 - atomic_inc(&od->event);
41164 + atomic_inc_unchecked(&od->event);
41165 wake_up_interruptible(&od->poll);
41166 }
41167
41168 diff -urNp linux-2.6.39.4/fs/sysfs/mount.c linux-2.6.39.4/fs/sysfs/mount.c
41169 --- linux-2.6.39.4/fs/sysfs/mount.c 2011-05-19 00:06:34.000000000 -0400
41170 +++ linux-2.6.39.4/fs/sysfs/mount.c 2011-08-05 19:44:37.000000000 -0400
41171 @@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
41172 .s_name = "",
41173 .s_count = ATOMIC_INIT(1),
41174 .s_flags = SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
41175 +#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
41176 + .s_mode = S_IFDIR | S_IRWXU,
41177 +#else
41178 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
41179 +#endif
41180 .s_ino = 1,
41181 };
41182
41183 diff -urNp linux-2.6.39.4/fs/sysfs/symlink.c linux-2.6.39.4/fs/sysfs/symlink.c
41184 --- linux-2.6.39.4/fs/sysfs/symlink.c 2011-05-19 00:06:34.000000000 -0400
41185 +++ linux-2.6.39.4/fs/sysfs/symlink.c 2011-08-05 19:44:37.000000000 -0400
41186 @@ -286,7 +286,7 @@ static void *sysfs_follow_link(struct de
41187
41188 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
41189 {
41190 - char *page = nd_get_link(nd);
41191 + const char *page = nd_get_link(nd);
41192 if (!IS_ERR(page))
41193 free_page((unsigned long)page);
41194 }
41195 diff -urNp linux-2.6.39.4/fs/udf/inode.c linux-2.6.39.4/fs/udf/inode.c
41196 --- linux-2.6.39.4/fs/udf/inode.c 2011-05-19 00:06:34.000000000 -0400
41197 +++ linux-2.6.39.4/fs/udf/inode.c 2011-08-05 19:44:37.000000000 -0400
41198 @@ -560,6 +560,8 @@ static struct buffer_head *inode_getblk(
41199 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
41200 int lastblock = 0;
41201
41202 + pax_track_stack();
41203 +
41204 prev_epos.offset = udf_file_entry_alloc_offset(inode);
41205 prev_epos.block = iinfo->i_location;
41206 prev_epos.bh = NULL;
41207 diff -urNp linux-2.6.39.4/fs/udf/misc.c linux-2.6.39.4/fs/udf/misc.c
41208 --- linux-2.6.39.4/fs/udf/misc.c 2011-05-19 00:06:34.000000000 -0400
41209 +++ linux-2.6.39.4/fs/udf/misc.c 2011-08-05 19:44:37.000000000 -0400
41210 @@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
41211
41212 u8 udf_tag_checksum(const struct tag *t)
41213 {
41214 - u8 *data = (u8 *)t;
41215 + const u8 *data = (const u8 *)t;
41216 u8 checksum = 0;
41217 int i;
41218 for (i = 0; i < sizeof(struct tag); ++i)
41219 diff -urNp linux-2.6.39.4/fs/utimes.c linux-2.6.39.4/fs/utimes.c
41220 --- linux-2.6.39.4/fs/utimes.c 2011-05-19 00:06:34.000000000 -0400
41221 +++ linux-2.6.39.4/fs/utimes.c 2011-08-05 19:44:37.000000000 -0400
41222 @@ -1,6 +1,7 @@
41223 #include <linux/compiler.h>
41224 #include <linux/file.h>
41225 #include <linux/fs.h>
41226 +#include <linux/security.h>
41227 #include <linux/linkage.h>
41228 #include <linux/mount.h>
41229 #include <linux/namei.h>
41230 @@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
41231 goto mnt_drop_write_and_out;
41232 }
41233 }
41234 +
41235 + if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
41236 + error = -EACCES;
41237 + goto mnt_drop_write_and_out;
41238 + }
41239 +
41240 mutex_lock(&inode->i_mutex);
41241 error = notify_change(path->dentry, &newattrs);
41242 mutex_unlock(&inode->i_mutex);
41243 diff -urNp linux-2.6.39.4/fs/xattr_acl.c linux-2.6.39.4/fs/xattr_acl.c
41244 --- linux-2.6.39.4/fs/xattr_acl.c 2011-05-19 00:06:34.000000000 -0400
41245 +++ linux-2.6.39.4/fs/xattr_acl.c 2011-08-05 19:44:37.000000000 -0400
41246 @@ -17,8 +17,8 @@
41247 struct posix_acl *
41248 posix_acl_from_xattr(const void *value, size_t size)
41249 {
41250 - posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
41251 - posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
41252 + const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
41253 + const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
41254 int count;
41255 struct posix_acl *acl;
41256 struct posix_acl_entry *acl_e;
41257 diff -urNp linux-2.6.39.4/fs/xattr.c linux-2.6.39.4/fs/xattr.c
41258 --- linux-2.6.39.4/fs/xattr.c 2011-05-19 00:06:34.000000000 -0400
41259 +++ linux-2.6.39.4/fs/xattr.c 2011-08-05 19:44:37.000000000 -0400
41260 @@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
41261 * Extended attribute SET operations
41262 */
41263 static long
41264 -setxattr(struct dentry *d, const char __user *name, const void __user *value,
41265 +setxattr(struct path *path, const char __user *name, const void __user *value,
41266 size_t size, int flags)
41267 {
41268 int error;
41269 @@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
41270 return PTR_ERR(kvalue);
41271 }
41272
41273 - error = vfs_setxattr(d, kname, kvalue, size, flags);
41274 + if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
41275 + error = -EACCES;
41276 + goto out;
41277 + }
41278 +
41279 + error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
41280 +out:
41281 kfree(kvalue);
41282 return error;
41283 }
41284 @@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
41285 return error;
41286 error = mnt_want_write(path.mnt);
41287 if (!error) {
41288 - error = setxattr(path.dentry, name, value, size, flags);
41289 + error = setxattr(&path, name, value, size, flags);
41290 mnt_drop_write(path.mnt);
41291 }
41292 path_put(&path);
41293 @@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
41294 return error;
41295 error = mnt_want_write(path.mnt);
41296 if (!error) {
41297 - error = setxattr(path.dentry, name, value, size, flags);
41298 + error = setxattr(&path, name, value, size, flags);
41299 mnt_drop_write(path.mnt);
41300 }
41301 path_put(&path);
41302 @@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
41303 const void __user *,value, size_t, size, int, flags)
41304 {
41305 struct file *f;
41306 - struct dentry *dentry;
41307 int error = -EBADF;
41308
41309 f = fget(fd);
41310 if (!f)
41311 return error;
41312 - dentry = f->f_path.dentry;
41313 - audit_inode(NULL, dentry);
41314 + audit_inode(NULL, f->f_path.dentry);
41315 error = mnt_want_write_file(f);
41316 if (!error) {
41317 - error = setxattr(dentry, name, value, size, flags);
41318 + error = setxattr(&f->f_path, name, value, size, flags);
41319 mnt_drop_write(f->f_path.mnt);
41320 }
41321 fput(f);
41322 diff -urNp linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c
41323 --- linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-05-19 00:06:34.000000000 -0400
41324 +++ linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-08-05 19:44:37.000000000 -0400
41325 @@ -73,6 +73,7 @@ xfs_compat_ioc_fsgeometry_v1(
41326 xfs_fsop_geom_t fsgeo;
41327 int error;
41328
41329 + memset(&fsgeo, 0, sizeof(fsgeo));
41330 error = xfs_fs_geometry(mp, &fsgeo, 3);
41331 if (error)
41332 return -error;
41333 diff -urNp linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c
41334 --- linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-05-19 00:06:34.000000000 -0400
41335 +++ linux-2.6.39.4/fs/xfs/linux-2.6/xfs_ioctl.c 2011-08-05 19:44:37.000000000 -0400
41336 @@ -128,7 +128,7 @@ xfs_find_handle(
41337 }
41338
41339 error = -EFAULT;
41340 - if (copy_to_user(hreq->ohandle, &handle, hsize) ||
41341 + if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
41342 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
41343 goto out_put;
41344
41345 diff -urNp linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c
41346 --- linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c 2011-05-19 00:06:34.000000000 -0400
41347 +++ linux-2.6.39.4/fs/xfs/linux-2.6/xfs_iops.c 2011-08-05 19:44:37.000000000 -0400
41348 @@ -437,7 +437,7 @@ xfs_vn_put_link(
41349 struct nameidata *nd,
41350 void *p)
41351 {
41352 - char *s = nd_get_link(nd);
41353 + const char *s = nd_get_link(nd);
41354
41355 if (!IS_ERR(s))
41356 kfree(s);
41357 diff -urNp linux-2.6.39.4/fs/xfs/xfs_bmap.c linux-2.6.39.4/fs/xfs/xfs_bmap.c
41358 --- linux-2.6.39.4/fs/xfs/xfs_bmap.c 2011-05-19 00:06:34.000000000 -0400
41359 +++ linux-2.6.39.4/fs/xfs/xfs_bmap.c 2011-08-05 19:44:37.000000000 -0400
41360 @@ -287,7 +287,7 @@ xfs_bmap_validate_ret(
41361 int nmap,
41362 int ret_nmap);
41363 #else
41364 -#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
41365 +#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
41366 #endif /* DEBUG */
41367
41368 STATIC int
41369 diff -urNp linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c
41370 --- linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c 2011-05-19 00:06:34.000000000 -0400
41371 +++ linux-2.6.39.4/fs/xfs/xfs_dir2_sf.c 2011-08-05 19:44:37.000000000 -0400
41372 @@ -780,7 +780,15 @@ xfs_dir2_sf_getdents(
41373 }
41374
41375 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
41376 - if (filldir(dirent, (char *)sfep->name, sfep->namelen,
41377 + if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
41378 + char name[sfep->namelen];
41379 + memcpy(name, sfep->name, sfep->namelen);
41380 + if (filldir(dirent, name, sfep->namelen,
41381 + off & 0x7fffffff, ino, DT_UNKNOWN)) {
41382 + *offset = off & 0x7fffffff;
41383 + return 0;
41384 + }
41385 + } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
41386 off & 0x7fffffff, ino, DT_UNKNOWN)) {
41387 *offset = off & 0x7fffffff;
41388 return 0;
41389 diff -urNp linux-2.6.39.4/grsecurity/gracl_alloc.c linux-2.6.39.4/grsecurity/gracl_alloc.c
41390 --- linux-2.6.39.4/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
41391 +++ linux-2.6.39.4/grsecurity/gracl_alloc.c 2011-08-05 19:44:37.000000000 -0400
41392 @@ -0,0 +1,105 @@
41393 +#include <linux/kernel.h>
41394 +#include <linux/mm.h>
41395 +#include <linux/slab.h>
41396 +#include <linux/vmalloc.h>
41397 +#include <linux/gracl.h>
41398 +#include <linux/grsecurity.h>
41399 +
41400 +static unsigned long alloc_stack_next = 1;
41401 +static unsigned long alloc_stack_size = 1;
41402 +static void **alloc_stack;
41403 +
41404 +static __inline__ int
41405 +alloc_pop(void)
41406 +{
41407 + if (alloc_stack_next == 1)
41408 + return 0;
41409 +
41410 + kfree(alloc_stack[alloc_stack_next - 2]);
41411 +
41412 + alloc_stack_next--;
41413 +
41414 + return 1;
41415 +}
41416 +
41417 +static __inline__ int
41418 +alloc_push(void *buf)
41419 +{
41420 + if (alloc_stack_next >= alloc_stack_size)
41421 + return 1;
41422 +
41423 + alloc_stack[alloc_stack_next - 1] = buf;
41424 +
41425 + alloc_stack_next++;
41426 +
41427 + return 0;
41428 +}
41429 +
41430 +void *
41431 +acl_alloc(unsigned long len)
41432 +{
41433 + void *ret = NULL;
41434 +
41435 + if (!len || len > PAGE_SIZE)
41436 + goto out;
41437 +
41438 + ret = kmalloc(len, GFP_KERNEL);
41439 +
41440 + if (ret) {
41441 + if (alloc_push(ret)) {
41442 + kfree(ret);
41443 + ret = NULL;
41444 + }
41445 + }
41446 +
41447 +out:
41448 + return ret;
41449 +}
41450 +
41451 +void *
41452 +acl_alloc_num(unsigned long num, unsigned long len)
41453 +{
41454 + if (!len || (num > (PAGE_SIZE / len)))
41455 + return NULL;
41456 +
41457 + return acl_alloc(num * len);
41458 +}
41459 +
41460 +void
41461 +acl_free_all(void)
41462 +{
41463 + if (gr_acl_is_enabled() || !alloc_stack)
41464 + return;
41465 +
41466 + while (alloc_pop()) ;
41467 +
41468 + if (alloc_stack) {
41469 + if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
41470 + kfree(alloc_stack);
41471 + else
41472 + vfree(alloc_stack);
41473 + }
41474 +
41475 + alloc_stack = NULL;
41476 + alloc_stack_size = 1;
41477 + alloc_stack_next = 1;
41478 +
41479 + return;
41480 +}
41481 +
41482 +int
41483 +acl_alloc_stack_init(unsigned long size)
41484 +{
41485 + if ((size * sizeof (void *)) <= PAGE_SIZE)
41486 + alloc_stack =
41487 + (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
41488 + else
41489 + alloc_stack = (void **) vmalloc(size * sizeof (void *));
41490 +
41491 + alloc_stack_size = size;
41492 +
41493 + if (!alloc_stack)
41494 + return 0;
41495 + else
41496 + return 1;
41497 +}
41498 diff -urNp linux-2.6.39.4/grsecurity/gracl.c linux-2.6.39.4/grsecurity/gracl.c
41499 --- linux-2.6.39.4/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
41500 +++ linux-2.6.39.4/grsecurity/gracl.c 2011-08-05 19:44:37.000000000 -0400
41501 @@ -0,0 +1,4106 @@
41502 +#include <linux/kernel.h>
41503 +#include <linux/module.h>
41504 +#include <linux/sched.h>
41505 +#include <linux/mm.h>
41506 +#include <linux/file.h>
41507 +#include <linux/fs.h>
41508 +#include <linux/namei.h>
41509 +#include <linux/mount.h>
41510 +#include <linux/tty.h>
41511 +#include <linux/proc_fs.h>
41512 +#include <linux/lglock.h>
41513 +#include <linux/slab.h>
41514 +#include <linux/vmalloc.h>
41515 +#include <linux/types.h>
41516 +#include <linux/sysctl.h>
41517 +#include <linux/netdevice.h>
41518 +#include <linux/ptrace.h>
41519 +#include <linux/gracl.h>
41520 +#include <linux/gralloc.h>
41521 +#include <linux/grsecurity.h>
41522 +#include <linux/grinternal.h>
41523 +#include <linux/pid_namespace.h>
41524 +#include <linux/fdtable.h>
41525 +#include <linux/percpu.h>
41526 +
41527 +#include <asm/uaccess.h>
41528 +#include <asm/errno.h>
41529 +#include <asm/mman.h>
41530 +
41531 +static struct acl_role_db acl_role_set;
41532 +static struct name_db name_set;
41533 +static struct inodev_db inodev_set;
41534 +
41535 +/* for keeping track of userspace pointers used for subjects, so we
41536 + can share references in the kernel as well
41537 +*/
41538 +
41539 +static struct path real_root;
41540 +
41541 +static struct acl_subj_map_db subj_map_set;
41542 +
41543 +static struct acl_role_label *default_role;
41544 +
41545 +static struct acl_role_label *role_list;
41546 +
41547 +static u16 acl_sp_role_value;
41548 +
41549 +extern char *gr_shared_page[4];
41550 +static DEFINE_MUTEX(gr_dev_mutex);
41551 +DEFINE_RWLOCK(gr_inode_lock);
41552 +
41553 +struct gr_arg *gr_usermode;
41554 +
41555 +static unsigned int gr_status __read_only = GR_STATUS_INIT;
41556 +
41557 +extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
41558 +extern void gr_clear_learn_entries(void);
41559 +
41560 +#ifdef CONFIG_GRKERNSEC_RESLOG
41561 +extern void gr_log_resource(const struct task_struct *task,
41562 + const int res, const unsigned long wanted, const int gt);
41563 +#endif
41564 +
41565 +unsigned char *gr_system_salt;
41566 +unsigned char *gr_system_sum;
41567 +
41568 +static struct sprole_pw **acl_special_roles = NULL;
41569 +static __u16 num_sprole_pws = 0;
41570 +
41571 +static struct acl_role_label *kernel_role = NULL;
41572 +
41573 +static unsigned int gr_auth_attempts = 0;
41574 +static unsigned long gr_auth_expires = 0UL;
41575 +
41576 +#ifdef CONFIG_NET
41577 +extern struct vfsmount *sock_mnt;
41578 +#endif
41579 +
41580 +extern struct vfsmount *pipe_mnt;
41581 +extern struct vfsmount *shm_mnt;
41582 +#ifdef CONFIG_HUGETLBFS
41583 +extern struct vfsmount *hugetlbfs_vfsmount;
41584 +#endif
41585 +
41586 +static struct acl_object_label *fakefs_obj_rw;
41587 +static struct acl_object_label *fakefs_obj_rwx;
41588 +
41589 +extern int gr_init_uidset(void);
41590 +extern void gr_free_uidset(void);
41591 +extern void gr_remove_uid(uid_t uid);
41592 +extern int gr_find_uid(uid_t uid);
41593 +
41594 +DECLARE_BRLOCK(vfsmount_lock);
41595 +
41596 +__inline__ int
41597 +gr_acl_is_enabled(void)
41598 +{
41599 + return (gr_status & GR_READY);
41600 +}
41601 +
41602 +#ifdef CONFIG_BTRFS_FS
41603 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
41604 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
41605 +#endif
41606 +
41607 +static inline dev_t __get_dev(const struct dentry *dentry)
41608 +{
41609 +#ifdef CONFIG_BTRFS_FS
41610 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
41611 + return get_btrfs_dev_from_inode(dentry->d_inode);
41612 + else
41613 +#endif
41614 + return dentry->d_inode->i_sb->s_dev;
41615 +}
41616 +
41617 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
41618 +{
41619 + return __get_dev(dentry);
41620 +}
41621 +
41622 +static char gr_task_roletype_to_char(struct task_struct *task)
41623 +{
41624 + switch (task->role->roletype &
41625 + (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
41626 + GR_ROLE_SPECIAL)) {
41627 + case GR_ROLE_DEFAULT:
41628 + return 'D';
41629 + case GR_ROLE_USER:
41630 + return 'U';
41631 + case GR_ROLE_GROUP:
41632 + return 'G';
41633 + case GR_ROLE_SPECIAL:
41634 + return 'S';
41635 + }
41636 +
41637 + return 'X';
41638 +}
41639 +
41640 +char gr_roletype_to_char(void)
41641 +{
41642 + return gr_task_roletype_to_char(current);
41643 +}
41644 +
41645 +__inline__ int
41646 +gr_acl_tpe_check(void)
41647 +{
41648 + if (unlikely(!(gr_status & GR_READY)))
41649 + return 0;
41650 + if (current->role->roletype & GR_ROLE_TPE)
41651 + return 1;
41652 + else
41653 + return 0;
41654 +}
41655 +
41656 +int
41657 +gr_handle_rawio(const struct inode *inode)
41658 +{
41659 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
41660 + if (inode && S_ISBLK(inode->i_mode) &&
41661 + grsec_enable_chroot_caps && proc_is_chrooted(current) &&
41662 + !capable(CAP_SYS_RAWIO))
41663 + return 1;
41664 +#endif
41665 + return 0;
41666 +}
41667 +
41668 +static int
41669 +gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
41670 +{
41671 + if (likely(lena != lenb))
41672 + return 0;
41673 +
41674 + return !memcmp(a, b, lena);
41675 +}
41676 +
41677 +static int prepend(char **buffer, int *buflen, const char *str, int namelen)
41678 +{
41679 + *buflen -= namelen;
41680 + if (*buflen < 0)
41681 + return -ENAMETOOLONG;
41682 + *buffer -= namelen;
41683 + memcpy(*buffer, str, namelen);
41684 + return 0;
41685 +}
41686 +
41687 +static int prepend_name(char **buffer, int *buflen, struct qstr *name)
41688 +{
41689 + return prepend(buffer, buflen, name->name, name->len);
41690 +}
41691 +
41692 +static int prepend_path(const struct path *path, struct path *root,
41693 + char **buffer, int *buflen)
41694 +{
41695 + struct dentry *dentry = path->dentry;
41696 + struct vfsmount *vfsmnt = path->mnt;
41697 + bool slash = false;
41698 + int error = 0;
41699 +
41700 + while (dentry != root->dentry || vfsmnt != root->mnt) {
41701 + struct dentry * parent;
41702 +
41703 + if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
41704 + /* Global root? */
41705 + if (vfsmnt->mnt_parent == vfsmnt) {
41706 + goto out;
41707 + }
41708 + dentry = vfsmnt->mnt_mountpoint;
41709 + vfsmnt = vfsmnt->mnt_parent;
41710 + continue;
41711 + }
41712 + parent = dentry->d_parent;
41713 + prefetch(parent);
41714 + spin_lock(&dentry->d_lock);
41715 + error = prepend_name(buffer, buflen, &dentry->d_name);
41716 + spin_unlock(&dentry->d_lock);
41717 + if (!error)
41718 + error = prepend(buffer, buflen, "/", 1);
41719 + if (error)
41720 + break;
41721 +
41722 + slash = true;
41723 + dentry = parent;
41724 + }
41725 +
41726 +out:
41727 + if (!error && !slash)
41728 + error = prepend(buffer, buflen, "/", 1);
41729 +
41730 + return error;
41731 +}
41732 +
41733 +/* this must be called with vfsmount_lock and rename_lock held */
41734 +
41735 +static char *__our_d_path(const struct path *path, struct path *root,
41736 + char *buf, int buflen)
41737 +{
41738 + char *res = buf + buflen;
41739 + int error;
41740 +
41741 + prepend(&res, &buflen, "\0", 1);
41742 + error = prepend_path(path, root, &res, &buflen);
41743 + if (error)
41744 + return ERR_PTR(error);
41745 +
41746 + return res;
41747 +}
41748 +
41749 +static char *
41750 +gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
41751 +{
41752 + char *retval;
41753 +
41754 + retval = __our_d_path(path, root, buf, buflen);
41755 + if (unlikely(IS_ERR(retval)))
41756 + retval = strcpy(buf, "<path too long>");
41757 + else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
41758 + retval[1] = '\0';
41759 +
41760 + return retval;
41761 +}
41762 +
41763 +static char *
41764 +__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41765 + char *buf, int buflen)
41766 +{
41767 + struct path path;
41768 + char *res;
41769 +
41770 + path.dentry = (struct dentry *)dentry;
41771 + path.mnt = (struct vfsmount *)vfsmnt;
41772 +
41773 + /* we can use real_root.dentry, real_root.mnt, because this is only called
41774 + by the RBAC system */
41775 + res = gen_full_path(&path, &real_root, buf, buflen);
41776 +
41777 + return res;
41778 +}
41779 +
41780 +static char *
41781 +d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
41782 + char *buf, int buflen)
41783 +{
41784 + char *res;
41785 + struct path path;
41786 + struct path root;
41787 + struct task_struct *reaper = &init_task;
41788 +
41789 + path.dentry = (struct dentry *)dentry;
41790 + path.mnt = (struct vfsmount *)vfsmnt;
41791 +
41792 + /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
41793 + get_fs_root(reaper->fs, &root);
41794 +
41795 + write_seqlock(&rename_lock);
41796 + br_read_lock(vfsmount_lock);
41797 + res = gen_full_path(&path, &root, buf, buflen);
41798 + br_read_unlock(vfsmount_lock);
41799 + write_sequnlock(&rename_lock);
41800 +
41801 + path_put(&root);
41802 + return res;
41803 +}
41804 +
41805 +static char *
41806 +gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
41807 +{
41808 + char *ret;
41809 + write_seqlock(&rename_lock);
41810 + br_read_lock(vfsmount_lock);
41811 + ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41812 + PAGE_SIZE);
41813 + br_read_unlock(vfsmount_lock);
41814 + write_sequnlock(&rename_lock);
41815 + return ret;
41816 +}
41817 +
41818 +char *
41819 +gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
41820 +{
41821 + return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
41822 + PAGE_SIZE);
41823 +}
41824 +
41825 +char *
41826 +gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
41827 +{
41828 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
41829 + PAGE_SIZE);
41830 +}
41831 +
41832 +char *
41833 +gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
41834 +{
41835 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
41836 + PAGE_SIZE);
41837 +}
41838 +
41839 +char *
41840 +gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
41841 +{
41842 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
41843 + PAGE_SIZE);
41844 +}
41845 +
41846 +char *
41847 +gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
41848 +{
41849 + return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
41850 + PAGE_SIZE);
41851 +}
41852 +
41853 +__inline__ __u32
41854 +to_gr_audit(const __u32 reqmode)
41855 +{
41856 + /* masks off auditable permission flags, then shifts them to create
41857 + auditing flags, and adds the special case of append auditing if
41858 + we're requesting write */
41859 + return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
41860 +}
41861 +
41862 +struct acl_subject_label *
41863 +lookup_subject_map(const struct acl_subject_label *userp)
41864 +{
41865 + unsigned int index = shash(userp, subj_map_set.s_size);
41866 + struct subject_map *match;
41867 +
41868 + match = subj_map_set.s_hash[index];
41869 +
41870 + while (match && match->user != userp)
41871 + match = match->next;
41872 +
41873 + if (match != NULL)
41874 + return match->kernel;
41875 + else
41876 + return NULL;
41877 +}
41878 +
41879 +static void
41880 +insert_subj_map_entry(struct subject_map *subjmap)
41881 +{
41882 + unsigned int index = shash(subjmap->user, subj_map_set.s_size);
41883 + struct subject_map **curr;
41884 +
41885 + subjmap->prev = NULL;
41886 +
41887 + curr = &subj_map_set.s_hash[index];
41888 + if (*curr != NULL)
41889 + (*curr)->prev = subjmap;
41890 +
41891 + subjmap->next = *curr;
41892 + *curr = subjmap;
41893 +
41894 + return;
41895 +}
41896 +
41897 +static struct acl_role_label *
41898 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
41899 + const gid_t gid)
41900 +{
41901 + unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
41902 + struct acl_role_label *match;
41903 + struct role_allowed_ip *ipp;
41904 + unsigned int x;
41905 + u32 curr_ip = task->signal->curr_ip;
41906 +
41907 + task->signal->saved_ip = curr_ip;
41908 +
41909 + match = acl_role_set.r_hash[index];
41910 +
41911 + while (match) {
41912 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
41913 + for (x = 0; x < match->domain_child_num; x++) {
41914 + if (match->domain_children[x] == uid)
41915 + goto found;
41916 + }
41917 + } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
41918 + break;
41919 + match = match->next;
41920 + }
41921 +found:
41922 + if (match == NULL) {
41923 + try_group:
41924 + index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
41925 + match = acl_role_set.r_hash[index];
41926 +
41927 + while (match) {
41928 + if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
41929 + for (x = 0; x < match->domain_child_num; x++) {
41930 + if (match->domain_children[x] == gid)
41931 + goto found2;
41932 + }
41933 + } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
41934 + break;
41935 + match = match->next;
41936 + }
41937 +found2:
41938 + if (match == NULL)
41939 + match = default_role;
41940 + if (match->allowed_ips == NULL)
41941 + return match;
41942 + else {
41943 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41944 + if (likely
41945 + ((ntohl(curr_ip) & ipp->netmask) ==
41946 + (ntohl(ipp->addr) & ipp->netmask)))
41947 + return match;
41948 + }
41949 + match = default_role;
41950 + }
41951 + } else if (match->allowed_ips == NULL) {
41952 + return match;
41953 + } else {
41954 + for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
41955 + if (likely
41956 + ((ntohl(curr_ip) & ipp->netmask) ==
41957 + (ntohl(ipp->addr) & ipp->netmask)))
41958 + return match;
41959 + }
41960 + goto try_group;
41961 + }
41962 +
41963 + return match;
41964 +}
41965 +
41966 +struct acl_subject_label *
41967 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
41968 + const struct acl_role_label *role)
41969 +{
41970 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
41971 + struct acl_subject_label *match;
41972 +
41973 + match = role->subj_hash[index];
41974 +
41975 + while (match && (match->inode != ino || match->device != dev ||
41976 + (match->mode & GR_DELETED))) {
41977 + match = match->next;
41978 + }
41979 +
41980 + if (match && !(match->mode & GR_DELETED))
41981 + return match;
41982 + else
41983 + return NULL;
41984 +}
41985 +
41986 +struct acl_subject_label *
41987 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
41988 + const struct acl_role_label *role)
41989 +{
41990 + unsigned int index = fhash(ino, dev, role->subj_hash_size);
41991 + struct acl_subject_label *match;
41992 +
41993 + match = role->subj_hash[index];
41994 +
41995 + while (match && (match->inode != ino || match->device != dev ||
41996 + !(match->mode & GR_DELETED))) {
41997 + match = match->next;
41998 + }
41999 +
42000 + if (match && (match->mode & GR_DELETED))
42001 + return match;
42002 + else
42003 + return NULL;
42004 +}
42005 +
42006 +static struct acl_object_label *
42007 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
42008 + const struct acl_subject_label *subj)
42009 +{
42010 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
42011 + struct acl_object_label *match;
42012 +
42013 + match = subj->obj_hash[index];
42014 +
42015 + while (match && (match->inode != ino || match->device != dev ||
42016 + (match->mode & GR_DELETED))) {
42017 + match = match->next;
42018 + }
42019 +
42020 + if (match && !(match->mode & GR_DELETED))
42021 + return match;
42022 + else
42023 + return NULL;
42024 +}
42025 +
42026 +static struct acl_object_label *
42027 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
42028 + const struct acl_subject_label *subj)
42029 +{
42030 + unsigned int index = fhash(ino, dev, subj->obj_hash_size);
42031 + struct acl_object_label *match;
42032 +
42033 + match = subj->obj_hash[index];
42034 +
42035 + while (match && (match->inode != ino || match->device != dev ||
42036 + !(match->mode & GR_DELETED))) {
42037 + match = match->next;
42038 + }
42039 +
42040 + if (match && (match->mode & GR_DELETED))
42041 + return match;
42042 +
42043 + match = subj->obj_hash[index];
42044 +
42045 + while (match && (match->inode != ino || match->device != dev ||
42046 + (match->mode & GR_DELETED))) {
42047 + match = match->next;
42048 + }
42049 +
42050 + if (match && !(match->mode & GR_DELETED))
42051 + return match;
42052 + else
42053 + return NULL;
42054 +}
42055 +
42056 +static struct name_entry *
42057 +lookup_name_entry(const char *name)
42058 +{
42059 + unsigned int len = strlen(name);
42060 + unsigned int key = full_name_hash(name, len);
42061 + unsigned int index = key % name_set.n_size;
42062 + struct name_entry *match;
42063 +
42064 + match = name_set.n_hash[index];
42065 +
42066 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
42067 + match = match->next;
42068 +
42069 + return match;
42070 +}
42071 +
42072 +static struct name_entry *
42073 +lookup_name_entry_create(const char *name)
42074 +{
42075 + unsigned int len = strlen(name);
42076 + unsigned int key = full_name_hash(name, len);
42077 + unsigned int index = key % name_set.n_size;
42078 + struct name_entry *match;
42079 +
42080 + match = name_set.n_hash[index];
42081 +
42082 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
42083 + !match->deleted))
42084 + match = match->next;
42085 +
42086 + if (match && match->deleted)
42087 + return match;
42088 +
42089 + match = name_set.n_hash[index];
42090 +
42091 + while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
42092 + match->deleted))
42093 + match = match->next;
42094 +
42095 + if (match && !match->deleted)
42096 + return match;
42097 + else
42098 + return NULL;
42099 +}
42100 +
42101 +static struct inodev_entry *
42102 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
42103 +{
42104 + unsigned int index = fhash(ino, dev, inodev_set.i_size);
42105 + struct inodev_entry *match;
42106 +
42107 + match = inodev_set.i_hash[index];
42108 +
42109 + while (match && (match->nentry->inode != ino || match->nentry->device != dev))
42110 + match = match->next;
42111 +
42112 + return match;
42113 +}
42114 +
42115 +static void
42116 +insert_inodev_entry(struct inodev_entry *entry)
42117 +{
42118 + unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
42119 + inodev_set.i_size);
42120 + struct inodev_entry **curr;
42121 +
42122 + entry->prev = NULL;
42123 +
42124 + curr = &inodev_set.i_hash[index];
42125 + if (*curr != NULL)
42126 + (*curr)->prev = entry;
42127 +
42128 + entry->next = *curr;
42129 + *curr = entry;
42130 +
42131 + return;
42132 +}
42133 +
42134 +static void
42135 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
42136 +{
42137 + unsigned int index =
42138 + rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
42139 + struct acl_role_label **curr;
42140 + struct acl_role_label *tmp;
42141 +
42142 + curr = &acl_role_set.r_hash[index];
42143 +
42144 + /* if role was already inserted due to domains and already has
42145 + a role in the same bucket as it attached, then we need to
42146 + combine these two buckets
42147 + */
42148 + if (role->next) {
42149 + tmp = role->next;
42150 + while (tmp->next)
42151 + tmp = tmp->next;
42152 + tmp->next = *curr;
42153 + } else
42154 + role->next = *curr;
42155 + *curr = role;
42156 +
42157 + return;
42158 +}
42159 +
42160 +static void
42161 +insert_acl_role_label(struct acl_role_label *role)
42162 +{
42163 + int i;
42164 +
42165 + if (role_list == NULL) {
42166 + role_list = role;
42167 + role->prev = NULL;
42168 + } else {
42169 + role->prev = role_list;
42170 + role_list = role;
42171 + }
42172 +
42173 + /* used for hash chains */
42174 + role->next = NULL;
42175 +
42176 + if (role->roletype & GR_ROLE_DOMAIN) {
42177 + for (i = 0; i < role->domain_child_num; i++)
42178 + __insert_acl_role_label(role, role->domain_children[i]);
42179 + } else
42180 + __insert_acl_role_label(role, role->uidgid);
42181 +}
42182 +
42183 +static int
42184 +insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
42185 +{
42186 + struct name_entry **curr, *nentry;
42187 + struct inodev_entry *ientry;
42188 + unsigned int len = strlen(name);
42189 + unsigned int key = full_name_hash(name, len);
42190 + unsigned int index = key % name_set.n_size;
42191 +
42192 + curr = &name_set.n_hash[index];
42193 +
42194 + while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
42195 + curr = &((*curr)->next);
42196 +
42197 + if (*curr != NULL)
42198 + return 1;
42199 +
42200 + nentry = acl_alloc(sizeof (struct name_entry));
42201 + if (nentry == NULL)
42202 + return 0;
42203 + ientry = acl_alloc(sizeof (struct inodev_entry));
42204 + if (ientry == NULL)
42205 + return 0;
42206 + ientry->nentry = nentry;
42207 +
42208 + nentry->key = key;
42209 + nentry->name = name;
42210 + nentry->inode = inode;
42211 + nentry->device = device;
42212 + nentry->len = len;
42213 + nentry->deleted = deleted;
42214 +
42215 + nentry->prev = NULL;
42216 + curr = &name_set.n_hash[index];
42217 + if (*curr != NULL)
42218 + (*curr)->prev = nentry;
42219 + nentry->next = *curr;
42220 + *curr = nentry;
42221 +
42222 + /* insert us into the table searchable by inode/dev */
42223 + insert_inodev_entry(ientry);
42224 +
42225 + return 1;
42226 +}
42227 +
42228 +static void
42229 +insert_acl_obj_label(struct acl_object_label *obj,
42230 + struct acl_subject_label *subj)
42231 +{
42232 + unsigned int index =
42233 + fhash(obj->inode, obj->device, subj->obj_hash_size);
42234 + struct acl_object_label **curr;
42235 +
42236 +
42237 + obj->prev = NULL;
42238 +
42239 + curr = &subj->obj_hash[index];
42240 + if (*curr != NULL)
42241 + (*curr)->prev = obj;
42242 +
42243 + obj->next = *curr;
42244 + *curr = obj;
42245 +
42246 + return;
42247 +}
42248 +
42249 +static void
42250 +insert_acl_subj_label(struct acl_subject_label *obj,
42251 + struct acl_role_label *role)
42252 +{
42253 + unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
42254 + struct acl_subject_label **curr;
42255 +
42256 + obj->prev = NULL;
42257 +
42258 + curr = &role->subj_hash[index];
42259 + if (*curr != NULL)
42260 + (*curr)->prev = obj;
42261 +
42262 + obj->next = *curr;
42263 + *curr = obj;
42264 +
42265 + return;
42266 +}
42267 +
42268 +/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
42269 +
42270 +static void *
42271 +create_table(__u32 * len, int elementsize)
42272 +{
42273 + unsigned int table_sizes[] = {
42274 + 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
42275 + 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
42276 + 4194301, 8388593, 16777213, 33554393, 67108859
42277 + };
42278 + void *newtable = NULL;
42279 + unsigned int pwr = 0;
42280 +
42281 + while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
42282 + table_sizes[pwr] <= *len)
42283 + pwr++;
42284 +
42285 + if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
42286 + return newtable;
42287 +
42288 + if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
42289 + newtable =
42290 + kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
42291 + else
42292 + newtable = vmalloc(table_sizes[pwr] * elementsize);
42293 +
42294 + *len = table_sizes[pwr];
42295 +
42296 + return newtable;
42297 +}
42298 +
42299 +static int
42300 +init_variables(const struct gr_arg *arg)
42301 +{
42302 + struct task_struct *reaper = &init_task;
42303 + unsigned int stacksize;
42304 +
42305 + subj_map_set.s_size = arg->role_db.num_subjects;
42306 + acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
42307 + name_set.n_size = arg->role_db.num_objects;
42308 + inodev_set.i_size = arg->role_db.num_objects;
42309 +
42310 + if (!subj_map_set.s_size || !acl_role_set.r_size ||
42311 + !name_set.n_size || !inodev_set.i_size)
42312 + return 1;
42313 +
42314 + if (!gr_init_uidset())
42315 + return 1;
42316 +
42317 + /* set up the stack that holds allocation info */
42318 +
42319 + stacksize = arg->role_db.num_pointers + 5;
42320 +
42321 + if (!acl_alloc_stack_init(stacksize))
42322 + return 1;
42323 +
42324 + /* grab reference for the real root dentry and vfsmount */
42325 + get_fs_root(reaper->fs, &real_root);
42326 +
42327 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42328 + printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
42329 +#endif
42330 +
42331 + fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
42332 + if (fakefs_obj_rw == NULL)
42333 + return 1;
42334 + fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
42335 +
42336 + fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
42337 + if (fakefs_obj_rwx == NULL)
42338 + return 1;
42339 + fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
42340 +
42341 + subj_map_set.s_hash =
42342 + (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
42343 + acl_role_set.r_hash =
42344 + (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
42345 + name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
42346 + inodev_set.i_hash =
42347 + (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
42348 +
42349 + if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
42350 + !name_set.n_hash || !inodev_set.i_hash)
42351 + return 1;
42352 +
42353 + memset(subj_map_set.s_hash, 0,
42354 + sizeof(struct subject_map *) * subj_map_set.s_size);
42355 + memset(acl_role_set.r_hash, 0,
42356 + sizeof (struct acl_role_label *) * acl_role_set.r_size);
42357 + memset(name_set.n_hash, 0,
42358 + sizeof (struct name_entry *) * name_set.n_size);
42359 + memset(inodev_set.i_hash, 0,
42360 + sizeof (struct inodev_entry *) * inodev_set.i_size);
42361 +
42362 + return 0;
42363 +}
42364 +
42365 +/* free information not needed after startup
42366 + currently contains user->kernel pointer mappings for subjects
42367 +*/
42368 +
42369 +static void
42370 +free_init_variables(void)
42371 +{
42372 + __u32 i;
42373 +
42374 + if (subj_map_set.s_hash) {
42375 + for (i = 0; i < subj_map_set.s_size; i++) {
42376 + if (subj_map_set.s_hash[i]) {
42377 + kfree(subj_map_set.s_hash[i]);
42378 + subj_map_set.s_hash[i] = NULL;
42379 + }
42380 + }
42381 +
42382 + if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
42383 + PAGE_SIZE)
42384 + kfree(subj_map_set.s_hash);
42385 + else
42386 + vfree(subj_map_set.s_hash);
42387 + }
42388 +
42389 + return;
42390 +}
42391 +
42392 +static void
42393 +free_variables(void)
42394 +{
42395 + struct acl_subject_label *s;
42396 + struct acl_role_label *r;
42397 + struct task_struct *task, *task2;
42398 + unsigned int x;
42399 +
42400 + gr_clear_learn_entries();
42401 +
42402 + read_lock(&tasklist_lock);
42403 + do_each_thread(task2, task) {
42404 + task->acl_sp_role = 0;
42405 + task->acl_role_id = 0;
42406 + task->acl = NULL;
42407 + task->role = NULL;
42408 + } while_each_thread(task2, task);
42409 + read_unlock(&tasklist_lock);
42410 +
42411 + /* release the reference to the real root dentry and vfsmount */
42412 + path_put(&real_root);
42413 +
42414 + /* free all object hash tables */
42415 +
42416 + FOR_EACH_ROLE_START(r)
42417 + if (r->subj_hash == NULL)
42418 + goto next_role;
42419 + FOR_EACH_SUBJECT_START(r, s, x)
42420 + if (s->obj_hash == NULL)
42421 + break;
42422 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42423 + kfree(s->obj_hash);
42424 + else
42425 + vfree(s->obj_hash);
42426 + FOR_EACH_SUBJECT_END(s, x)
42427 + FOR_EACH_NESTED_SUBJECT_START(r, s)
42428 + if (s->obj_hash == NULL)
42429 + break;
42430 + if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
42431 + kfree(s->obj_hash);
42432 + else
42433 + vfree(s->obj_hash);
42434 + FOR_EACH_NESTED_SUBJECT_END(s)
42435 + if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
42436 + kfree(r->subj_hash);
42437 + else
42438 + vfree(r->subj_hash);
42439 + r->subj_hash = NULL;
42440 +next_role:
42441 + FOR_EACH_ROLE_END(r)
42442 +
42443 + acl_free_all();
42444 +
42445 + if (acl_role_set.r_hash) {
42446 + if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
42447 + PAGE_SIZE)
42448 + kfree(acl_role_set.r_hash);
42449 + else
42450 + vfree(acl_role_set.r_hash);
42451 + }
42452 + if (name_set.n_hash) {
42453 + if ((name_set.n_size * sizeof (struct name_entry *)) <=
42454 + PAGE_SIZE)
42455 + kfree(name_set.n_hash);
42456 + else
42457 + vfree(name_set.n_hash);
42458 + }
42459 +
42460 + if (inodev_set.i_hash) {
42461 + if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
42462 + PAGE_SIZE)
42463 + kfree(inodev_set.i_hash);
42464 + else
42465 + vfree(inodev_set.i_hash);
42466 + }
42467 +
42468 + gr_free_uidset();
42469 +
42470 + memset(&name_set, 0, sizeof (struct name_db));
42471 + memset(&inodev_set, 0, sizeof (struct inodev_db));
42472 + memset(&acl_role_set, 0, sizeof (struct acl_role_db));
42473 + memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
42474 +
42475 + default_role = NULL;
42476 + role_list = NULL;
42477 +
42478 + return;
42479 +}
42480 +
42481 +static __u32
42482 +count_user_objs(struct acl_object_label *userp)
42483 +{
42484 + struct acl_object_label o_tmp;
42485 + __u32 num = 0;
42486 +
42487 + while (userp) {
42488 + if (copy_from_user(&o_tmp, userp,
42489 + sizeof (struct acl_object_label)))
42490 + break;
42491 +
42492 + userp = o_tmp.prev;
42493 + num++;
42494 + }
42495 +
42496 + return num;
42497 +}
42498 +
42499 +static struct acl_subject_label *
42500 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
42501 +
42502 +static int
42503 +copy_user_glob(struct acl_object_label *obj)
42504 +{
42505 + struct acl_object_label *g_tmp, **guser;
42506 + unsigned int len;
42507 + char *tmp;
42508 +
42509 + if (obj->globbed == NULL)
42510 + return 0;
42511 +
42512 + guser = &obj->globbed;
42513 + while (*guser) {
42514 + g_tmp = (struct acl_object_label *)
42515 + acl_alloc(sizeof (struct acl_object_label));
42516 + if (g_tmp == NULL)
42517 + return -ENOMEM;
42518 +
42519 + if (copy_from_user(g_tmp, *guser,
42520 + sizeof (struct acl_object_label)))
42521 + return -EFAULT;
42522 +
42523 + len = strnlen_user(g_tmp->filename, PATH_MAX);
42524 +
42525 + if (!len || len >= PATH_MAX)
42526 + return -EINVAL;
42527 +
42528 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42529 + return -ENOMEM;
42530 +
42531 + if (copy_from_user(tmp, g_tmp->filename, len))
42532 + return -EFAULT;
42533 + tmp[len-1] = '\0';
42534 + g_tmp->filename = tmp;
42535 +
42536 + *guser = g_tmp;
42537 + guser = &(g_tmp->next);
42538 + }
42539 +
42540 + return 0;
42541 +}
42542 +
42543 +static int
42544 +copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
42545 + struct acl_role_label *role)
42546 +{
42547 + struct acl_object_label *o_tmp;
42548 + unsigned int len;
42549 + int ret;
42550 + char *tmp;
42551 +
42552 + while (userp) {
42553 + if ((o_tmp = (struct acl_object_label *)
42554 + acl_alloc(sizeof (struct acl_object_label))) == NULL)
42555 + return -ENOMEM;
42556 +
42557 + if (copy_from_user(o_tmp, userp,
42558 + sizeof (struct acl_object_label)))
42559 + return -EFAULT;
42560 +
42561 + userp = o_tmp->prev;
42562 +
42563 + len = strnlen_user(o_tmp->filename, PATH_MAX);
42564 +
42565 + if (!len || len >= PATH_MAX)
42566 + return -EINVAL;
42567 +
42568 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42569 + return -ENOMEM;
42570 +
42571 + if (copy_from_user(tmp, o_tmp->filename, len))
42572 + return -EFAULT;
42573 + tmp[len-1] = '\0';
42574 + o_tmp->filename = tmp;
42575 +
42576 + insert_acl_obj_label(o_tmp, subj);
42577 + if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
42578 + o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
42579 + return -ENOMEM;
42580 +
42581 + ret = copy_user_glob(o_tmp);
42582 + if (ret)
42583 + return ret;
42584 +
42585 + if (o_tmp->nested) {
42586 + o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
42587 + if (IS_ERR(o_tmp->nested))
42588 + return PTR_ERR(o_tmp->nested);
42589 +
42590 + /* insert into nested subject list */
42591 + o_tmp->nested->next = role->hash->first;
42592 + role->hash->first = o_tmp->nested;
42593 + }
42594 + }
42595 +
42596 + return 0;
42597 +}
42598 +
42599 +static __u32
42600 +count_user_subjs(struct acl_subject_label *userp)
42601 +{
42602 + struct acl_subject_label s_tmp;
42603 + __u32 num = 0;
42604 +
42605 + while (userp) {
42606 + if (copy_from_user(&s_tmp, userp,
42607 + sizeof (struct acl_subject_label)))
42608 + break;
42609 +
42610 + userp = s_tmp.prev;
42611 + /* do not count nested subjects against this count, since
42612 + they are not included in the hash table, but are
42613 + attached to objects. We have already counted
42614 + the subjects in userspace for the allocation
42615 + stack
42616 + */
42617 + if (!(s_tmp.mode & GR_NESTED))
42618 + num++;
42619 + }
42620 +
42621 + return num;
42622 +}
42623 +
42624 +static int
42625 +copy_user_allowedips(struct acl_role_label *rolep)
42626 +{
42627 + struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
42628 +
42629 + ruserip = rolep->allowed_ips;
42630 +
42631 + while (ruserip) {
42632 + rlast = rtmp;
42633 +
42634 + if ((rtmp = (struct role_allowed_ip *)
42635 + acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
42636 + return -ENOMEM;
42637 +
42638 + if (copy_from_user(rtmp, ruserip,
42639 + sizeof (struct role_allowed_ip)))
42640 + return -EFAULT;
42641 +
42642 + ruserip = rtmp->prev;
42643 +
42644 + if (!rlast) {
42645 + rtmp->prev = NULL;
42646 + rolep->allowed_ips = rtmp;
42647 + } else {
42648 + rlast->next = rtmp;
42649 + rtmp->prev = rlast;
42650 + }
42651 +
42652 + if (!ruserip)
42653 + rtmp->next = NULL;
42654 + }
42655 +
42656 + return 0;
42657 +}
42658 +
42659 +static int
42660 +copy_user_transitions(struct acl_role_label *rolep)
42661 +{
42662 + struct role_transition *rusertp, *rtmp = NULL, *rlast;
42663 +
42664 + unsigned int len;
42665 + char *tmp;
42666 +
42667 + rusertp = rolep->transitions;
42668 +
42669 + while (rusertp) {
42670 + rlast = rtmp;
42671 +
42672 + if ((rtmp = (struct role_transition *)
42673 + acl_alloc(sizeof (struct role_transition))) == NULL)
42674 + return -ENOMEM;
42675 +
42676 + if (copy_from_user(rtmp, rusertp,
42677 + sizeof (struct role_transition)))
42678 + return -EFAULT;
42679 +
42680 + rusertp = rtmp->prev;
42681 +
42682 + len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
42683 +
42684 + if (!len || len >= GR_SPROLE_LEN)
42685 + return -EINVAL;
42686 +
42687 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42688 + return -ENOMEM;
42689 +
42690 + if (copy_from_user(tmp, rtmp->rolename, len))
42691 + return -EFAULT;
42692 + tmp[len-1] = '\0';
42693 + rtmp->rolename = tmp;
42694 +
42695 + if (!rlast) {
42696 + rtmp->prev = NULL;
42697 + rolep->transitions = rtmp;
42698 + } else {
42699 + rlast->next = rtmp;
42700 + rtmp->prev = rlast;
42701 + }
42702 +
42703 + if (!rusertp)
42704 + rtmp->next = NULL;
42705 + }
42706 +
42707 + return 0;
42708 +}
42709 +
42710 +static struct acl_subject_label *
42711 +do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
42712 +{
42713 + struct acl_subject_label *s_tmp = NULL, *s_tmp2;
42714 + unsigned int len;
42715 + char *tmp;
42716 + __u32 num_objs;
42717 + struct acl_ip_label **i_tmp, *i_utmp2;
42718 + struct gr_hash_struct ghash;
42719 + struct subject_map *subjmap;
42720 + unsigned int i_num;
42721 + int err;
42722 +
42723 + s_tmp = lookup_subject_map(userp);
42724 +
42725 + /* we've already copied this subject into the kernel, just return
42726 + the reference to it, and don't copy it over again
42727 + */
42728 + if (s_tmp)
42729 + return(s_tmp);
42730 +
42731 + if ((s_tmp = (struct acl_subject_label *)
42732 + acl_alloc(sizeof (struct acl_subject_label))) == NULL)
42733 + return ERR_PTR(-ENOMEM);
42734 +
42735 + subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
42736 + if (subjmap == NULL)
42737 + return ERR_PTR(-ENOMEM);
42738 +
42739 + subjmap->user = userp;
42740 + subjmap->kernel = s_tmp;
42741 + insert_subj_map_entry(subjmap);
42742 +
42743 + if (copy_from_user(s_tmp, userp,
42744 + sizeof (struct acl_subject_label)))
42745 + return ERR_PTR(-EFAULT);
42746 +
42747 + len = strnlen_user(s_tmp->filename, PATH_MAX);
42748 +
42749 + if (!len || len >= PATH_MAX)
42750 + return ERR_PTR(-EINVAL);
42751 +
42752 + if ((tmp = (char *) acl_alloc(len)) == NULL)
42753 + return ERR_PTR(-ENOMEM);
42754 +
42755 + if (copy_from_user(tmp, s_tmp->filename, len))
42756 + return ERR_PTR(-EFAULT);
42757 + tmp[len-1] = '\0';
42758 + s_tmp->filename = tmp;
42759 +
42760 + if (!strcmp(s_tmp->filename, "/"))
42761 + role->root_label = s_tmp;
42762 +
42763 + if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
42764 + return ERR_PTR(-EFAULT);
42765 +
42766 + /* copy user and group transition tables */
42767 +
42768 + if (s_tmp->user_trans_num) {
42769 + uid_t *uidlist;
42770 +
42771 + uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
42772 + if (uidlist == NULL)
42773 + return ERR_PTR(-ENOMEM);
42774 + if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
42775 + return ERR_PTR(-EFAULT);
42776 +
42777 + s_tmp->user_transitions = uidlist;
42778 + }
42779 +
42780 + if (s_tmp->group_trans_num) {
42781 + gid_t *gidlist;
42782 +
42783 + gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
42784 + if (gidlist == NULL)
42785 + return ERR_PTR(-ENOMEM);
42786 + if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
42787 + return ERR_PTR(-EFAULT);
42788 +
42789 + s_tmp->group_transitions = gidlist;
42790 + }
42791 +
42792 + /* set up object hash table */
42793 + num_objs = count_user_objs(ghash.first);
42794 +
42795 + s_tmp->obj_hash_size = num_objs;
42796 + s_tmp->obj_hash =
42797 + (struct acl_object_label **)
42798 + create_table(&(s_tmp->obj_hash_size), sizeof(void *));
42799 +
42800 + if (!s_tmp->obj_hash)
42801 + return ERR_PTR(-ENOMEM);
42802 +
42803 + memset(s_tmp->obj_hash, 0,
42804 + s_tmp->obj_hash_size *
42805 + sizeof (struct acl_object_label *));
42806 +
42807 + /* add in objects */
42808 + err = copy_user_objs(ghash.first, s_tmp, role);
42809 +
42810 + if (err)
42811 + return ERR_PTR(err);
42812 +
42813 + /* set pointer for parent subject */
42814 + if (s_tmp->parent_subject) {
42815 + s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
42816 +
42817 + if (IS_ERR(s_tmp2))
42818 + return s_tmp2;
42819 +
42820 + s_tmp->parent_subject = s_tmp2;
42821 + }
42822 +
42823 + /* add in ip acls */
42824 +
42825 + if (!s_tmp->ip_num) {
42826 + s_tmp->ips = NULL;
42827 + goto insert;
42828 + }
42829 +
42830 + i_tmp =
42831 + (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
42832 + sizeof (struct acl_ip_label *));
42833 +
42834 + if (!i_tmp)
42835 + return ERR_PTR(-ENOMEM);
42836 +
42837 + for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
42838 + *(i_tmp + i_num) =
42839 + (struct acl_ip_label *)
42840 + acl_alloc(sizeof (struct acl_ip_label));
42841 + if (!*(i_tmp + i_num))
42842 + return ERR_PTR(-ENOMEM);
42843 +
42844 + if (copy_from_user
42845 + (&i_utmp2, s_tmp->ips + i_num,
42846 + sizeof (struct acl_ip_label *)))
42847 + return ERR_PTR(-EFAULT);
42848 +
42849 + if (copy_from_user
42850 + (*(i_tmp + i_num), i_utmp2,
42851 + sizeof (struct acl_ip_label)))
42852 + return ERR_PTR(-EFAULT);
42853 +
42854 + if ((*(i_tmp + i_num))->iface == NULL)
42855 + continue;
42856 +
42857 + len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
42858 + if (!len || len >= IFNAMSIZ)
42859 + return ERR_PTR(-EINVAL);
42860 + tmp = acl_alloc(len);
42861 + if (tmp == NULL)
42862 + return ERR_PTR(-ENOMEM);
42863 + if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
42864 + return ERR_PTR(-EFAULT);
42865 + (*(i_tmp + i_num))->iface = tmp;
42866 + }
42867 +
42868 + s_tmp->ips = i_tmp;
42869 +
42870 +insert:
42871 + if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
42872 + s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
42873 + return ERR_PTR(-ENOMEM);
42874 +
42875 + return s_tmp;
42876 +}
42877 +
42878 +static int
42879 +copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
42880 +{
42881 + struct acl_subject_label s_pre;
42882 + struct acl_subject_label * ret;
42883 + int err;
42884 +
42885 + while (userp) {
42886 + if (copy_from_user(&s_pre, userp,
42887 + sizeof (struct acl_subject_label)))
42888 + return -EFAULT;
42889 +
42890 + /* do not add nested subjects here, add
42891 + while parsing objects
42892 + */
42893 +
42894 + if (s_pre.mode & GR_NESTED) {
42895 + userp = s_pre.prev;
42896 + continue;
42897 + }
42898 +
42899 + ret = do_copy_user_subj(userp, role);
42900 +
42901 + err = PTR_ERR(ret);
42902 + if (IS_ERR(ret))
42903 + return err;
42904 +
42905 + insert_acl_subj_label(ret, role);
42906 +
42907 + userp = s_pre.prev;
42908 + }
42909 +
42910 + return 0;
42911 +}
42912 +
42913 +static int
42914 +copy_user_acl(struct gr_arg *arg)
42915 +{
42916 + struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
42917 + struct sprole_pw *sptmp;
42918 + struct gr_hash_struct *ghash;
42919 + uid_t *domainlist;
42920 + unsigned int r_num;
42921 + unsigned int len;
42922 + char *tmp;
42923 + int err = 0;
42924 + __u16 i;
42925 + __u32 num_subjs;
42926 +
42927 + /* we need a default and kernel role */
42928 + if (arg->role_db.num_roles < 2)
42929 + return -EINVAL;
42930 +
42931 + /* copy special role authentication info from userspace */
42932 +
42933 + num_sprole_pws = arg->num_sprole_pws;
42934 + acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
42935 +
42936 + if (!acl_special_roles) {
42937 + err = -ENOMEM;
42938 + goto cleanup;
42939 + }
42940 +
42941 + for (i = 0; i < num_sprole_pws; i++) {
42942 + sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
42943 + if (!sptmp) {
42944 + err = -ENOMEM;
42945 + goto cleanup;
42946 + }
42947 + if (copy_from_user(sptmp, arg->sprole_pws + i,
42948 + sizeof (struct sprole_pw))) {
42949 + err = -EFAULT;
42950 + goto cleanup;
42951 + }
42952 +
42953 + len =
42954 + strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
42955 +
42956 + if (!len || len >= GR_SPROLE_LEN) {
42957 + err = -EINVAL;
42958 + goto cleanup;
42959 + }
42960 +
42961 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
42962 + err = -ENOMEM;
42963 + goto cleanup;
42964 + }
42965 +
42966 + if (copy_from_user(tmp, sptmp->rolename, len)) {
42967 + err = -EFAULT;
42968 + goto cleanup;
42969 + }
42970 + tmp[len-1] = '\0';
42971 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
42972 + printk(KERN_ALERT "Copying special role %s\n", tmp);
42973 +#endif
42974 + sptmp->rolename = tmp;
42975 + acl_special_roles[i] = sptmp;
42976 + }
42977 +
42978 + r_utmp = (struct acl_role_label **) arg->role_db.r_table;
42979 +
42980 + for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
42981 + r_tmp = acl_alloc(sizeof (struct acl_role_label));
42982 +
42983 + if (!r_tmp) {
42984 + err = -ENOMEM;
42985 + goto cleanup;
42986 + }
42987 +
42988 + if (copy_from_user(&r_utmp2, r_utmp + r_num,
42989 + sizeof (struct acl_role_label *))) {
42990 + err = -EFAULT;
42991 + goto cleanup;
42992 + }
42993 +
42994 + if (copy_from_user(r_tmp, r_utmp2,
42995 + sizeof (struct acl_role_label))) {
42996 + err = -EFAULT;
42997 + goto cleanup;
42998 + }
42999 +
43000 + len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
43001 +
43002 + if (!len || len >= PATH_MAX) {
43003 + err = -EINVAL;
43004 + goto cleanup;
43005 + }
43006 +
43007 + if ((tmp = (char *) acl_alloc(len)) == NULL) {
43008 + err = -ENOMEM;
43009 + goto cleanup;
43010 + }
43011 + if (copy_from_user(tmp, r_tmp->rolename, len)) {
43012 + err = -EFAULT;
43013 + goto cleanup;
43014 + }
43015 + tmp[len-1] = '\0';
43016 + r_tmp->rolename = tmp;
43017 +
43018 + if (!strcmp(r_tmp->rolename, "default")
43019 + && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
43020 + default_role = r_tmp;
43021 + } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
43022 + kernel_role = r_tmp;
43023 + }
43024 +
43025 + if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
43026 + err = -ENOMEM;
43027 + goto cleanup;
43028 + }
43029 + if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
43030 + err = -EFAULT;
43031 + goto cleanup;
43032 + }
43033 +
43034 + r_tmp->hash = ghash;
43035 +
43036 + num_subjs = count_user_subjs(r_tmp->hash->first);
43037 +
43038 + r_tmp->subj_hash_size = num_subjs;
43039 + r_tmp->subj_hash =
43040 + (struct acl_subject_label **)
43041 + create_table(&(r_tmp->subj_hash_size), sizeof(void *));
43042 +
43043 + if (!r_tmp->subj_hash) {
43044 + err = -ENOMEM;
43045 + goto cleanup;
43046 + }
43047 +
43048 + err = copy_user_allowedips(r_tmp);
43049 + if (err)
43050 + goto cleanup;
43051 +
43052 + /* copy domain info */
43053 + if (r_tmp->domain_children != NULL) {
43054 + domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
43055 + if (domainlist == NULL) {
43056 + err = -ENOMEM;
43057 + goto cleanup;
43058 + }
43059 + if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
43060 + err = -EFAULT;
43061 + goto cleanup;
43062 + }
43063 + r_tmp->domain_children = domainlist;
43064 + }
43065 +
43066 + err = copy_user_transitions(r_tmp);
43067 + if (err)
43068 + goto cleanup;
43069 +
43070 + memset(r_tmp->subj_hash, 0,
43071 + r_tmp->subj_hash_size *
43072 + sizeof (struct acl_subject_label *));
43073 +
43074 + err = copy_user_subjs(r_tmp->hash->first, r_tmp);
43075 +
43076 + if (err)
43077 + goto cleanup;
43078 +
43079 + /* set nested subject list to null */
43080 + r_tmp->hash->first = NULL;
43081 +
43082 + insert_acl_role_label(r_tmp);
43083 + }
43084 +
43085 + goto return_err;
43086 + cleanup:
43087 + free_variables();
43088 + return_err:
43089 + return err;
43090 +
43091 +}
43092 +
43093 +static int
43094 +gracl_init(struct gr_arg *args)
43095 +{
43096 + int error = 0;
43097 +
43098 + memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
43099 + memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
43100 +
43101 + if (init_variables(args)) {
43102 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
43103 + error = -ENOMEM;
43104 + free_variables();
43105 + goto out;
43106 + }
43107 +
43108 + error = copy_user_acl(args);
43109 + free_init_variables();
43110 + if (error) {
43111 + free_variables();
43112 + goto out;
43113 + }
43114 +
43115 + if ((error = gr_set_acls(0))) {
43116 + free_variables();
43117 + goto out;
43118 + }
43119 +
43120 + pax_open_kernel();
43121 + gr_status |= GR_READY;
43122 + pax_close_kernel();
43123 +
43124 + out:
43125 + return error;
43126 +}
43127 +
43128 +/* derived from glibc fnmatch() 0: match, 1: no match*/
43129 +
43130 +static int
43131 +glob_match(const char *p, const char *n)
43132 +{
43133 + char c;
43134 +
43135 + while ((c = *p++) != '\0') {
43136 + switch (c) {
43137 + case '?':
43138 + if (*n == '\0')
43139 + return 1;
43140 + else if (*n == '/')
43141 + return 1;
43142 + break;
43143 + case '\\':
43144 + if (*n != c)
43145 + return 1;
43146 + break;
43147 + case '*':
43148 + for (c = *p++; c == '?' || c == '*'; c = *p++) {
43149 + if (*n == '/')
43150 + return 1;
43151 + else if (c == '?') {
43152 + if (*n == '\0')
43153 + return 1;
43154 + else
43155 + ++n;
43156 + }
43157 + }
43158 + if (c == '\0') {
43159 + return 0;
43160 + } else {
43161 + const char *endp;
43162 +
43163 + if ((endp = strchr(n, '/')) == NULL)
43164 + endp = n + strlen(n);
43165 +
43166 + if (c == '[') {
43167 + for (--p; n < endp; ++n)
43168 + if (!glob_match(p, n))
43169 + return 0;
43170 + } else if (c == '/') {
43171 + while (*n != '\0' && *n != '/')
43172 + ++n;
43173 + if (*n == '/' && !glob_match(p, n + 1))
43174 + return 0;
43175 + } else {
43176 + for (--p; n < endp; ++n)
43177 + if (*n == c && !glob_match(p, n))
43178 + return 0;
43179 + }
43180 +
43181 + return 1;
43182 + }
43183 + case '[':
43184 + {
43185 + int not;
43186 + char cold;
43187 +
43188 + if (*n == '\0' || *n == '/')
43189 + return 1;
43190 +
43191 + not = (*p == '!' || *p == '^');
43192 + if (not)
43193 + ++p;
43194 +
43195 + c = *p++;
43196 + for (;;) {
43197 + unsigned char fn = (unsigned char)*n;
43198 +
43199 + if (c == '\0')
43200 + return 1;
43201 + else {
43202 + if (c == fn)
43203 + goto matched;
43204 + cold = c;
43205 + c = *p++;
43206 +
43207 + if (c == '-' && *p != ']') {
43208 + unsigned char cend = *p++;
43209 +
43210 + if (cend == '\0')
43211 + return 1;
43212 +
43213 + if (cold <= fn && fn <= cend)
43214 + goto matched;
43215 +
43216 + c = *p++;
43217 + }
43218 + }
43219 +
43220 + if (c == ']')
43221 + break;
43222 + }
43223 + if (!not)
43224 + return 1;
43225 + break;
43226 + matched:
43227 + while (c != ']') {
43228 + if (c == '\0')
43229 + return 1;
43230 +
43231 + c = *p++;
43232 + }
43233 + if (not)
43234 + return 1;
43235 + }
43236 + break;
43237 + default:
43238 + if (c != *n)
43239 + return 1;
43240 + }
43241 +
43242 + ++n;
43243 + }
43244 +
43245 + if (*n == '\0')
43246 + return 0;
43247 +
43248 + if (*n == '/')
43249 + return 0;
43250 +
43251 + return 1;
43252 +}
43253 +
43254 +static struct acl_object_label *
43255 +chk_glob_label(struct acl_object_label *globbed,
43256 + struct dentry *dentry, struct vfsmount *mnt, char **path)
43257 +{
43258 + struct acl_object_label *tmp;
43259 +
43260 + if (*path == NULL)
43261 + *path = gr_to_filename_nolock(dentry, mnt);
43262 +
43263 + tmp = globbed;
43264 +
43265 + while (tmp) {
43266 + if (!glob_match(tmp->filename, *path))
43267 + return tmp;
43268 + tmp = tmp->next;
43269 + }
43270 +
43271 + return NULL;
43272 +}
43273 +
43274 +static struct acl_object_label *
43275 +__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
43276 + const ino_t curr_ino, const dev_t curr_dev,
43277 + const struct acl_subject_label *subj, char **path, const int checkglob)
43278 +{
43279 + struct acl_subject_label *tmpsubj;
43280 + struct acl_object_label *retval;
43281 + struct acl_object_label *retval2;
43282 +
43283 + tmpsubj = (struct acl_subject_label *) subj;
43284 + read_lock(&gr_inode_lock);
43285 + do {
43286 + retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
43287 + if (retval) {
43288 + if (checkglob && retval->globbed) {
43289 + retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
43290 + (struct vfsmount *)orig_mnt, path);
43291 + if (retval2)
43292 + retval = retval2;
43293 + }
43294 + break;
43295 + }
43296 + } while ((tmpsubj = tmpsubj->parent_subject));
43297 + read_unlock(&gr_inode_lock);
43298 +
43299 + return retval;
43300 +}
43301 +
43302 +static __inline__ struct acl_object_label *
43303 +full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
43304 + struct dentry *curr_dentry,
43305 + const struct acl_subject_label *subj, char **path, const int checkglob)
43306 +{
43307 + int newglob = checkglob;
43308 + ino_t inode;
43309 + dev_t device;
43310 +
43311 + /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
43312 + as we don't want a / * rule to match instead of the / object
43313 + don't do this for create lookups that call this function though, since they're looking up
43314 + on the parent and thus need globbing checks on all paths
43315 + */
43316 + if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
43317 + newglob = GR_NO_GLOB;
43318 +
43319 + spin_lock(&curr_dentry->d_lock);
43320 + inode = curr_dentry->d_inode->i_ino;
43321 + device = __get_dev(curr_dentry);
43322 + spin_unlock(&curr_dentry->d_lock);
43323 +
43324 + return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
43325 +}
43326 +
43327 +static struct acl_object_label *
43328 +__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43329 + const struct acl_subject_label *subj, char *path, const int checkglob)
43330 +{
43331 + struct dentry *dentry = (struct dentry *) l_dentry;
43332 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43333 + struct acl_object_label *retval;
43334 + struct dentry *parent;
43335 +
43336 + write_seqlock(&rename_lock);
43337 + br_read_lock(vfsmount_lock);
43338 +
43339 + if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
43340 +#ifdef CONFIG_NET
43341 + mnt == sock_mnt ||
43342 +#endif
43343 +#ifdef CONFIG_HUGETLBFS
43344 + (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
43345 +#endif
43346 + /* ignore Eric Biederman */
43347 + IS_PRIVATE(l_dentry->d_inode))) {
43348 + retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
43349 + goto out;
43350 + }
43351 +
43352 + for (;;) {
43353 + if (dentry == real_root.dentry && mnt == real_root.mnt)
43354 + break;
43355 +
43356 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43357 + if (mnt->mnt_parent == mnt)
43358 + break;
43359 +
43360 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43361 + if (retval != NULL)
43362 + goto out;
43363 +
43364 + dentry = mnt->mnt_mountpoint;
43365 + mnt = mnt->mnt_parent;
43366 + continue;
43367 + }
43368 +
43369 + parent = dentry->d_parent;
43370 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43371 + if (retval != NULL)
43372 + goto out;
43373 +
43374 + dentry = parent;
43375 + }
43376 +
43377 + retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
43378 +
43379 + /* real_root is pinned so we don't have to hold a reference */
43380 + if (retval == NULL)
43381 + retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
43382 +out:
43383 + br_read_unlock(vfsmount_lock);
43384 + write_sequnlock(&rename_lock);
43385 +
43386 + BUG_ON(retval == NULL);
43387 +
43388 + return retval;
43389 +}
43390 +
43391 +static __inline__ struct acl_object_label *
43392 +chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43393 + const struct acl_subject_label *subj)
43394 +{
43395 + char *path = NULL;
43396 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
43397 +}
43398 +
43399 +static __inline__ struct acl_object_label *
43400 +chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43401 + const struct acl_subject_label *subj)
43402 +{
43403 + char *path = NULL;
43404 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
43405 +}
43406 +
43407 +static __inline__ struct acl_object_label *
43408 +chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43409 + const struct acl_subject_label *subj, char *path)
43410 +{
43411 + return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
43412 +}
43413 +
43414 +static struct acl_subject_label *
43415 +chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
43416 + const struct acl_role_label *role)
43417 +{
43418 + struct dentry *dentry = (struct dentry *) l_dentry;
43419 + struct vfsmount *mnt = (struct vfsmount *) l_mnt;
43420 + struct acl_subject_label *retval;
43421 + struct dentry *parent;
43422 +
43423 + write_seqlock(&rename_lock);
43424 + br_read_lock(vfsmount_lock);
43425 +
43426 + for (;;) {
43427 + if (dentry == real_root.dentry && mnt == real_root.mnt)
43428 + break;
43429 + if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
43430 + if (mnt->mnt_parent == mnt)
43431 + break;
43432 +
43433 + spin_lock(&dentry->d_lock);
43434 + read_lock(&gr_inode_lock);
43435 + retval =
43436 + lookup_acl_subj_label(dentry->d_inode->i_ino,
43437 + __get_dev(dentry), role);
43438 + read_unlock(&gr_inode_lock);
43439 + spin_unlock(&dentry->d_lock);
43440 + if (retval != NULL)
43441 + goto out;
43442 +
43443 + dentry = mnt->mnt_mountpoint;
43444 + mnt = mnt->mnt_parent;
43445 + continue;
43446 + }
43447 +
43448 + spin_lock(&dentry->d_lock);
43449 + read_lock(&gr_inode_lock);
43450 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43451 + __get_dev(dentry), role);
43452 + read_unlock(&gr_inode_lock);
43453 + parent = dentry->d_parent;
43454 + spin_unlock(&dentry->d_lock);
43455 +
43456 + if (retval != NULL)
43457 + goto out;
43458 +
43459 + dentry = parent;
43460 + }
43461 +
43462 + spin_lock(&dentry->d_lock);
43463 + read_lock(&gr_inode_lock);
43464 + retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
43465 + __get_dev(dentry), role);
43466 + read_unlock(&gr_inode_lock);
43467 + spin_unlock(&dentry->d_lock);
43468 +
43469 + if (unlikely(retval == NULL)) {
43470 + /* real_root is pinned, we don't need to hold a reference */
43471 + read_lock(&gr_inode_lock);
43472 + retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
43473 + __get_dev(real_root.dentry), role);
43474 + read_unlock(&gr_inode_lock);
43475 + }
43476 +out:
43477 + br_read_unlock(vfsmount_lock);
43478 + write_sequnlock(&rename_lock);
43479 +
43480 + BUG_ON(retval == NULL);
43481 +
43482 + return retval;
43483 +}
43484 +
43485 +static void
43486 +gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
43487 +{
43488 + struct task_struct *task = current;
43489 + const struct cred *cred = current_cred();
43490 +
43491 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43492 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43493 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43494 + 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
43495 +
43496 + return;
43497 +}
43498 +
43499 +static void
43500 +gr_log_learn_sysctl(const char *path, const __u32 mode)
43501 +{
43502 + struct task_struct *task = current;
43503 + const struct cred *cred = current_cred();
43504 +
43505 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
43506 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43507 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43508 + 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
43509 +
43510 + return;
43511 +}
43512 +
43513 +static void
43514 +gr_log_learn_id_change(const char type, const unsigned int real,
43515 + const unsigned int effective, const unsigned int fs)
43516 +{
43517 + struct task_struct *task = current;
43518 + const struct cred *cred = current_cred();
43519 +
43520 + security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
43521 + cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
43522 + task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
43523 + type, real, effective, fs, &task->signal->saved_ip);
43524 +
43525 + return;
43526 +}
43527 +
43528 +__u32
43529 +gr_check_link(const struct dentry * new_dentry,
43530 + const struct dentry * parent_dentry,
43531 + const struct vfsmount * parent_mnt,
43532 + const struct dentry * old_dentry, const struct vfsmount * old_mnt)
43533 +{
43534 + struct acl_object_label *obj;
43535 + __u32 oldmode, newmode;
43536 + __u32 needmode;
43537 +
43538 + if (unlikely(!(gr_status & GR_READY)))
43539 + return (GR_CREATE | GR_LINK);
43540 +
43541 + obj = chk_obj_label(old_dentry, old_mnt, current->acl);
43542 + oldmode = obj->mode;
43543 +
43544 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43545 + oldmode |= (GR_CREATE | GR_LINK);
43546 +
43547 + needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
43548 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43549 + needmode |= GR_SETID | GR_AUDIT_SETID;
43550 +
43551 + newmode =
43552 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
43553 + oldmode | needmode);
43554 +
43555 + needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
43556 + GR_SETID | GR_READ | GR_FIND | GR_DELETE |
43557 + GR_INHERIT | GR_AUDIT_INHERIT);
43558 +
43559 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
43560 + goto bad;
43561 +
43562 + if ((oldmode & needmode) != needmode)
43563 + goto bad;
43564 +
43565 + needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
43566 + if ((newmode & needmode) != needmode)
43567 + goto bad;
43568 +
43569 + if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
43570 + return newmode;
43571 +bad:
43572 + needmode = oldmode;
43573 + if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
43574 + needmode |= GR_SETID;
43575 +
43576 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43577 + gr_log_learn(old_dentry, old_mnt, needmode);
43578 + return (GR_CREATE | GR_LINK);
43579 + } else if (newmode & GR_SUPPRESS)
43580 + return GR_SUPPRESS;
43581 + else
43582 + return 0;
43583 +}
43584 +
43585 +__u32
43586 +gr_search_file(const struct dentry * dentry, const __u32 mode,
43587 + const struct vfsmount * mnt)
43588 +{
43589 + __u32 retval = mode;
43590 + struct acl_subject_label *curracl;
43591 + struct acl_object_label *currobj;
43592 +
43593 + if (unlikely(!(gr_status & GR_READY)))
43594 + return (mode & ~GR_AUDITS);
43595 +
43596 + curracl = current->acl;
43597 +
43598 + currobj = chk_obj_label(dentry, mnt, curracl);
43599 + retval = currobj->mode & mode;
43600 +
43601 + /* if we're opening a specified transfer file for writing
43602 + (e.g. /dev/initctl), then transfer our role to init
43603 + */
43604 + if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
43605 + current->role->roletype & GR_ROLE_PERSIST)) {
43606 + struct task_struct *task = init_pid_ns.child_reaper;
43607 +
43608 + if (task->role != current->role) {
43609 + task->acl_sp_role = 0;
43610 + task->acl_role_id = current->acl_role_id;
43611 + task->role = current->role;
43612 + rcu_read_lock();
43613 + read_lock(&grsec_exec_file_lock);
43614 + gr_apply_subject_to_task(task);
43615 + read_unlock(&grsec_exec_file_lock);
43616 + rcu_read_unlock();
43617 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
43618 + }
43619 + }
43620 +
43621 + if (unlikely
43622 + ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
43623 + && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
43624 + __u32 new_mode = mode;
43625 +
43626 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43627 +
43628 + retval = new_mode;
43629 +
43630 + if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
43631 + new_mode |= GR_INHERIT;
43632 +
43633 + if (!(mode & GR_NOLEARN))
43634 + gr_log_learn(dentry, mnt, new_mode);
43635 + }
43636 +
43637 + return retval;
43638 +}
43639 +
43640 +__u32
43641 +gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
43642 + const struct vfsmount * mnt, const __u32 mode)
43643 +{
43644 + struct name_entry *match;
43645 + struct acl_object_label *matchpo;
43646 + struct acl_subject_label *curracl;
43647 + char *path;
43648 + __u32 retval;
43649 +
43650 + if (unlikely(!(gr_status & GR_READY)))
43651 + return (mode & ~GR_AUDITS);
43652 +
43653 + preempt_disable();
43654 + path = gr_to_filename_rbac(new_dentry, mnt);
43655 + match = lookup_name_entry_create(path);
43656 +
43657 + if (!match)
43658 + goto check_parent;
43659 +
43660 + curracl = current->acl;
43661 +
43662 + read_lock(&gr_inode_lock);
43663 + matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
43664 + read_unlock(&gr_inode_lock);
43665 +
43666 + if (matchpo) {
43667 + if ((matchpo->mode & mode) !=
43668 + (mode & ~(GR_AUDITS | GR_SUPPRESS))
43669 + && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
43670 + __u32 new_mode = mode;
43671 +
43672 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43673 +
43674 + gr_log_learn(new_dentry, mnt, new_mode);
43675 +
43676 + preempt_enable();
43677 + return new_mode;
43678 + }
43679 + preempt_enable();
43680 + return (matchpo->mode & mode);
43681 + }
43682 +
43683 + check_parent:
43684 + curracl = current->acl;
43685 +
43686 + matchpo = chk_obj_create_label(parent, mnt, curracl, path);
43687 + retval = matchpo->mode & mode;
43688 +
43689 + if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
43690 + && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
43691 + __u32 new_mode = mode;
43692 +
43693 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
43694 +
43695 + gr_log_learn(new_dentry, mnt, new_mode);
43696 + preempt_enable();
43697 + return new_mode;
43698 + }
43699 +
43700 + preempt_enable();
43701 + return retval;
43702 +}
43703 +
43704 +int
43705 +gr_check_hidden_task(const struct task_struct *task)
43706 +{
43707 + if (unlikely(!(gr_status & GR_READY)))
43708 + return 0;
43709 +
43710 + if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
43711 + return 1;
43712 +
43713 + return 0;
43714 +}
43715 +
43716 +int
43717 +gr_check_protected_task(const struct task_struct *task)
43718 +{
43719 + if (unlikely(!(gr_status & GR_READY) || !task))
43720 + return 0;
43721 +
43722 + if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43723 + task->acl != current->acl)
43724 + return 1;
43725 +
43726 + return 0;
43727 +}
43728 +
43729 +int
43730 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
43731 +{
43732 + struct task_struct *p;
43733 + int ret = 0;
43734 +
43735 + if (unlikely(!(gr_status & GR_READY) || !pid))
43736 + return ret;
43737 +
43738 + read_lock(&tasklist_lock);
43739 + do_each_pid_task(pid, type, p) {
43740 + if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
43741 + p->acl != current->acl) {
43742 + ret = 1;
43743 + goto out;
43744 + }
43745 + } while_each_pid_task(pid, type, p);
43746 +out:
43747 + read_unlock(&tasklist_lock);
43748 +
43749 + return ret;
43750 +}
43751 +
43752 +void
43753 +gr_copy_label(struct task_struct *tsk)
43754 +{
43755 + tsk->signal->used_accept = 0;
43756 + tsk->acl_sp_role = 0;
43757 + tsk->acl_role_id = current->acl_role_id;
43758 + tsk->acl = current->acl;
43759 + tsk->role = current->role;
43760 + tsk->signal->curr_ip = current->signal->curr_ip;
43761 + tsk->signal->saved_ip = current->signal->saved_ip;
43762 + if (current->exec_file)
43763 + get_file(current->exec_file);
43764 + tsk->exec_file = current->exec_file;
43765 + tsk->is_writable = current->is_writable;
43766 + if (unlikely(current->signal->used_accept)) {
43767 + current->signal->curr_ip = 0;
43768 + current->signal->saved_ip = 0;
43769 + }
43770 +
43771 + return;
43772 +}
43773 +
43774 +static void
43775 +gr_set_proc_res(struct task_struct *task)
43776 +{
43777 + struct acl_subject_label *proc;
43778 + unsigned short i;
43779 +
43780 + proc = task->acl;
43781 +
43782 + if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
43783 + return;
43784 +
43785 + for (i = 0; i < RLIM_NLIMITS; i++) {
43786 + if (!(proc->resmask & (1 << i)))
43787 + continue;
43788 +
43789 + task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
43790 + task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
43791 + }
43792 +
43793 + return;
43794 +}
43795 +
43796 +extern int __gr_process_user_ban(struct user_struct *user);
43797 +
43798 +int
43799 +gr_check_user_change(int real, int effective, int fs)
43800 +{
43801 + unsigned int i;
43802 + __u16 num;
43803 + uid_t *uidlist;
43804 + int curuid;
43805 + int realok = 0;
43806 + int effectiveok = 0;
43807 + int fsok = 0;
43808 +
43809 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
43810 + struct user_struct *user;
43811 +
43812 + if (real == -1)
43813 + goto skipit;
43814 +
43815 + user = find_user(real);
43816 + if (user == NULL)
43817 + goto skipit;
43818 +
43819 + if (__gr_process_user_ban(user)) {
43820 + /* for find_user */
43821 + free_uid(user);
43822 + return 1;
43823 + }
43824 +
43825 + /* for find_user */
43826 + free_uid(user);
43827 +
43828 +skipit:
43829 +#endif
43830 +
43831 + if (unlikely(!(gr_status & GR_READY)))
43832 + return 0;
43833 +
43834 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43835 + gr_log_learn_id_change('u', real, effective, fs);
43836 +
43837 + num = current->acl->user_trans_num;
43838 + uidlist = current->acl->user_transitions;
43839 +
43840 + if (uidlist == NULL)
43841 + return 0;
43842 +
43843 + if (real == -1)
43844 + realok = 1;
43845 + if (effective == -1)
43846 + effectiveok = 1;
43847 + if (fs == -1)
43848 + fsok = 1;
43849 +
43850 + if (current->acl->user_trans_type & GR_ID_ALLOW) {
43851 + for (i = 0; i < num; i++) {
43852 + curuid = (int)uidlist[i];
43853 + if (real == curuid)
43854 + realok = 1;
43855 + if (effective == curuid)
43856 + effectiveok = 1;
43857 + if (fs == curuid)
43858 + fsok = 1;
43859 + }
43860 + } else if (current->acl->user_trans_type & GR_ID_DENY) {
43861 + for (i = 0; i < num; i++) {
43862 + curuid = (int)uidlist[i];
43863 + if (real == curuid)
43864 + break;
43865 + if (effective == curuid)
43866 + break;
43867 + if (fs == curuid)
43868 + break;
43869 + }
43870 + /* not in deny list */
43871 + if (i == num) {
43872 + realok = 1;
43873 + effectiveok = 1;
43874 + fsok = 1;
43875 + }
43876 + }
43877 +
43878 + if (realok && effectiveok && fsok)
43879 + return 0;
43880 + else {
43881 + gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43882 + return 1;
43883 + }
43884 +}
43885 +
43886 +int
43887 +gr_check_group_change(int real, int effective, int fs)
43888 +{
43889 + unsigned int i;
43890 + __u16 num;
43891 + gid_t *gidlist;
43892 + int curgid;
43893 + int realok = 0;
43894 + int effectiveok = 0;
43895 + int fsok = 0;
43896 +
43897 + if (unlikely(!(gr_status & GR_READY)))
43898 + return 0;
43899 +
43900 + if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
43901 + gr_log_learn_id_change('g', real, effective, fs);
43902 +
43903 + num = current->acl->group_trans_num;
43904 + gidlist = current->acl->group_transitions;
43905 +
43906 + if (gidlist == NULL)
43907 + return 0;
43908 +
43909 + if (real == -1)
43910 + realok = 1;
43911 + if (effective == -1)
43912 + effectiveok = 1;
43913 + if (fs == -1)
43914 + fsok = 1;
43915 +
43916 + if (current->acl->group_trans_type & GR_ID_ALLOW) {
43917 + for (i = 0; i < num; i++) {
43918 + curgid = (int)gidlist[i];
43919 + if (real == curgid)
43920 + realok = 1;
43921 + if (effective == curgid)
43922 + effectiveok = 1;
43923 + if (fs == curgid)
43924 + fsok = 1;
43925 + }
43926 + } else if (current->acl->group_trans_type & GR_ID_DENY) {
43927 + for (i = 0; i < num; i++) {
43928 + curgid = (int)gidlist[i];
43929 + if (real == curgid)
43930 + break;
43931 + if (effective == curgid)
43932 + break;
43933 + if (fs == curgid)
43934 + break;
43935 + }
43936 + /* not in deny list */
43937 + if (i == num) {
43938 + realok = 1;
43939 + effectiveok = 1;
43940 + fsok = 1;
43941 + }
43942 + }
43943 +
43944 + if (realok && effectiveok && fsok)
43945 + return 0;
43946 + else {
43947 + gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
43948 + return 1;
43949 + }
43950 +}
43951 +
43952 +void
43953 +gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
43954 +{
43955 + struct acl_role_label *role = task->role;
43956 + struct acl_subject_label *subj = NULL;
43957 + struct acl_object_label *obj;
43958 + struct file *filp;
43959 +
43960 + if (unlikely(!(gr_status & GR_READY)))
43961 + return;
43962 +
43963 + filp = task->exec_file;
43964 +
43965 + /* kernel process, we'll give them the kernel role */
43966 + if (unlikely(!filp)) {
43967 + task->role = kernel_role;
43968 + task->acl = kernel_role->root_label;
43969 + return;
43970 + } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
43971 + role = lookup_acl_role_label(task, uid, gid);
43972 +
43973 + /* perform subject lookup in possibly new role
43974 + we can use this result below in the case where role == task->role
43975 + */
43976 + subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
43977 +
43978 + /* if we changed uid/gid, but result in the same role
43979 + and are using inheritance, don't lose the inherited subject
43980 + if current subject is other than what normal lookup
43981 + would result in, we arrived via inheritance, don't
43982 + lose subject
43983 + */
43984 + if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
43985 + (subj == task->acl)))
43986 + task->acl = subj;
43987 +
43988 + task->role = role;
43989 +
43990 + task->is_writable = 0;
43991 +
43992 + /* ignore additional mmap checks for processes that are writable
43993 + by the default ACL */
43994 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
43995 + if (unlikely(obj->mode & GR_WRITE))
43996 + task->is_writable = 1;
43997 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
43998 + if (unlikely(obj->mode & GR_WRITE))
43999 + task->is_writable = 1;
44000 +
44001 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44002 + printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
44003 +#endif
44004 +
44005 + gr_set_proc_res(task);
44006 +
44007 + return;
44008 +}
44009 +
44010 +int
44011 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
44012 + const int unsafe_share)
44013 +{
44014 + struct task_struct *task = current;
44015 + struct acl_subject_label *newacl;
44016 + struct acl_object_label *obj;
44017 + __u32 retmode;
44018 +
44019 + if (unlikely(!(gr_status & GR_READY)))
44020 + return 0;
44021 +
44022 + newacl = chk_subj_label(dentry, mnt, task->role);
44023 +
44024 + task_lock(task);
44025 + if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
44026 + !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
44027 + !(task->role->roletype & GR_ROLE_GOD) &&
44028 + !gr_search_file(dentry, GR_PTRACERD, mnt) &&
44029 + !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
44030 + task_unlock(task);
44031 + if (unsafe_share)
44032 + gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
44033 + else
44034 + gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
44035 + return -EACCES;
44036 + }
44037 + task_unlock(task);
44038 +
44039 + obj = chk_obj_label(dentry, mnt, task->acl);
44040 + retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
44041 +
44042 + if (!(task->acl->mode & GR_INHERITLEARN) &&
44043 + ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
44044 + if (obj->nested)
44045 + task->acl = obj->nested;
44046 + else
44047 + task->acl = newacl;
44048 + } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
44049 + gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
44050 +
44051 + task->is_writable = 0;
44052 +
44053 + /* ignore additional mmap checks for processes that are writable
44054 + by the default ACL */
44055 + obj = chk_obj_label(dentry, mnt, default_role->root_label);
44056 + if (unlikely(obj->mode & GR_WRITE))
44057 + task->is_writable = 1;
44058 + obj = chk_obj_label(dentry, mnt, task->role->root_label);
44059 + if (unlikely(obj->mode & GR_WRITE))
44060 + task->is_writable = 1;
44061 +
44062 + gr_set_proc_res(task);
44063 +
44064 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44065 + printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
44066 +#endif
44067 + return 0;
44068 +}
44069 +
44070 +/* always called with valid inodev ptr */
44071 +static void
44072 +do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
44073 +{
44074 + struct acl_object_label *matchpo;
44075 + struct acl_subject_label *matchps;
44076 + struct acl_subject_label *subj;
44077 + struct acl_role_label *role;
44078 + unsigned int x;
44079 +
44080 + FOR_EACH_ROLE_START(role)
44081 + FOR_EACH_SUBJECT_START(role, subj, x)
44082 + if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
44083 + matchpo->mode |= GR_DELETED;
44084 + FOR_EACH_SUBJECT_END(subj,x)
44085 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
44086 + if (subj->inode == ino && subj->device == dev)
44087 + subj->mode |= GR_DELETED;
44088 + FOR_EACH_NESTED_SUBJECT_END(subj)
44089 + if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
44090 + matchps->mode |= GR_DELETED;
44091 + FOR_EACH_ROLE_END(role)
44092 +
44093 + inodev->nentry->deleted = 1;
44094 +
44095 + return;
44096 +}
44097 +
44098 +void
44099 +gr_handle_delete(const ino_t ino, const dev_t dev)
44100 +{
44101 + struct inodev_entry *inodev;
44102 +
44103 + if (unlikely(!(gr_status & GR_READY)))
44104 + return;
44105 +
44106 + write_lock(&gr_inode_lock);
44107 + inodev = lookup_inodev_entry(ino, dev);
44108 + if (inodev != NULL)
44109 + do_handle_delete(inodev, ino, dev);
44110 + write_unlock(&gr_inode_lock);
44111 +
44112 + return;
44113 +}
44114 +
44115 +static void
44116 +update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
44117 + const ino_t newinode, const dev_t newdevice,
44118 + struct acl_subject_label *subj)
44119 +{
44120 + unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
44121 + struct acl_object_label *match;
44122 +
44123 + match = subj->obj_hash[index];
44124 +
44125 + while (match && (match->inode != oldinode ||
44126 + match->device != olddevice ||
44127 + !(match->mode & GR_DELETED)))
44128 + match = match->next;
44129 +
44130 + if (match && (match->inode == oldinode)
44131 + && (match->device == olddevice)
44132 + && (match->mode & GR_DELETED)) {
44133 + if (match->prev == NULL) {
44134 + subj->obj_hash[index] = match->next;
44135 + if (match->next != NULL)
44136 + match->next->prev = NULL;
44137 + } else {
44138 + match->prev->next = match->next;
44139 + if (match->next != NULL)
44140 + match->next->prev = match->prev;
44141 + }
44142 + match->prev = NULL;
44143 + match->next = NULL;
44144 + match->inode = newinode;
44145 + match->device = newdevice;
44146 + match->mode &= ~GR_DELETED;
44147 +
44148 + insert_acl_obj_label(match, subj);
44149 + }
44150 +
44151 + return;
44152 +}
44153 +
44154 +static void
44155 +update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
44156 + const ino_t newinode, const dev_t newdevice,
44157 + struct acl_role_label *role)
44158 +{
44159 + unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
44160 + struct acl_subject_label *match;
44161 +
44162 + match = role->subj_hash[index];
44163 +
44164 + while (match && (match->inode != oldinode ||
44165 + match->device != olddevice ||
44166 + !(match->mode & GR_DELETED)))
44167 + match = match->next;
44168 +
44169 + if (match && (match->inode == oldinode)
44170 + && (match->device == olddevice)
44171 + && (match->mode & GR_DELETED)) {
44172 + if (match->prev == NULL) {
44173 + role->subj_hash[index] = match->next;
44174 + if (match->next != NULL)
44175 + match->next->prev = NULL;
44176 + } else {
44177 + match->prev->next = match->next;
44178 + if (match->next != NULL)
44179 + match->next->prev = match->prev;
44180 + }
44181 + match->prev = NULL;
44182 + match->next = NULL;
44183 + match->inode = newinode;
44184 + match->device = newdevice;
44185 + match->mode &= ~GR_DELETED;
44186 +
44187 + insert_acl_subj_label(match, role);
44188 + }
44189 +
44190 + return;
44191 +}
44192 +
44193 +static void
44194 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
44195 + const ino_t newinode, const dev_t newdevice)
44196 +{
44197 + unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
44198 + struct inodev_entry *match;
44199 +
44200 + match = inodev_set.i_hash[index];
44201 +
44202 + while (match && (match->nentry->inode != oldinode ||
44203 + match->nentry->device != olddevice || !match->nentry->deleted))
44204 + match = match->next;
44205 +
44206 + if (match && (match->nentry->inode == oldinode)
44207 + && (match->nentry->device == olddevice) &&
44208 + match->nentry->deleted) {
44209 + if (match->prev == NULL) {
44210 + inodev_set.i_hash[index] = match->next;
44211 + if (match->next != NULL)
44212 + match->next->prev = NULL;
44213 + } else {
44214 + match->prev->next = match->next;
44215 + if (match->next != NULL)
44216 + match->next->prev = match->prev;
44217 + }
44218 + match->prev = NULL;
44219 + match->next = NULL;
44220 + match->nentry->inode = newinode;
44221 + match->nentry->device = newdevice;
44222 + match->nentry->deleted = 0;
44223 +
44224 + insert_inodev_entry(match);
44225 + }
44226 +
44227 + return;
44228 +}
44229 +
44230 +static void
44231 +do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
44232 + const struct vfsmount *mnt)
44233 +{
44234 + struct acl_subject_label *subj;
44235 + struct acl_role_label *role;
44236 + unsigned int x;
44237 + ino_t ino = dentry->d_inode->i_ino;
44238 + dev_t dev = __get_dev(dentry);
44239 +
44240 + FOR_EACH_ROLE_START(role)
44241 + update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
44242 +
44243 + FOR_EACH_NESTED_SUBJECT_START(role, subj)
44244 + if ((subj->inode == ino) && (subj->device == dev)) {
44245 + subj->inode = ino;
44246 + subj->device = dev;
44247 + }
44248 + FOR_EACH_NESTED_SUBJECT_END(subj)
44249 + FOR_EACH_SUBJECT_START(role, subj, x)
44250 + update_acl_obj_label(matchn->inode, matchn->device,
44251 + ino, dev, subj);
44252 + FOR_EACH_SUBJECT_END(subj,x)
44253 + FOR_EACH_ROLE_END(role)
44254 +
44255 + update_inodev_entry(matchn->inode, matchn->device, ino, dev);
44256 +
44257 + return;
44258 +}
44259 +
44260 +void
44261 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
44262 +{
44263 + struct name_entry *matchn;
44264 +
44265 + if (unlikely(!(gr_status & GR_READY)))
44266 + return;
44267 +
44268 + preempt_disable();
44269 + matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
44270 +
44271 + if (unlikely((unsigned long)matchn)) {
44272 + write_lock(&gr_inode_lock);
44273 + do_handle_create(matchn, dentry, mnt);
44274 + write_unlock(&gr_inode_lock);
44275 + }
44276 + preempt_enable();
44277 +
44278 + return;
44279 +}
44280 +
44281 +void
44282 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
44283 + struct dentry *old_dentry,
44284 + struct dentry *new_dentry,
44285 + struct vfsmount *mnt, const __u8 replace)
44286 +{
44287 + struct name_entry *matchn;
44288 + struct inodev_entry *inodev;
44289 + ino_t old_ino = old_dentry->d_inode->i_ino;
44290 + dev_t old_dev = __get_dev(old_dentry);
44291 +
44292 + /* vfs_rename swaps the name and parent link for old_dentry and
44293 + new_dentry
44294 + at this point, old_dentry has the new name, parent link, and inode
44295 + for the renamed file
44296 + if a file is being replaced by a rename, new_dentry has the inode
44297 + and name for the replaced file
44298 + */
44299 +
44300 + if (unlikely(!(gr_status & GR_READY)))
44301 + return;
44302 +
44303 + preempt_disable();
44304 + matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
44305 +
44306 + /* we wouldn't have to check d_inode if it weren't for
44307 + NFS silly-renaming
44308 + */
44309 +
44310 + write_lock(&gr_inode_lock);
44311 + if (unlikely(replace && new_dentry->d_inode)) {
44312 + ino_t new_ino = new_dentry->d_inode->i_ino;
44313 + dev_t new_dev = __get_dev(new_dentry);
44314 +
44315 + inodev = lookup_inodev_entry(new_ino, new_dev);
44316 + if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
44317 + do_handle_delete(inodev, new_ino, new_dev);
44318 + }
44319 +
44320 + inodev = lookup_inodev_entry(old_ino, old_dev);
44321 + if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
44322 + do_handle_delete(inodev, old_ino, old_dev);
44323 +
44324 + if (unlikely((unsigned long)matchn))
44325 + do_handle_create(matchn, old_dentry, mnt);
44326 +
44327 + write_unlock(&gr_inode_lock);
44328 + preempt_enable();
44329 +
44330 + return;
44331 +}
44332 +
44333 +static int
44334 +lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
44335 + unsigned char **sum)
44336 +{
44337 + struct acl_role_label *r;
44338 + struct role_allowed_ip *ipp;
44339 + struct role_transition *trans;
44340 + unsigned int i;
44341 + int found = 0;
44342 + u32 curr_ip = current->signal->curr_ip;
44343 +
44344 + current->signal->saved_ip = curr_ip;
44345 +
44346 + /* check transition table */
44347 +
44348 + for (trans = current->role->transitions; trans; trans = trans->next) {
44349 + if (!strcmp(rolename, trans->rolename)) {
44350 + found = 1;
44351 + break;
44352 + }
44353 + }
44354 +
44355 + if (!found)
44356 + return 0;
44357 +
44358 + /* handle special roles that do not require authentication
44359 + and check ip */
44360 +
44361 + FOR_EACH_ROLE_START(r)
44362 + if (!strcmp(rolename, r->rolename) &&
44363 + (r->roletype & GR_ROLE_SPECIAL)) {
44364 + found = 0;
44365 + if (r->allowed_ips != NULL) {
44366 + for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
44367 + if ((ntohl(curr_ip) & ipp->netmask) ==
44368 + (ntohl(ipp->addr) & ipp->netmask))
44369 + found = 1;
44370 + }
44371 + } else
44372 + found = 2;
44373 + if (!found)
44374 + return 0;
44375 +
44376 + if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
44377 + ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
44378 + *salt = NULL;
44379 + *sum = NULL;
44380 + return 1;
44381 + }
44382 + }
44383 + FOR_EACH_ROLE_END(r)
44384 +
44385 + for (i = 0; i < num_sprole_pws; i++) {
44386 + if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
44387 + *salt = acl_special_roles[i]->salt;
44388 + *sum = acl_special_roles[i]->sum;
44389 + return 1;
44390 + }
44391 + }
44392 +
44393 + return 0;
44394 +}
44395 +
44396 +static void
44397 +assign_special_role(char *rolename)
44398 +{
44399 + struct acl_object_label *obj;
44400 + struct acl_role_label *r;
44401 + struct acl_role_label *assigned = NULL;
44402 + struct task_struct *tsk;
44403 + struct file *filp;
44404 +
44405 + FOR_EACH_ROLE_START(r)
44406 + if (!strcmp(rolename, r->rolename) &&
44407 + (r->roletype & GR_ROLE_SPECIAL)) {
44408 + assigned = r;
44409 + break;
44410 + }
44411 + FOR_EACH_ROLE_END(r)
44412 +
44413 + if (!assigned)
44414 + return;
44415 +
44416 + read_lock(&tasklist_lock);
44417 + read_lock(&grsec_exec_file_lock);
44418 +
44419 + tsk = current->real_parent;
44420 + if (tsk == NULL)
44421 + goto out_unlock;
44422 +
44423 + filp = tsk->exec_file;
44424 + if (filp == NULL)
44425 + goto out_unlock;
44426 +
44427 + tsk->is_writable = 0;
44428 +
44429 + tsk->acl_sp_role = 1;
44430 + tsk->acl_role_id = ++acl_sp_role_value;
44431 + tsk->role = assigned;
44432 + tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
44433 +
44434 + /* ignore additional mmap checks for processes that are writable
44435 + by the default ACL */
44436 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44437 + if (unlikely(obj->mode & GR_WRITE))
44438 + tsk->is_writable = 1;
44439 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
44440 + if (unlikely(obj->mode & GR_WRITE))
44441 + tsk->is_writable = 1;
44442 +
44443 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44444 + printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
44445 +#endif
44446 +
44447 +out_unlock:
44448 + read_unlock(&grsec_exec_file_lock);
44449 + read_unlock(&tasklist_lock);
44450 + return;
44451 +}
44452 +
44453 +int gr_check_secure_terminal(struct task_struct *task)
44454 +{
44455 + struct task_struct *p, *p2, *p3;
44456 + struct files_struct *files;
44457 + struct fdtable *fdt;
44458 + struct file *our_file = NULL, *file;
44459 + int i;
44460 +
44461 + if (task->signal->tty == NULL)
44462 + return 1;
44463 +
44464 + files = get_files_struct(task);
44465 + if (files != NULL) {
44466 + rcu_read_lock();
44467 + fdt = files_fdtable(files);
44468 + for (i=0; i < fdt->max_fds; i++) {
44469 + file = fcheck_files(files, i);
44470 + if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
44471 + get_file(file);
44472 + our_file = file;
44473 + }
44474 + }
44475 + rcu_read_unlock();
44476 + put_files_struct(files);
44477 + }
44478 +
44479 + if (our_file == NULL)
44480 + return 1;
44481 +
44482 + read_lock(&tasklist_lock);
44483 + do_each_thread(p2, p) {
44484 + files = get_files_struct(p);
44485 + if (files == NULL ||
44486 + (p->signal && p->signal->tty == task->signal->tty)) {
44487 + if (files != NULL)
44488 + put_files_struct(files);
44489 + continue;
44490 + }
44491 + rcu_read_lock();
44492 + fdt = files_fdtable(files);
44493 + for (i=0; i < fdt->max_fds; i++) {
44494 + file = fcheck_files(files, i);
44495 + if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
44496 + file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
44497 + p3 = task;
44498 + while (p3->pid > 0) {
44499 + if (p3 == p)
44500 + break;
44501 + p3 = p3->real_parent;
44502 + }
44503 + if (p3 == p)
44504 + break;
44505 + gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
44506 + gr_handle_alertkill(p);
44507 + rcu_read_unlock();
44508 + put_files_struct(files);
44509 + read_unlock(&tasklist_lock);
44510 + fput(our_file);
44511 + return 0;
44512 + }
44513 + }
44514 + rcu_read_unlock();
44515 + put_files_struct(files);
44516 + } while_each_thread(p2, p);
44517 + read_unlock(&tasklist_lock);
44518 +
44519 + fput(our_file);
44520 + return 1;
44521 +}
44522 +
44523 +ssize_t
44524 +write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
44525 +{
44526 + struct gr_arg_wrapper uwrap;
44527 + unsigned char *sprole_salt = NULL;
44528 + unsigned char *sprole_sum = NULL;
44529 + int error = sizeof (struct gr_arg_wrapper);
44530 + int error2 = 0;
44531 +
44532 + mutex_lock(&gr_dev_mutex);
44533 +
44534 + if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
44535 + error = -EPERM;
44536 + goto out;
44537 + }
44538 +
44539 + if (count != sizeof (struct gr_arg_wrapper)) {
44540 + gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
44541 + error = -EINVAL;
44542 + goto out;
44543 + }
44544 +
44545 +
44546 + if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
44547 + gr_auth_expires = 0;
44548 + gr_auth_attempts = 0;
44549 + }
44550 +
44551 + if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
44552 + error = -EFAULT;
44553 + goto out;
44554 + }
44555 +
44556 + if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
44557 + error = -EINVAL;
44558 + goto out;
44559 + }
44560 +
44561 + if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
44562 + error = -EFAULT;
44563 + goto out;
44564 + }
44565 +
44566 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44567 + gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44568 + time_after(gr_auth_expires, get_seconds())) {
44569 + error = -EBUSY;
44570 + goto out;
44571 + }
44572 +
44573 + /* if non-root trying to do anything other than use a special role,
44574 + do not attempt authentication, do not count towards authentication
44575 + locking
44576 + */
44577 +
44578 + if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
44579 + gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
44580 + current_uid()) {
44581 + error = -EPERM;
44582 + goto out;
44583 + }
44584 +
44585 + /* ensure pw and special role name are null terminated */
44586 +
44587 + gr_usermode->pw[GR_PW_LEN - 1] = '\0';
44588 + gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
44589 +
44590 + /* Okay.
44591 + * We have our enough of the argument structure..(we have yet
44592 + * to copy_from_user the tables themselves) . Copy the tables
44593 + * only if we need them, i.e. for loading operations. */
44594 +
44595 + switch (gr_usermode->mode) {
44596 + case GR_STATUS:
44597 + if (gr_status & GR_READY) {
44598 + error = 1;
44599 + if (!gr_check_secure_terminal(current))
44600 + error = 3;
44601 + } else
44602 + error = 2;
44603 + goto out;
44604 + case GR_SHUTDOWN:
44605 + if ((gr_status & GR_READY)
44606 + && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44607 + pax_open_kernel();
44608 + gr_status &= ~GR_READY;
44609 + pax_close_kernel();
44610 +
44611 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
44612 + free_variables();
44613 + memset(gr_usermode, 0, sizeof (struct gr_arg));
44614 + memset(gr_system_salt, 0, GR_SALT_LEN);
44615 + memset(gr_system_sum, 0, GR_SHA_LEN);
44616 + } else if (gr_status & GR_READY) {
44617 + gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
44618 + error = -EPERM;
44619 + } else {
44620 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
44621 + error = -EAGAIN;
44622 + }
44623 + break;
44624 + case GR_ENABLE:
44625 + if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
44626 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
44627 + else {
44628 + if (gr_status & GR_READY)
44629 + error = -EAGAIN;
44630 + else
44631 + error = error2;
44632 + gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
44633 + }
44634 + break;
44635 + case GR_RELOAD:
44636 + if (!(gr_status & GR_READY)) {
44637 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
44638 + error = -EAGAIN;
44639 + } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44640 + preempt_disable();
44641 +
44642 + pax_open_kernel();
44643 + gr_status &= ~GR_READY;
44644 + pax_close_kernel();
44645 +
44646 + free_variables();
44647 + if (!(error2 = gracl_init(gr_usermode))) {
44648 + preempt_enable();
44649 + gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
44650 + } else {
44651 + preempt_enable();
44652 + error = error2;
44653 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44654 + }
44655 + } else {
44656 + gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
44657 + error = -EPERM;
44658 + }
44659 + break;
44660 + case GR_SEGVMOD:
44661 + if (unlikely(!(gr_status & GR_READY))) {
44662 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
44663 + error = -EAGAIN;
44664 + break;
44665 + }
44666 +
44667 + if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
44668 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
44669 + if (gr_usermode->segv_device && gr_usermode->segv_inode) {
44670 + struct acl_subject_label *segvacl;
44671 + segvacl =
44672 + lookup_acl_subj_label(gr_usermode->segv_inode,
44673 + gr_usermode->segv_device,
44674 + current->role);
44675 + if (segvacl) {
44676 + segvacl->crashes = 0;
44677 + segvacl->expires = 0;
44678 + }
44679 + } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
44680 + gr_remove_uid(gr_usermode->segv_uid);
44681 + }
44682 + } else {
44683 + gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
44684 + error = -EPERM;
44685 + }
44686 + break;
44687 + case GR_SPROLE:
44688 + case GR_SPROLEPAM:
44689 + if (unlikely(!(gr_status & GR_READY))) {
44690 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
44691 + error = -EAGAIN;
44692 + break;
44693 + }
44694 +
44695 + if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
44696 + current->role->expires = 0;
44697 + current->role->auth_attempts = 0;
44698 + }
44699 +
44700 + if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
44701 + time_after(current->role->expires, get_seconds())) {
44702 + error = -EBUSY;
44703 + goto out;
44704 + }
44705 +
44706 + if (lookup_special_role_auth
44707 + (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
44708 + && ((!sprole_salt && !sprole_sum)
44709 + || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
44710 + char *p = "";
44711 + assign_special_role(gr_usermode->sp_role);
44712 + read_lock(&tasklist_lock);
44713 + if (current->real_parent)
44714 + p = current->real_parent->role->rolename;
44715 + read_unlock(&tasklist_lock);
44716 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
44717 + p, acl_sp_role_value);
44718 + } else {
44719 + gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
44720 + error = -EPERM;
44721 + if(!(current->role->auth_attempts++))
44722 + current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44723 +
44724 + goto out;
44725 + }
44726 + break;
44727 + case GR_UNSPROLE:
44728 + if (unlikely(!(gr_status & GR_READY))) {
44729 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
44730 + error = -EAGAIN;
44731 + break;
44732 + }
44733 +
44734 + if (current->role->roletype & GR_ROLE_SPECIAL) {
44735 + char *p = "";
44736 + int i = 0;
44737 +
44738 + read_lock(&tasklist_lock);
44739 + if (current->real_parent) {
44740 + p = current->real_parent->role->rolename;
44741 + i = current->real_parent->acl_role_id;
44742 + }
44743 + read_unlock(&tasklist_lock);
44744 +
44745 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
44746 + gr_set_acls(1);
44747 + } else {
44748 + error = -EPERM;
44749 + goto out;
44750 + }
44751 + break;
44752 + default:
44753 + gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
44754 + error = -EINVAL;
44755 + break;
44756 + }
44757 +
44758 + if (error != -EPERM)
44759 + goto out;
44760 +
44761 + if(!(gr_auth_attempts++))
44762 + gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
44763 +
44764 + out:
44765 + mutex_unlock(&gr_dev_mutex);
44766 + return error;
44767 +}
44768 +
44769 +/* must be called with
44770 + rcu_read_lock();
44771 + read_lock(&tasklist_lock);
44772 + read_lock(&grsec_exec_file_lock);
44773 +*/
44774 +int gr_apply_subject_to_task(struct task_struct *task)
44775 +{
44776 + struct acl_object_label *obj;
44777 + char *tmpname;
44778 + struct acl_subject_label *tmpsubj;
44779 + struct file *filp;
44780 + struct name_entry *nmatch;
44781 +
44782 + filp = task->exec_file;
44783 + if (filp == NULL)
44784 + return 0;
44785 +
44786 + /* the following is to apply the correct subject
44787 + on binaries running when the RBAC system
44788 + is enabled, when the binaries have been
44789 + replaced or deleted since their execution
44790 + -----
44791 + when the RBAC system starts, the inode/dev
44792 + from exec_file will be one the RBAC system
44793 + is unaware of. It only knows the inode/dev
44794 + of the present file on disk, or the absence
44795 + of it.
44796 + */
44797 + preempt_disable();
44798 + tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
44799 +
44800 + nmatch = lookup_name_entry(tmpname);
44801 + preempt_enable();
44802 + tmpsubj = NULL;
44803 + if (nmatch) {
44804 + if (nmatch->deleted)
44805 + tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
44806 + else
44807 + tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
44808 + if (tmpsubj != NULL)
44809 + task->acl = tmpsubj;
44810 + }
44811 + if (tmpsubj == NULL)
44812 + task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
44813 + task->role);
44814 + if (task->acl) {
44815 + task->is_writable = 0;
44816 + /* ignore additional mmap checks for processes that are writable
44817 + by the default ACL */
44818 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
44819 + if (unlikely(obj->mode & GR_WRITE))
44820 + task->is_writable = 1;
44821 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
44822 + if (unlikely(obj->mode & GR_WRITE))
44823 + task->is_writable = 1;
44824 +
44825 + gr_set_proc_res(task);
44826 +
44827 +#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
44828 + printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
44829 +#endif
44830 + } else {
44831 + return 1;
44832 + }
44833 +
44834 + return 0;
44835 +}
44836 +
44837 +int
44838 +gr_set_acls(const int type)
44839 +{
44840 + struct task_struct *task, *task2;
44841 + struct acl_role_label *role = current->role;
44842 + __u16 acl_role_id = current->acl_role_id;
44843 + const struct cred *cred;
44844 + int ret;
44845 +
44846 + rcu_read_lock();
44847 + read_lock(&tasklist_lock);
44848 + read_lock(&grsec_exec_file_lock);
44849 + do_each_thread(task2, task) {
44850 + /* check to see if we're called from the exit handler,
44851 + if so, only replace ACLs that have inherited the admin
44852 + ACL */
44853 +
44854 + if (type && (task->role != role ||
44855 + task->acl_role_id != acl_role_id))
44856 + continue;
44857 +
44858 + task->acl_role_id = 0;
44859 + task->acl_sp_role = 0;
44860 +
44861 + if (task->exec_file) {
44862 + cred = __task_cred(task);
44863 + task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
44864 + ret = gr_apply_subject_to_task(task);
44865 + if (ret) {
44866 + read_unlock(&grsec_exec_file_lock);
44867 + read_unlock(&tasklist_lock);
44868 + rcu_read_unlock();
44869 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
44870 + return ret;
44871 + }
44872 + } else {
44873 + // it's a kernel process
44874 + task->role = kernel_role;
44875 + task->acl = kernel_role->root_label;
44876 +#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
44877 + task->acl->mode &= ~GR_PROCFIND;
44878 +#endif
44879 + }
44880 + } while_each_thread(task2, task);
44881 + read_unlock(&grsec_exec_file_lock);
44882 + read_unlock(&tasklist_lock);
44883 + rcu_read_unlock();
44884 +
44885 + return 0;
44886 +}
44887 +
44888 +void
44889 +gr_learn_resource(const struct task_struct *task,
44890 + const int res, const unsigned long wanted, const int gt)
44891 +{
44892 + struct acl_subject_label *acl;
44893 + const struct cred *cred;
44894 +
44895 + if (unlikely((gr_status & GR_READY) &&
44896 + task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
44897 + goto skip_reslog;
44898 +
44899 +#ifdef CONFIG_GRKERNSEC_RESLOG
44900 + gr_log_resource(task, res, wanted, gt);
44901 +#endif
44902 + skip_reslog:
44903 +
44904 + if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
44905 + return;
44906 +
44907 + acl = task->acl;
44908 +
44909 + if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
44910 + !(acl->resmask & (1 << (unsigned short) res))))
44911 + return;
44912 +
44913 + if (wanted >= acl->res[res].rlim_cur) {
44914 + unsigned long res_add;
44915 +
44916 + res_add = wanted;
44917 + switch (res) {
44918 + case RLIMIT_CPU:
44919 + res_add += GR_RLIM_CPU_BUMP;
44920 + break;
44921 + case RLIMIT_FSIZE:
44922 + res_add += GR_RLIM_FSIZE_BUMP;
44923 + break;
44924 + case RLIMIT_DATA:
44925 + res_add += GR_RLIM_DATA_BUMP;
44926 + break;
44927 + case RLIMIT_STACK:
44928 + res_add += GR_RLIM_STACK_BUMP;
44929 + break;
44930 + case RLIMIT_CORE:
44931 + res_add += GR_RLIM_CORE_BUMP;
44932 + break;
44933 + case RLIMIT_RSS:
44934 + res_add += GR_RLIM_RSS_BUMP;
44935 + break;
44936 + case RLIMIT_NPROC:
44937 + res_add += GR_RLIM_NPROC_BUMP;
44938 + break;
44939 + case RLIMIT_NOFILE:
44940 + res_add += GR_RLIM_NOFILE_BUMP;
44941 + break;
44942 + case RLIMIT_MEMLOCK:
44943 + res_add += GR_RLIM_MEMLOCK_BUMP;
44944 + break;
44945 + case RLIMIT_AS:
44946 + res_add += GR_RLIM_AS_BUMP;
44947 + break;
44948 + case RLIMIT_LOCKS:
44949 + res_add += GR_RLIM_LOCKS_BUMP;
44950 + break;
44951 + case RLIMIT_SIGPENDING:
44952 + res_add += GR_RLIM_SIGPENDING_BUMP;
44953 + break;
44954 + case RLIMIT_MSGQUEUE:
44955 + res_add += GR_RLIM_MSGQUEUE_BUMP;
44956 + break;
44957 + case RLIMIT_NICE:
44958 + res_add += GR_RLIM_NICE_BUMP;
44959 + break;
44960 + case RLIMIT_RTPRIO:
44961 + res_add += GR_RLIM_RTPRIO_BUMP;
44962 + break;
44963 + case RLIMIT_RTTIME:
44964 + res_add += GR_RLIM_RTTIME_BUMP;
44965 + break;
44966 + }
44967 +
44968 + acl->res[res].rlim_cur = res_add;
44969 +
44970 + if (wanted > acl->res[res].rlim_max)
44971 + acl->res[res].rlim_max = res_add;
44972 +
44973 + /* only log the subject filename, since resource logging is supported for
44974 + single-subject learning only */
44975 + rcu_read_lock();
44976 + cred = __task_cred(task);
44977 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
44978 + task->role->roletype, cred->uid, cred->gid, acl->filename,
44979 + acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
44980 + "", (unsigned long) res, &task->signal->saved_ip);
44981 + rcu_read_unlock();
44982 + }
44983 +
44984 + return;
44985 +}
44986 +
44987 +#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
44988 +void
44989 +pax_set_initial_flags(struct linux_binprm *bprm)
44990 +{
44991 + struct task_struct *task = current;
44992 + struct acl_subject_label *proc;
44993 + unsigned long flags;
44994 +
44995 + if (unlikely(!(gr_status & GR_READY)))
44996 + return;
44997 +
44998 + flags = pax_get_flags(task);
44999 +
45000 + proc = task->acl;
45001 +
45002 + if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
45003 + flags &= ~MF_PAX_PAGEEXEC;
45004 + if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
45005 + flags &= ~MF_PAX_SEGMEXEC;
45006 + if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
45007 + flags &= ~MF_PAX_RANDMMAP;
45008 + if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
45009 + flags &= ~MF_PAX_EMUTRAMP;
45010 + if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
45011 + flags &= ~MF_PAX_MPROTECT;
45012 +
45013 + if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
45014 + flags |= MF_PAX_PAGEEXEC;
45015 + if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
45016 + flags |= MF_PAX_SEGMEXEC;
45017 + if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
45018 + flags |= MF_PAX_RANDMMAP;
45019 + if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
45020 + flags |= MF_PAX_EMUTRAMP;
45021 + if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
45022 + flags |= MF_PAX_MPROTECT;
45023 +
45024 + pax_set_flags(task, flags);
45025 +
45026 + return;
45027 +}
45028 +#endif
45029 +
45030 +#ifdef CONFIG_SYSCTL
45031 +/* Eric Biederman likes breaking userland ABI and every inode-based security
45032 + system to save 35kb of memory */
45033 +
45034 +/* we modify the passed in filename, but adjust it back before returning */
45035 +static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
45036 +{
45037 + struct name_entry *nmatch;
45038 + char *p, *lastp = NULL;
45039 + struct acl_object_label *obj = NULL, *tmp;
45040 + struct acl_subject_label *tmpsubj;
45041 + char c = '\0';
45042 +
45043 + read_lock(&gr_inode_lock);
45044 +
45045 + p = name + len - 1;
45046 + do {
45047 + nmatch = lookup_name_entry(name);
45048 + if (lastp != NULL)
45049 + *lastp = c;
45050 +
45051 + if (nmatch == NULL)
45052 + goto next_component;
45053 + tmpsubj = current->acl;
45054 + do {
45055 + obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
45056 + if (obj != NULL) {
45057 + tmp = obj->globbed;
45058 + while (tmp) {
45059 + if (!glob_match(tmp->filename, name)) {
45060 + obj = tmp;
45061 + goto found_obj;
45062 + }
45063 + tmp = tmp->next;
45064 + }
45065 + goto found_obj;
45066 + }
45067 + } while ((tmpsubj = tmpsubj->parent_subject));
45068 +next_component:
45069 + /* end case */
45070 + if (p == name)
45071 + break;
45072 +
45073 + while (*p != '/')
45074 + p--;
45075 + if (p == name)
45076 + lastp = p + 1;
45077 + else {
45078 + lastp = p;
45079 + p--;
45080 + }
45081 + c = *lastp;
45082 + *lastp = '\0';
45083 + } while (1);
45084 +found_obj:
45085 + read_unlock(&gr_inode_lock);
45086 + /* obj returned will always be non-null */
45087 + return obj;
45088 +}
45089 +
45090 +/* returns 0 when allowing, non-zero on error
45091 + op of 0 is used for readdir, so we don't log the names of hidden files
45092 +*/
45093 +__u32
45094 +gr_handle_sysctl(const struct ctl_table *table, const int op)
45095 +{
45096 + struct ctl_table *tmp;
45097 + const char *proc_sys = "/proc/sys";
45098 + char *path;
45099 + struct acl_object_label *obj;
45100 + unsigned short len = 0, pos = 0, depth = 0, i;
45101 + __u32 err = 0;
45102 + __u32 mode = 0;
45103 +
45104 + if (unlikely(!(gr_status & GR_READY)))
45105 + return 0;
45106 +
45107 + /* for now, ignore operations on non-sysctl entries if it's not a
45108 + readdir*/
45109 + if (table->child != NULL && op != 0)
45110 + return 0;
45111 +
45112 + mode |= GR_FIND;
45113 + /* it's only a read if it's an entry, read on dirs is for readdir */
45114 + if (op & MAY_READ)
45115 + mode |= GR_READ;
45116 + if (op & MAY_WRITE)
45117 + mode |= GR_WRITE;
45118 +
45119 + preempt_disable();
45120 +
45121 + path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
45122 +
45123 + /* it's only a read/write if it's an actual entry, not a dir
45124 + (which are opened for readdir)
45125 + */
45126 +
45127 + /* convert the requested sysctl entry into a pathname */
45128 +
45129 + for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
45130 + len += strlen(tmp->procname);
45131 + len++;
45132 + depth++;
45133 + }
45134 +
45135 + if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
45136 + /* deny */
45137 + goto out;
45138 + }
45139 +
45140 + memset(path, 0, PAGE_SIZE);
45141 +
45142 + memcpy(path, proc_sys, strlen(proc_sys));
45143 +
45144 + pos += strlen(proc_sys);
45145 +
45146 + for (; depth > 0; depth--) {
45147 + path[pos] = '/';
45148 + pos++;
45149 + for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
45150 + if (depth == i) {
45151 + memcpy(path + pos, tmp->procname,
45152 + strlen(tmp->procname));
45153 + pos += strlen(tmp->procname);
45154 + }
45155 + i++;
45156 + }
45157 + }
45158 +
45159 + obj = gr_lookup_by_name(path, pos);
45160 + err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
45161 +
45162 + if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
45163 + ((err & mode) != mode))) {
45164 + __u32 new_mode = mode;
45165 +
45166 + new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
45167 +
45168 + err = 0;
45169 + gr_log_learn_sysctl(path, new_mode);
45170 + } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
45171 + gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
45172 + err = -ENOENT;
45173 + } else if (!(err & GR_FIND)) {
45174 + err = -ENOENT;
45175 + } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
45176 + gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
45177 + path, (mode & GR_READ) ? " reading" : "",
45178 + (mode & GR_WRITE) ? " writing" : "");
45179 + err = -EACCES;
45180 + } else if ((err & mode) != mode) {
45181 + err = -EACCES;
45182 + } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
45183 + gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
45184 + path, (mode & GR_READ) ? " reading" : "",
45185 + (mode & GR_WRITE) ? " writing" : "");
45186 + err = 0;
45187 + } else
45188 + err = 0;
45189 +
45190 + out:
45191 + preempt_enable();
45192 +
45193 + return err;
45194 +}
45195 +#endif
45196 +
45197 +int
45198 +gr_handle_proc_ptrace(struct task_struct *task)
45199 +{
45200 + struct file *filp;
45201 + struct task_struct *tmp = task;
45202 + struct task_struct *curtemp = current;
45203 + __u32 retmode;
45204 +
45205 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
45206 + if (unlikely(!(gr_status & GR_READY)))
45207 + return 0;
45208 +#endif
45209 +
45210 + read_lock(&tasklist_lock);
45211 + read_lock(&grsec_exec_file_lock);
45212 + filp = task->exec_file;
45213 +
45214 + while (tmp->pid > 0) {
45215 + if (tmp == curtemp)
45216 + break;
45217 + tmp = tmp->real_parent;
45218 + }
45219 +
45220 + if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
45221 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
45222 + read_unlock(&grsec_exec_file_lock);
45223 + read_unlock(&tasklist_lock);
45224 + return 1;
45225 + }
45226 +
45227 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
45228 + if (!(gr_status & GR_READY)) {
45229 + read_unlock(&grsec_exec_file_lock);
45230 + read_unlock(&tasklist_lock);
45231 + return 0;
45232 + }
45233 +#endif
45234 +
45235 + retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
45236 + read_unlock(&grsec_exec_file_lock);
45237 + read_unlock(&tasklist_lock);
45238 +
45239 + if (retmode & GR_NOPTRACE)
45240 + return 1;
45241 +
45242 + if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
45243 + && (current->acl != task->acl || (current->acl != current->role->root_label
45244 + && current->pid != task->pid)))
45245 + return 1;
45246 +
45247 + return 0;
45248 +}
45249 +
45250 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
45251 +{
45252 + if (unlikely(!(gr_status & GR_READY)))
45253 + return;
45254 +
45255 + if (!(current->role->roletype & GR_ROLE_GOD))
45256 + return;
45257 +
45258 + seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
45259 + p->role->rolename, gr_task_roletype_to_char(p),
45260 + p->acl->filename);
45261 +}
45262 +
45263 +int
45264 +gr_handle_ptrace(struct task_struct *task, const long request)
45265 +{
45266 + struct task_struct *tmp = task;
45267 + struct task_struct *curtemp = current;
45268 + __u32 retmode;
45269 +
45270 +#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
45271 + if (unlikely(!(gr_status & GR_READY)))
45272 + return 0;
45273 +#endif
45274 +
45275 + read_lock(&tasklist_lock);
45276 + while (tmp->pid > 0) {
45277 + if (tmp == curtemp)
45278 + break;
45279 + tmp = tmp->real_parent;
45280 + }
45281 +
45282 + if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
45283 + ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
45284 + read_unlock(&tasklist_lock);
45285 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45286 + return 1;
45287 + }
45288 + read_unlock(&tasklist_lock);
45289 +
45290 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
45291 + if (!(gr_status & GR_READY))
45292 + return 0;
45293 +#endif
45294 +
45295 + read_lock(&grsec_exec_file_lock);
45296 + if (unlikely(!task->exec_file)) {
45297 + read_unlock(&grsec_exec_file_lock);
45298 + return 0;
45299 + }
45300 +
45301 + retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
45302 + read_unlock(&grsec_exec_file_lock);
45303 +
45304 + if (retmode & GR_NOPTRACE) {
45305 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45306 + return 1;
45307 + }
45308 +
45309 + if (retmode & GR_PTRACERD) {
45310 + switch (request) {
45311 + case PTRACE_POKETEXT:
45312 + case PTRACE_POKEDATA:
45313 + case PTRACE_POKEUSR:
45314 +#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
45315 + case PTRACE_SETREGS:
45316 + case PTRACE_SETFPREGS:
45317 +#endif
45318 +#ifdef CONFIG_X86
45319 + case PTRACE_SETFPXREGS:
45320 +#endif
45321 +#ifdef CONFIG_ALTIVEC
45322 + case PTRACE_SETVRREGS:
45323 +#endif
45324 + return 1;
45325 + default:
45326 + return 0;
45327 + }
45328 + } else if (!(current->acl->mode & GR_POVERRIDE) &&
45329 + !(current->role->roletype & GR_ROLE_GOD) &&
45330 + (current->acl != task->acl)) {
45331 + gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
45332 + return 1;
45333 + }
45334 +
45335 + return 0;
45336 +}
45337 +
45338 +static int is_writable_mmap(const struct file *filp)
45339 +{
45340 + struct task_struct *task = current;
45341 + struct acl_object_label *obj, *obj2;
45342 +
45343 + if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
45344 + !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
45345 + obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
45346 + obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
45347 + task->role->root_label);
45348 + if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
45349 + gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
45350 + return 1;
45351 + }
45352 + }
45353 + return 0;
45354 +}
45355 +
45356 +int
45357 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
45358 +{
45359 + __u32 mode;
45360 +
45361 + if (unlikely(!file || !(prot & PROT_EXEC)))
45362 + return 1;
45363 +
45364 + if (is_writable_mmap(file))
45365 + return 0;
45366 +
45367 + mode =
45368 + gr_search_file(file->f_path.dentry,
45369 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
45370 + file->f_path.mnt);
45371 +
45372 + if (!gr_tpe_allow(file))
45373 + return 0;
45374 +
45375 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
45376 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45377 + return 0;
45378 + } else if (unlikely(!(mode & GR_EXEC))) {
45379 + return 0;
45380 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
45381 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45382 + return 1;
45383 + }
45384 +
45385 + return 1;
45386 +}
45387 +
45388 +int
45389 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
45390 +{
45391 + __u32 mode;
45392 +
45393 + if (unlikely(!file || !(prot & PROT_EXEC)))
45394 + return 1;
45395 +
45396 + if (is_writable_mmap(file))
45397 + return 0;
45398 +
45399 + mode =
45400 + gr_search_file(file->f_path.dentry,
45401 + GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
45402 + file->f_path.mnt);
45403 +
45404 + if (!gr_tpe_allow(file))
45405 + return 0;
45406 +
45407 + if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
45408 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45409 + return 0;
45410 + } else if (unlikely(!(mode & GR_EXEC))) {
45411 + return 0;
45412 + } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
45413 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
45414 + return 1;
45415 + }
45416 +
45417 + return 1;
45418 +}
45419 +
45420 +void
45421 +gr_acl_handle_psacct(struct task_struct *task, const long code)
45422 +{
45423 + unsigned long runtime;
45424 + unsigned long cputime;
45425 + unsigned int wday, cday;
45426 + __u8 whr, chr;
45427 + __u8 wmin, cmin;
45428 + __u8 wsec, csec;
45429 + struct timespec timeval;
45430 +
45431 + if (unlikely(!(gr_status & GR_READY) || !task->acl ||
45432 + !(task->acl->mode & GR_PROCACCT)))
45433 + return;
45434 +
45435 + do_posix_clock_monotonic_gettime(&timeval);
45436 + runtime = timeval.tv_sec - task->start_time.tv_sec;
45437 + wday = runtime / (3600 * 24);
45438 + runtime -= wday * (3600 * 24);
45439 + whr = runtime / 3600;
45440 + runtime -= whr * 3600;
45441 + wmin = runtime / 60;
45442 + runtime -= wmin * 60;
45443 + wsec = runtime;
45444 +
45445 + cputime = (task->utime + task->stime) / HZ;
45446 + cday = cputime / (3600 * 24);
45447 + cputime -= cday * (3600 * 24);
45448 + chr = cputime / 3600;
45449 + cputime -= chr * 3600;
45450 + cmin = cputime / 60;
45451 + cputime -= cmin * 60;
45452 + csec = cputime;
45453 +
45454 + gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
45455 +
45456 + return;
45457 +}
45458 +
45459 +void gr_set_kernel_label(struct task_struct *task)
45460 +{
45461 + if (gr_status & GR_READY) {
45462 + task->role = kernel_role;
45463 + task->acl = kernel_role->root_label;
45464 + }
45465 + return;
45466 +}
45467 +
45468 +#ifdef CONFIG_TASKSTATS
45469 +int gr_is_taskstats_denied(int pid)
45470 +{
45471 + struct task_struct *task;
45472 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45473 + const struct cred *cred;
45474 +#endif
45475 + int ret = 0;
45476 +
45477 + /* restrict taskstats viewing to un-chrooted root users
45478 + who have the 'view' subject flag if the RBAC system is enabled
45479 + */
45480 +
45481 + rcu_read_lock();
45482 + read_lock(&tasklist_lock);
45483 + task = find_task_by_vpid(pid);
45484 + if (task) {
45485 +#ifdef CONFIG_GRKERNSEC_CHROOT
45486 + if (proc_is_chrooted(task))
45487 + ret = -EACCES;
45488 +#endif
45489 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45490 + cred = __task_cred(task);
45491 +#ifdef CONFIG_GRKERNSEC_PROC_USER
45492 + if (cred->uid != 0)
45493 + ret = -EACCES;
45494 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45495 + if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
45496 + ret = -EACCES;
45497 +#endif
45498 +#endif
45499 + if (gr_status & GR_READY) {
45500 + if (!(task->acl->mode & GR_VIEW))
45501 + ret = -EACCES;
45502 + }
45503 + } else
45504 + ret = -ENOENT;
45505 +
45506 + read_unlock(&tasklist_lock);
45507 + rcu_read_unlock();
45508 +
45509 + return ret;
45510 +}
45511 +#endif
45512 +
45513 +/* AUXV entries are filled via a descendant of search_binary_handler
45514 + after we've already applied the subject for the target
45515 +*/
45516 +int gr_acl_enable_at_secure(void)
45517 +{
45518 + if (unlikely(!(gr_status & GR_READY)))
45519 + return 0;
45520 +
45521 + if (current->acl->mode & GR_ATSECURE)
45522 + return 1;
45523 +
45524 + return 0;
45525 +}
45526 +
45527 +int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
45528 +{
45529 + struct task_struct *task = current;
45530 + struct dentry *dentry = file->f_path.dentry;
45531 + struct vfsmount *mnt = file->f_path.mnt;
45532 + struct acl_object_label *obj, *tmp;
45533 + struct acl_subject_label *subj;
45534 + unsigned int bufsize;
45535 + int is_not_root;
45536 + char *path;
45537 + dev_t dev = __get_dev(dentry);
45538 +
45539 + if (unlikely(!(gr_status & GR_READY)))
45540 + return 1;
45541 +
45542 + if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
45543 + return 1;
45544 +
45545 + /* ignore Eric Biederman */
45546 + if (IS_PRIVATE(dentry->d_inode))
45547 + return 1;
45548 +
45549 + subj = task->acl;
45550 + do {
45551 + obj = lookup_acl_obj_label(ino, dev, subj);
45552 + if (obj != NULL)
45553 + return (obj->mode & GR_FIND) ? 1 : 0;
45554 + } while ((subj = subj->parent_subject));
45555 +
45556 + /* this is purely an optimization since we're looking for an object
45557 + for the directory we're doing a readdir on
45558 + if it's possible for any globbed object to match the entry we're
45559 + filling into the directory, then the object we find here will be
45560 + an anchor point with attached globbed objects
45561 + */
45562 + obj = chk_obj_label_noglob(dentry, mnt, task->acl);
45563 + if (obj->globbed == NULL)
45564 + return (obj->mode & GR_FIND) ? 1 : 0;
45565 +
45566 + is_not_root = ((obj->filename[0] == '/') &&
45567 + (obj->filename[1] == '\0')) ? 0 : 1;
45568 + bufsize = PAGE_SIZE - namelen - is_not_root;
45569 +
45570 + /* check bufsize > PAGE_SIZE || bufsize == 0 */
45571 + if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
45572 + return 1;
45573 +
45574 + preempt_disable();
45575 + path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
45576 + bufsize);
45577 +
45578 + bufsize = strlen(path);
45579 +
45580 + /* if base is "/", don't append an additional slash */
45581 + if (is_not_root)
45582 + *(path + bufsize) = '/';
45583 + memcpy(path + bufsize + is_not_root, name, namelen);
45584 + *(path + bufsize + namelen + is_not_root) = '\0';
45585 +
45586 + tmp = obj->globbed;
45587 + while (tmp) {
45588 + if (!glob_match(tmp->filename, path)) {
45589 + preempt_enable();
45590 + return (tmp->mode & GR_FIND) ? 1 : 0;
45591 + }
45592 + tmp = tmp->next;
45593 + }
45594 + preempt_enable();
45595 + return (obj->mode & GR_FIND) ? 1 : 0;
45596 +}
45597 +
45598 +#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
45599 +EXPORT_SYMBOL(gr_acl_is_enabled);
45600 +#endif
45601 +EXPORT_SYMBOL(gr_learn_resource);
45602 +EXPORT_SYMBOL(gr_set_kernel_label);
45603 +#ifdef CONFIG_SECURITY
45604 +EXPORT_SYMBOL(gr_check_user_change);
45605 +EXPORT_SYMBOL(gr_check_group_change);
45606 +#endif
45607 +
45608 diff -urNp linux-2.6.39.4/grsecurity/gracl_cap.c linux-2.6.39.4/grsecurity/gracl_cap.c
45609 --- linux-2.6.39.4/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
45610 +++ linux-2.6.39.4/grsecurity/gracl_cap.c 2011-08-05 19:44:37.000000000 -0400
45611 @@ -0,0 +1,139 @@
45612 +#include <linux/kernel.h>
45613 +#include <linux/module.h>
45614 +#include <linux/sched.h>
45615 +#include <linux/gracl.h>
45616 +#include <linux/grsecurity.h>
45617 +#include <linux/grinternal.h>
45618 +
45619 +static const char *captab_log[] = {
45620 + "CAP_CHOWN",
45621 + "CAP_DAC_OVERRIDE",
45622 + "CAP_DAC_READ_SEARCH",
45623 + "CAP_FOWNER",
45624 + "CAP_FSETID",
45625 + "CAP_KILL",
45626 + "CAP_SETGID",
45627 + "CAP_SETUID",
45628 + "CAP_SETPCAP",
45629 + "CAP_LINUX_IMMUTABLE",
45630 + "CAP_NET_BIND_SERVICE",
45631 + "CAP_NET_BROADCAST",
45632 + "CAP_NET_ADMIN",
45633 + "CAP_NET_RAW",
45634 + "CAP_IPC_LOCK",
45635 + "CAP_IPC_OWNER",
45636 + "CAP_SYS_MODULE",
45637 + "CAP_SYS_RAWIO",
45638 + "CAP_SYS_CHROOT",
45639 + "CAP_SYS_PTRACE",
45640 + "CAP_SYS_PACCT",
45641 + "CAP_SYS_ADMIN",
45642 + "CAP_SYS_BOOT",
45643 + "CAP_SYS_NICE",
45644 + "CAP_SYS_RESOURCE",
45645 + "CAP_SYS_TIME",
45646 + "CAP_SYS_TTY_CONFIG",
45647 + "CAP_MKNOD",
45648 + "CAP_LEASE",
45649 + "CAP_AUDIT_WRITE",
45650 + "CAP_AUDIT_CONTROL",
45651 + "CAP_SETFCAP",
45652 + "CAP_MAC_OVERRIDE",
45653 + "CAP_MAC_ADMIN",
45654 + "CAP_SYSLOG"
45655 +};
45656 +
45657 +EXPORT_SYMBOL(gr_is_capable);
45658 +EXPORT_SYMBOL(gr_is_capable_nolog);
45659 +
45660 +int
45661 +gr_is_capable(const int cap)
45662 +{
45663 + struct task_struct *task = current;
45664 + const struct cred *cred = current_cred();
45665 + struct acl_subject_label *curracl;
45666 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45667 + kernel_cap_t cap_audit = __cap_empty_set;
45668 +
45669 + if (!gr_acl_is_enabled())
45670 + return 1;
45671 +
45672 + curracl = task->acl;
45673 +
45674 + cap_drop = curracl->cap_lower;
45675 + cap_mask = curracl->cap_mask;
45676 + cap_audit = curracl->cap_invert_audit;
45677 +
45678 + while ((curracl = curracl->parent_subject)) {
45679 + /* if the cap isn't specified in the current computed mask but is specified in the
45680 + current level subject, and is lowered in the current level subject, then add
45681 + it to the set of dropped capabilities
45682 + otherwise, add the current level subject's mask to the current computed mask
45683 + */
45684 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45685 + cap_raise(cap_mask, cap);
45686 + if (cap_raised(curracl->cap_lower, cap))
45687 + cap_raise(cap_drop, cap);
45688 + if (cap_raised(curracl->cap_invert_audit, cap))
45689 + cap_raise(cap_audit, cap);
45690 + }
45691 + }
45692 +
45693 + if (!cap_raised(cap_drop, cap)) {
45694 + if (cap_raised(cap_audit, cap))
45695 + gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
45696 + return 1;
45697 + }
45698 +
45699 + curracl = task->acl;
45700 +
45701 + if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
45702 + && cap_raised(cred->cap_effective, cap)) {
45703 + security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
45704 + task->role->roletype, cred->uid,
45705 + cred->gid, task->exec_file ?
45706 + gr_to_filename(task->exec_file->f_path.dentry,
45707 + task->exec_file->f_path.mnt) : curracl->filename,
45708 + curracl->filename, 0UL,
45709 + 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
45710 + return 1;
45711 + }
45712 +
45713 + if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
45714 + gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
45715 + return 0;
45716 +}
45717 +
45718 +int
45719 +gr_is_capable_nolog(const int cap)
45720 +{
45721 + struct acl_subject_label *curracl;
45722 + kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
45723 +
45724 + if (!gr_acl_is_enabled())
45725 + return 1;
45726 +
45727 + curracl = current->acl;
45728 +
45729 + cap_drop = curracl->cap_lower;
45730 + cap_mask = curracl->cap_mask;
45731 +
45732 + while ((curracl = curracl->parent_subject)) {
45733 + /* if the cap isn't specified in the current computed mask but is specified in the
45734 + current level subject, and is lowered in the current level subject, then add
45735 + it to the set of dropped capabilities
45736 + otherwise, add the current level subject's mask to the current computed mask
45737 + */
45738 + if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
45739 + cap_raise(cap_mask, cap);
45740 + if (cap_raised(curracl->cap_lower, cap))
45741 + cap_raise(cap_drop, cap);
45742 + }
45743 + }
45744 +
45745 + if (!cap_raised(cap_drop, cap))
45746 + return 1;
45747 +
45748 + return 0;
45749 +}
45750 +
45751 diff -urNp linux-2.6.39.4/grsecurity/gracl_fs.c linux-2.6.39.4/grsecurity/gracl_fs.c
45752 --- linux-2.6.39.4/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
45753 +++ linux-2.6.39.4/grsecurity/gracl_fs.c 2011-08-05 19:44:37.000000000 -0400
45754 @@ -0,0 +1,431 @@
45755 +#include <linux/kernel.h>
45756 +#include <linux/sched.h>
45757 +#include <linux/types.h>
45758 +#include <linux/fs.h>
45759 +#include <linux/file.h>
45760 +#include <linux/stat.h>
45761 +#include <linux/grsecurity.h>
45762 +#include <linux/grinternal.h>
45763 +#include <linux/gracl.h>
45764 +
45765 +__u32
45766 +gr_acl_handle_hidden_file(const struct dentry * dentry,
45767 + const struct vfsmount * mnt)
45768 +{
45769 + __u32 mode;
45770 +
45771 + if (unlikely(!dentry->d_inode))
45772 + return GR_FIND;
45773 +
45774 + mode =
45775 + gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
45776 +
45777 + if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
45778 + gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45779 + return mode;
45780 + } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
45781 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
45782 + return 0;
45783 + } else if (unlikely(!(mode & GR_FIND)))
45784 + return 0;
45785 +
45786 + return GR_FIND;
45787 +}
45788 +
45789 +__u32
45790 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
45791 + const int fmode)
45792 +{
45793 + __u32 reqmode = GR_FIND;
45794 + __u32 mode;
45795 +
45796 + if (unlikely(!dentry->d_inode))
45797 + return reqmode;
45798 +
45799 + if (unlikely(fmode & O_APPEND))
45800 + reqmode |= GR_APPEND;
45801 + else if (unlikely(fmode & FMODE_WRITE))
45802 + reqmode |= GR_WRITE;
45803 + if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45804 + reqmode |= GR_READ;
45805 + if ((fmode & FMODE_GREXEC) && (fmode & __FMODE_EXEC))
45806 + reqmode &= ~GR_READ;
45807 + mode =
45808 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45809 + mnt);
45810 +
45811 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45812 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45813 + reqmode & GR_READ ? " reading" : "",
45814 + reqmode & GR_WRITE ? " writing" : reqmode &
45815 + GR_APPEND ? " appending" : "");
45816 + return reqmode;
45817 + } else
45818 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45819 + {
45820 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
45821 + reqmode & GR_READ ? " reading" : "",
45822 + reqmode & GR_WRITE ? " writing" : reqmode &
45823 + GR_APPEND ? " appending" : "");
45824 + return 0;
45825 + } else if (unlikely((mode & reqmode) != reqmode))
45826 + return 0;
45827 +
45828 + return reqmode;
45829 +}
45830 +
45831 +__u32
45832 +gr_acl_handle_creat(const struct dentry * dentry,
45833 + const struct dentry * p_dentry,
45834 + const struct vfsmount * p_mnt, const int fmode,
45835 + const int imode)
45836 +{
45837 + __u32 reqmode = GR_WRITE | GR_CREATE;
45838 + __u32 mode;
45839 +
45840 + if (unlikely(fmode & O_APPEND))
45841 + reqmode |= GR_APPEND;
45842 + if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
45843 + reqmode |= GR_READ;
45844 + if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
45845 + reqmode |= GR_SETID;
45846 +
45847 + mode =
45848 + gr_check_create(dentry, p_dentry, p_mnt,
45849 + reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
45850 +
45851 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45852 + gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45853 + reqmode & GR_READ ? " reading" : "",
45854 + reqmode & GR_WRITE ? " writing" : reqmode &
45855 + GR_APPEND ? " appending" : "");
45856 + return reqmode;
45857 + } else
45858 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45859 + {
45860 + gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
45861 + reqmode & GR_READ ? " reading" : "",
45862 + reqmode & GR_WRITE ? " writing" : reqmode &
45863 + GR_APPEND ? " appending" : "");
45864 + return 0;
45865 + } else if (unlikely((mode & reqmode) != reqmode))
45866 + return 0;
45867 +
45868 + return reqmode;
45869 +}
45870 +
45871 +__u32
45872 +gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
45873 + const int fmode)
45874 +{
45875 + __u32 mode, reqmode = GR_FIND;
45876 +
45877 + if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
45878 + reqmode |= GR_EXEC;
45879 + if (fmode & S_IWOTH)
45880 + reqmode |= GR_WRITE;
45881 + if (fmode & S_IROTH)
45882 + reqmode |= GR_READ;
45883 +
45884 + mode =
45885 + gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
45886 + mnt);
45887 +
45888 + if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
45889 + gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45890 + reqmode & GR_READ ? " reading" : "",
45891 + reqmode & GR_WRITE ? " writing" : "",
45892 + reqmode & GR_EXEC ? " executing" : "");
45893 + return reqmode;
45894 + } else
45895 + if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
45896 + {
45897 + gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
45898 + reqmode & GR_READ ? " reading" : "",
45899 + reqmode & GR_WRITE ? " writing" : "",
45900 + reqmode & GR_EXEC ? " executing" : "");
45901 + return 0;
45902 + } else if (unlikely((mode & reqmode) != reqmode))
45903 + return 0;
45904 +
45905 + return reqmode;
45906 +}
45907 +
45908 +static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
45909 +{
45910 + __u32 mode;
45911 +
45912 + mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
45913 +
45914 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
45915 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
45916 + return mode;
45917 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
45918 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
45919 + return 0;
45920 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
45921 + return 0;
45922 +
45923 + return (reqmode);
45924 +}
45925 +
45926 +__u32
45927 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
45928 +{
45929 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
45930 +}
45931 +
45932 +__u32
45933 +gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
45934 +{
45935 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
45936 +}
45937 +
45938 +__u32
45939 +gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
45940 +{
45941 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
45942 +}
45943 +
45944 +__u32
45945 +gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
45946 +{
45947 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
45948 +}
45949 +
45950 +__u32
45951 +gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
45952 + mode_t mode)
45953 +{
45954 + if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
45955 + return 1;
45956 +
45957 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45958 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45959 + GR_FCHMOD_ACL_MSG);
45960 + } else {
45961 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
45962 + }
45963 +}
45964 +
45965 +__u32
45966 +gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
45967 + mode_t mode)
45968 +{
45969 + if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
45970 + return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
45971 + GR_CHMOD_ACL_MSG);
45972 + } else {
45973 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
45974 + }
45975 +}
45976 +
45977 +__u32
45978 +gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
45979 +{
45980 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
45981 +}
45982 +
45983 +__u32
45984 +gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
45985 +{
45986 + return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
45987 +}
45988 +
45989 +__u32
45990 +gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
45991 +{
45992 + return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
45993 +}
45994 +
45995 +__u32
45996 +gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
45997 +{
45998 + return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
45999 + GR_UNIXCONNECT_ACL_MSG);
46000 +}
46001 +
46002 +/* hardlinks require at minimum create permission,
46003 + any additional privilege required is based on the
46004 + privilege of the file being linked to
46005 +*/
46006 +__u32
46007 +gr_acl_handle_link(const struct dentry * new_dentry,
46008 + const struct dentry * parent_dentry,
46009 + const struct vfsmount * parent_mnt,
46010 + const struct dentry * old_dentry,
46011 + const struct vfsmount * old_mnt, const char *to)
46012 +{
46013 + __u32 mode;
46014 + __u32 needmode = GR_CREATE | GR_LINK;
46015 + __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
46016 +
46017 + mode =
46018 + gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
46019 + old_mnt);
46020 +
46021 + if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
46022 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
46023 + return mode;
46024 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
46025 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
46026 + return 0;
46027 + } else if (unlikely((mode & needmode) != needmode))
46028 + return 0;
46029 +
46030 + return 1;
46031 +}
46032 +
46033 +__u32
46034 +gr_acl_handle_symlink(const struct dentry * new_dentry,
46035 + const struct dentry * parent_dentry,
46036 + const struct vfsmount * parent_mnt, const char *from)
46037 +{
46038 + __u32 needmode = GR_WRITE | GR_CREATE;
46039 + __u32 mode;
46040 +
46041 + mode =
46042 + gr_check_create(new_dentry, parent_dentry, parent_mnt,
46043 + GR_CREATE | GR_AUDIT_CREATE |
46044 + GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
46045 +
46046 + if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
46047 + gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
46048 + return mode;
46049 + } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
46050 + gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
46051 + return 0;
46052 + } else if (unlikely((mode & needmode) != needmode))
46053 + return 0;
46054 +
46055 + return (GR_WRITE | GR_CREATE);
46056 +}
46057 +
46058 +static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
46059 +{
46060 + __u32 mode;
46061 +
46062 + mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
46063 +
46064 + if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
46065 + gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
46066 + return mode;
46067 + } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
46068 + gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
46069 + return 0;
46070 + } else if (unlikely((mode & (reqmode)) != (reqmode)))
46071 + return 0;
46072 +
46073 + return (reqmode);
46074 +}
46075 +
46076 +__u32
46077 +gr_acl_handle_mknod(const struct dentry * new_dentry,
46078 + const struct dentry * parent_dentry,
46079 + const struct vfsmount * parent_mnt,
46080 + const int mode)
46081 +{
46082 + __u32 reqmode = GR_WRITE | GR_CREATE;
46083 + if (unlikely(mode & (S_ISUID | S_ISGID)))
46084 + reqmode |= GR_SETID;
46085 +
46086 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
46087 + reqmode, GR_MKNOD_ACL_MSG);
46088 +}
46089 +
46090 +__u32
46091 +gr_acl_handle_mkdir(const struct dentry *new_dentry,
46092 + const struct dentry *parent_dentry,
46093 + const struct vfsmount *parent_mnt)
46094 +{
46095 + return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
46096 + GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
46097 +}
46098 +
46099 +#define RENAME_CHECK_SUCCESS(old, new) \
46100 + (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
46101 + ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
46102 +
46103 +int
46104 +gr_acl_handle_rename(struct dentry *new_dentry,
46105 + struct dentry *parent_dentry,
46106 + const struct vfsmount *parent_mnt,
46107 + struct dentry *old_dentry,
46108 + struct inode *old_parent_inode,
46109 + struct vfsmount *old_mnt, const char *newname)
46110 +{
46111 + __u32 comp1, comp2;
46112 + int error = 0;
46113 +
46114 + if (unlikely(!gr_acl_is_enabled()))
46115 + return 0;
46116 +
46117 + if (!new_dentry->d_inode) {
46118 + comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
46119 + GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
46120 + GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
46121 + comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
46122 + GR_DELETE | GR_AUDIT_DELETE |
46123 + GR_AUDIT_READ | GR_AUDIT_WRITE |
46124 + GR_SUPPRESS, old_mnt);
46125 + } else {
46126 + comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
46127 + GR_CREATE | GR_DELETE |
46128 + GR_AUDIT_CREATE | GR_AUDIT_DELETE |
46129 + GR_AUDIT_READ | GR_AUDIT_WRITE |
46130 + GR_SUPPRESS, parent_mnt);
46131 + comp2 =
46132 + gr_search_file(old_dentry,
46133 + GR_READ | GR_WRITE | GR_AUDIT_READ |
46134 + GR_DELETE | GR_AUDIT_DELETE |
46135 + GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
46136 + }
46137 +
46138 + if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
46139 + ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
46140 + gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
46141 + else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
46142 + && !(comp2 & GR_SUPPRESS)) {
46143 + gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
46144 + error = -EACCES;
46145 + } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
46146 + error = -EACCES;
46147 +
46148 + return error;
46149 +}
46150 +
46151 +void
46152 +gr_acl_handle_exit(void)
46153 +{
46154 + u16 id;
46155 + char *rolename;
46156 + struct file *exec_file;
46157 +
46158 + if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
46159 + !(current->role->roletype & GR_ROLE_PERSIST))) {
46160 + id = current->acl_role_id;
46161 + rolename = current->role->rolename;
46162 + gr_set_acls(1);
46163 + gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
46164 + }
46165 +
46166 + write_lock(&grsec_exec_file_lock);
46167 + exec_file = current->exec_file;
46168 + current->exec_file = NULL;
46169 + write_unlock(&grsec_exec_file_lock);
46170 +
46171 + if (exec_file)
46172 + fput(exec_file);
46173 +}
46174 +
46175 +int
46176 +gr_acl_handle_procpidmem(const struct task_struct *task)
46177 +{
46178 + if (unlikely(!gr_acl_is_enabled()))
46179 + return 0;
46180 +
46181 + if (task != current && task->acl->mode & GR_PROTPROCFD)
46182 + return -EACCES;
46183 +
46184 + return 0;
46185 +}
46186 diff -urNp linux-2.6.39.4/grsecurity/gracl_ip.c linux-2.6.39.4/grsecurity/gracl_ip.c
46187 --- linux-2.6.39.4/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
46188 +++ linux-2.6.39.4/grsecurity/gracl_ip.c 2011-08-05 19:44:37.000000000 -0400
46189 @@ -0,0 +1,381 @@
46190 +#include <linux/kernel.h>
46191 +#include <asm/uaccess.h>
46192 +#include <asm/errno.h>
46193 +#include <net/sock.h>
46194 +#include <linux/file.h>
46195 +#include <linux/fs.h>
46196 +#include <linux/net.h>
46197 +#include <linux/in.h>
46198 +#include <linux/skbuff.h>
46199 +#include <linux/ip.h>
46200 +#include <linux/udp.h>
46201 +#include <linux/types.h>
46202 +#include <linux/sched.h>
46203 +#include <linux/netdevice.h>
46204 +#include <linux/inetdevice.h>
46205 +#include <linux/gracl.h>
46206 +#include <linux/grsecurity.h>
46207 +#include <linux/grinternal.h>
46208 +
46209 +#define GR_BIND 0x01
46210 +#define GR_CONNECT 0x02
46211 +#define GR_INVERT 0x04
46212 +#define GR_BINDOVERRIDE 0x08
46213 +#define GR_CONNECTOVERRIDE 0x10
46214 +#define GR_SOCK_FAMILY 0x20
46215 +
46216 +static const char * gr_protocols[IPPROTO_MAX] = {
46217 + "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
46218 + "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
46219 + "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
46220 + "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
46221 + "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
46222 + "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
46223 + "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
46224 + "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
46225 + "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
46226 + "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
46227 + "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
46228 + "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
46229 + "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
46230 + "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
46231 + "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
46232 + "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
46233 + "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
46234 + "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
46235 + "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
46236 + "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
46237 + "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
46238 + "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
46239 + "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
46240 + "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
46241 + "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
46242 + "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
46243 + "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
46244 + "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
46245 + "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
46246 + "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
46247 + "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
46248 + "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
46249 + };
46250 +
46251 +static const char * gr_socktypes[SOCK_MAX] = {
46252 + "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
46253 + "unknown:7", "unknown:8", "unknown:9", "packet"
46254 + };
46255 +
46256 +static const char * gr_sockfamilies[AF_MAX+1] = {
46257 + "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
46258 + "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
46259 + "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
46260 + "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
46261 + };
46262 +
46263 +const char *
46264 +gr_proto_to_name(unsigned char proto)
46265 +{
46266 + return gr_protocols[proto];
46267 +}
46268 +
46269 +const char *
46270 +gr_socktype_to_name(unsigned char type)
46271 +{
46272 + return gr_socktypes[type];
46273 +}
46274 +
46275 +const char *
46276 +gr_sockfamily_to_name(unsigned char family)
46277 +{
46278 + return gr_sockfamilies[family];
46279 +}
46280 +
46281 +int
46282 +gr_search_socket(const int domain, const int type, const int protocol)
46283 +{
46284 + struct acl_subject_label *curr;
46285 + const struct cred *cred = current_cred();
46286 +
46287 + if (unlikely(!gr_acl_is_enabled()))
46288 + goto exit;
46289 +
46290 + if ((domain < 0) || (type < 0) || (protocol < 0) ||
46291 + (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
46292 + goto exit; // let the kernel handle it
46293 +
46294 + curr = current->acl;
46295 +
46296 + if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
46297 + /* the family is allowed, if this is PF_INET allow it only if
46298 + the extra sock type/protocol checks pass */
46299 + if (domain == PF_INET)
46300 + goto inet_check;
46301 + goto exit;
46302 + } else {
46303 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46304 + __u32 fakeip = 0;
46305 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46306 + current->role->roletype, cred->uid,
46307 + cred->gid, current->exec_file ?
46308 + gr_to_filename(current->exec_file->f_path.dentry,
46309 + current->exec_file->f_path.mnt) :
46310 + curr->filename, curr->filename,
46311 + &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
46312 + &current->signal->saved_ip);
46313 + goto exit;
46314 + }
46315 + goto exit_fail;
46316 + }
46317 +
46318 +inet_check:
46319 + /* the rest of this checking is for IPv4 only */
46320 + if (!curr->ips)
46321 + goto exit;
46322 +
46323 + if ((curr->ip_type & (1 << type)) &&
46324 + (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
46325 + goto exit;
46326 +
46327 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46328 + /* we don't place acls on raw sockets , and sometimes
46329 + dgram/ip sockets are opened for ioctl and not
46330 + bind/connect, so we'll fake a bind learn log */
46331 + if (type == SOCK_RAW || type == SOCK_PACKET) {
46332 + __u32 fakeip = 0;
46333 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46334 + current->role->roletype, cred->uid,
46335 + cred->gid, current->exec_file ?
46336 + gr_to_filename(current->exec_file->f_path.dentry,
46337 + current->exec_file->f_path.mnt) :
46338 + curr->filename, curr->filename,
46339 + &fakeip, 0, type,
46340 + protocol, GR_CONNECT, &current->signal->saved_ip);
46341 + } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
46342 + __u32 fakeip = 0;
46343 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46344 + current->role->roletype, cred->uid,
46345 + cred->gid, current->exec_file ?
46346 + gr_to_filename(current->exec_file->f_path.dentry,
46347 + current->exec_file->f_path.mnt) :
46348 + curr->filename, curr->filename,
46349 + &fakeip, 0, type,
46350 + protocol, GR_BIND, &current->signal->saved_ip);
46351 + }
46352 + /* we'll log when they use connect or bind */
46353 + goto exit;
46354 + }
46355 +
46356 +exit_fail:
46357 + if (domain == PF_INET)
46358 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
46359 + gr_socktype_to_name(type), gr_proto_to_name(protocol));
46360 + else
46361 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
46362 + gr_socktype_to_name(type), protocol);
46363 +
46364 + return 0;
46365 +exit:
46366 + return 1;
46367 +}
46368 +
46369 +int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
46370 +{
46371 + if ((ip->mode & mode) &&
46372 + (ip_port >= ip->low) &&
46373 + (ip_port <= ip->high) &&
46374 + ((ntohl(ip_addr) & our_netmask) ==
46375 + (ntohl(our_addr) & our_netmask))
46376 + && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
46377 + && (ip->type & (1 << type))) {
46378 + if (ip->mode & GR_INVERT)
46379 + return 2; // specifically denied
46380 + else
46381 + return 1; // allowed
46382 + }
46383 +
46384 + return 0; // not specifically allowed, may continue parsing
46385 +}
46386 +
46387 +static int
46388 +gr_search_connectbind(const int full_mode, struct sock *sk,
46389 + struct sockaddr_in *addr, const int type)
46390 +{
46391 + char iface[IFNAMSIZ] = {0};
46392 + struct acl_subject_label *curr;
46393 + struct acl_ip_label *ip;
46394 + struct inet_sock *isk;
46395 + struct net_device *dev;
46396 + struct in_device *idev;
46397 + unsigned long i;
46398 + int ret;
46399 + int mode = full_mode & (GR_BIND | GR_CONNECT);
46400 + __u32 ip_addr = 0;
46401 + __u32 our_addr;
46402 + __u32 our_netmask;
46403 + char *p;
46404 + __u16 ip_port = 0;
46405 + const struct cred *cred = current_cred();
46406 +
46407 + if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
46408 + return 0;
46409 +
46410 + curr = current->acl;
46411 + isk = inet_sk(sk);
46412 +
46413 + /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
46414 + if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
46415 + addr->sin_addr.s_addr = curr->inaddr_any_override;
46416 + if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
46417 + struct sockaddr_in saddr;
46418 + int err;
46419 +
46420 + saddr.sin_family = AF_INET;
46421 + saddr.sin_addr.s_addr = curr->inaddr_any_override;
46422 + saddr.sin_port = isk->inet_sport;
46423 +
46424 + err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46425 + if (err)
46426 + return err;
46427 +
46428 + err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
46429 + if (err)
46430 + return err;
46431 + }
46432 +
46433 + if (!curr->ips)
46434 + return 0;
46435 +
46436 + ip_addr = addr->sin_addr.s_addr;
46437 + ip_port = ntohs(addr->sin_port);
46438 +
46439 + if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
46440 + security_learn(GR_IP_LEARN_MSG, current->role->rolename,
46441 + current->role->roletype, cred->uid,
46442 + cred->gid, current->exec_file ?
46443 + gr_to_filename(current->exec_file->f_path.dentry,
46444 + current->exec_file->f_path.mnt) :
46445 + curr->filename, curr->filename,
46446 + &ip_addr, ip_port, type,
46447 + sk->sk_protocol, mode, &current->signal->saved_ip);
46448 + return 0;
46449 + }
46450 +
46451 + for (i = 0; i < curr->ip_num; i++) {
46452 + ip = *(curr->ips + i);
46453 + if (ip->iface != NULL) {
46454 + strncpy(iface, ip->iface, IFNAMSIZ - 1);
46455 + p = strchr(iface, ':');
46456 + if (p != NULL)
46457 + *p = '\0';
46458 + dev = dev_get_by_name(sock_net(sk), iface);
46459 + if (dev == NULL)
46460 + continue;
46461 + idev = in_dev_get(dev);
46462 + if (idev == NULL) {
46463 + dev_put(dev);
46464 + continue;
46465 + }
46466 + rcu_read_lock();
46467 + for_ifa(idev) {
46468 + if (!strcmp(ip->iface, ifa->ifa_label)) {
46469 + our_addr = ifa->ifa_address;
46470 + our_netmask = 0xffffffff;
46471 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46472 + if (ret == 1) {
46473 + rcu_read_unlock();
46474 + in_dev_put(idev);
46475 + dev_put(dev);
46476 + return 0;
46477 + } else if (ret == 2) {
46478 + rcu_read_unlock();
46479 + in_dev_put(idev);
46480 + dev_put(dev);
46481 + goto denied;
46482 + }
46483 + }
46484 + } endfor_ifa(idev);
46485 + rcu_read_unlock();
46486 + in_dev_put(idev);
46487 + dev_put(dev);
46488 + } else {
46489 + our_addr = ip->addr;
46490 + our_netmask = ip->netmask;
46491 + ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
46492 + if (ret == 1)
46493 + return 0;
46494 + else if (ret == 2)
46495 + goto denied;
46496 + }
46497 + }
46498 +
46499 +denied:
46500 + if (mode == GR_BIND)
46501 + gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46502 + else if (mode == GR_CONNECT)
46503 + gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
46504 +
46505 + return -EACCES;
46506 +}
46507 +
46508 +int
46509 +gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
46510 +{
46511 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
46512 +}
46513 +
46514 +int
46515 +gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
46516 +{
46517 + return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
46518 +}
46519 +
46520 +int gr_search_listen(struct socket *sock)
46521 +{
46522 + struct sock *sk = sock->sk;
46523 + struct sockaddr_in addr;
46524 +
46525 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46526 + addr.sin_port = inet_sk(sk)->inet_sport;
46527 +
46528 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46529 +}
46530 +
46531 +int gr_search_accept(struct socket *sock)
46532 +{
46533 + struct sock *sk = sock->sk;
46534 + struct sockaddr_in addr;
46535 +
46536 + addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
46537 + addr.sin_port = inet_sk(sk)->inet_sport;
46538 +
46539 + return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
46540 +}
46541 +
46542 +int
46543 +gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
46544 +{
46545 + if (addr)
46546 + return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
46547 + else {
46548 + struct sockaddr_in sin;
46549 + const struct inet_sock *inet = inet_sk(sk);
46550 +
46551 + sin.sin_addr.s_addr = inet->inet_daddr;
46552 + sin.sin_port = inet->inet_dport;
46553 +
46554 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46555 + }
46556 +}
46557 +
46558 +int
46559 +gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
46560 +{
46561 + struct sockaddr_in sin;
46562 +
46563 + if (unlikely(skb->len < sizeof (struct udphdr)))
46564 + return 0; // skip this packet
46565 +
46566 + sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
46567 + sin.sin_port = udp_hdr(skb)->source;
46568 +
46569 + return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
46570 +}
46571 diff -urNp linux-2.6.39.4/grsecurity/gracl_learn.c linux-2.6.39.4/grsecurity/gracl_learn.c
46572 --- linux-2.6.39.4/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
46573 +++ linux-2.6.39.4/grsecurity/gracl_learn.c 2011-08-05 19:44:37.000000000 -0400
46574 @@ -0,0 +1,207 @@
46575 +#include <linux/kernel.h>
46576 +#include <linux/mm.h>
46577 +#include <linux/sched.h>
46578 +#include <linux/poll.h>
46579 +#include <linux/string.h>
46580 +#include <linux/file.h>
46581 +#include <linux/types.h>
46582 +#include <linux/vmalloc.h>
46583 +#include <linux/grinternal.h>
46584 +
46585 +extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
46586 + size_t count, loff_t *ppos);
46587 +extern int gr_acl_is_enabled(void);
46588 +
46589 +static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
46590 +static int gr_learn_attached;
46591 +
46592 +/* use a 512k buffer */
46593 +#define LEARN_BUFFER_SIZE (512 * 1024)
46594 +
46595 +static DEFINE_SPINLOCK(gr_learn_lock);
46596 +static DEFINE_MUTEX(gr_learn_user_mutex);
46597 +
46598 +/* we need to maintain two buffers, so that the kernel context of grlearn
46599 + uses a semaphore around the userspace copying, and the other kernel contexts
46600 + use a spinlock when copying into the buffer, since they cannot sleep
46601 +*/
46602 +static char *learn_buffer;
46603 +static char *learn_buffer_user;
46604 +static int learn_buffer_len;
46605 +static int learn_buffer_user_len;
46606 +
46607 +static ssize_t
46608 +read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
46609 +{
46610 + DECLARE_WAITQUEUE(wait, current);
46611 + ssize_t retval = 0;
46612 +
46613 + add_wait_queue(&learn_wait, &wait);
46614 + set_current_state(TASK_INTERRUPTIBLE);
46615 + do {
46616 + mutex_lock(&gr_learn_user_mutex);
46617 + spin_lock(&gr_learn_lock);
46618 + if (learn_buffer_len)
46619 + break;
46620 + spin_unlock(&gr_learn_lock);
46621 + mutex_unlock(&gr_learn_user_mutex);
46622 + if (file->f_flags & O_NONBLOCK) {
46623 + retval = -EAGAIN;
46624 + goto out;
46625 + }
46626 + if (signal_pending(current)) {
46627 + retval = -ERESTARTSYS;
46628 + goto out;
46629 + }
46630 +
46631 + schedule();
46632 + } while (1);
46633 +
46634 + memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
46635 + learn_buffer_user_len = learn_buffer_len;
46636 + retval = learn_buffer_len;
46637 + learn_buffer_len = 0;
46638 +
46639 + spin_unlock(&gr_learn_lock);
46640 +
46641 + if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
46642 + retval = -EFAULT;
46643 +
46644 + mutex_unlock(&gr_learn_user_mutex);
46645 +out:
46646 + set_current_state(TASK_RUNNING);
46647 + remove_wait_queue(&learn_wait, &wait);
46648 + return retval;
46649 +}
46650 +
46651 +static unsigned int
46652 +poll_learn(struct file * file, poll_table * wait)
46653 +{
46654 + poll_wait(file, &learn_wait, wait);
46655 +
46656 + if (learn_buffer_len)
46657 + return (POLLIN | POLLRDNORM);
46658 +
46659 + return 0;
46660 +}
46661 +
46662 +void
46663 +gr_clear_learn_entries(void)
46664 +{
46665 + char *tmp;
46666 +
46667 + mutex_lock(&gr_learn_user_mutex);
46668 + spin_lock(&gr_learn_lock);
46669 + tmp = learn_buffer;
46670 + learn_buffer = NULL;
46671 + spin_unlock(&gr_learn_lock);
46672 + if (tmp)
46673 + vfree(tmp);
46674 + if (learn_buffer_user != NULL) {
46675 + vfree(learn_buffer_user);
46676 + learn_buffer_user = NULL;
46677 + }
46678 + learn_buffer_len = 0;
46679 + mutex_unlock(&gr_learn_user_mutex);
46680 +
46681 + return;
46682 +}
46683 +
46684 +void
46685 +gr_add_learn_entry(const char *fmt, ...)
46686 +{
46687 + va_list args;
46688 + unsigned int len;
46689 +
46690 + if (!gr_learn_attached)
46691 + return;
46692 +
46693 + spin_lock(&gr_learn_lock);
46694 +
46695 + /* leave a gap at the end so we know when it's "full" but don't have to
46696 + compute the exact length of the string we're trying to append
46697 + */
46698 + if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
46699 + spin_unlock(&gr_learn_lock);
46700 + wake_up_interruptible(&learn_wait);
46701 + return;
46702 + }
46703 + if (learn_buffer == NULL) {
46704 + spin_unlock(&gr_learn_lock);
46705 + return;
46706 + }
46707 +
46708 + va_start(args, fmt);
46709 + len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
46710 + va_end(args);
46711 +
46712 + learn_buffer_len += len + 1;
46713 +
46714 + spin_unlock(&gr_learn_lock);
46715 + wake_up_interruptible(&learn_wait);
46716 +
46717 + return;
46718 +}
46719 +
46720 +static int
46721 +open_learn(struct inode *inode, struct file *file)
46722 +{
46723 + if (file->f_mode & FMODE_READ && gr_learn_attached)
46724 + return -EBUSY;
46725 + if (file->f_mode & FMODE_READ) {
46726 + int retval = 0;
46727 + mutex_lock(&gr_learn_user_mutex);
46728 + if (learn_buffer == NULL)
46729 + learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
46730 + if (learn_buffer_user == NULL)
46731 + learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
46732 + if (learn_buffer == NULL) {
46733 + retval = -ENOMEM;
46734 + goto out_error;
46735 + }
46736 + if (learn_buffer_user == NULL) {
46737 + retval = -ENOMEM;
46738 + goto out_error;
46739 + }
46740 + learn_buffer_len = 0;
46741 + learn_buffer_user_len = 0;
46742 + gr_learn_attached = 1;
46743 +out_error:
46744 + mutex_unlock(&gr_learn_user_mutex);
46745 + return retval;
46746 + }
46747 + return 0;
46748 +}
46749 +
46750 +static int
46751 +close_learn(struct inode *inode, struct file *file)
46752 +{
46753 + if (file->f_mode & FMODE_READ) {
46754 + char *tmp = NULL;
46755 + mutex_lock(&gr_learn_user_mutex);
46756 + spin_lock(&gr_learn_lock);
46757 + tmp = learn_buffer;
46758 + learn_buffer = NULL;
46759 + spin_unlock(&gr_learn_lock);
46760 + if (tmp)
46761 + vfree(tmp);
46762 + if (learn_buffer_user != NULL) {
46763 + vfree(learn_buffer_user);
46764 + learn_buffer_user = NULL;
46765 + }
46766 + learn_buffer_len = 0;
46767 + learn_buffer_user_len = 0;
46768 + gr_learn_attached = 0;
46769 + mutex_unlock(&gr_learn_user_mutex);
46770 + }
46771 +
46772 + return 0;
46773 +}
46774 +
46775 +const struct file_operations grsec_fops = {
46776 + .read = read_learn,
46777 + .write = write_grsec_handler,
46778 + .open = open_learn,
46779 + .release = close_learn,
46780 + .poll = poll_learn,
46781 +};
46782 diff -urNp linux-2.6.39.4/grsecurity/gracl_res.c linux-2.6.39.4/grsecurity/gracl_res.c
46783 --- linux-2.6.39.4/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
46784 +++ linux-2.6.39.4/grsecurity/gracl_res.c 2011-08-05 19:44:37.000000000 -0400
46785 @@ -0,0 +1,68 @@
46786 +#include <linux/kernel.h>
46787 +#include <linux/sched.h>
46788 +#include <linux/gracl.h>
46789 +#include <linux/grinternal.h>
46790 +
46791 +static const char *restab_log[] = {
46792 + [RLIMIT_CPU] = "RLIMIT_CPU",
46793 + [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
46794 + [RLIMIT_DATA] = "RLIMIT_DATA",
46795 + [RLIMIT_STACK] = "RLIMIT_STACK",
46796 + [RLIMIT_CORE] = "RLIMIT_CORE",
46797 + [RLIMIT_RSS] = "RLIMIT_RSS",
46798 + [RLIMIT_NPROC] = "RLIMIT_NPROC",
46799 + [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
46800 + [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
46801 + [RLIMIT_AS] = "RLIMIT_AS",
46802 + [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
46803 + [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
46804 + [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
46805 + [RLIMIT_NICE] = "RLIMIT_NICE",
46806 + [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
46807 + [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
46808 + [GR_CRASH_RES] = "RLIMIT_CRASH"
46809 +};
46810 +
46811 +void
46812 +gr_log_resource(const struct task_struct *task,
46813 + const int res, const unsigned long wanted, const int gt)
46814 +{
46815 + const struct cred *cred;
46816 + unsigned long rlim;
46817 +
46818 + if (!gr_acl_is_enabled() && !grsec_resource_logging)
46819 + return;
46820 +
46821 + // not yet supported resource
46822 + if (unlikely(!restab_log[res]))
46823 + return;
46824 +
46825 + if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
46826 + rlim = task_rlimit_max(task, res);
46827 + else
46828 + rlim = task_rlimit(task, res);
46829 +
46830 + if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
46831 + return;
46832 +
46833 + rcu_read_lock();
46834 + cred = __task_cred(task);
46835 +
46836 + if (res == RLIMIT_NPROC &&
46837 + (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
46838 + cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
46839 + goto out_rcu_unlock;
46840 + else if (res == RLIMIT_MEMLOCK &&
46841 + cap_raised(cred->cap_effective, CAP_IPC_LOCK))
46842 + goto out_rcu_unlock;
46843 + else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
46844 + goto out_rcu_unlock;
46845 + rcu_read_unlock();
46846 +
46847 + gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
46848 +
46849 + return;
46850 +out_rcu_unlock:
46851 + rcu_read_unlock();
46852 + return;
46853 +}
46854 diff -urNp linux-2.6.39.4/grsecurity/gracl_segv.c linux-2.6.39.4/grsecurity/gracl_segv.c
46855 --- linux-2.6.39.4/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
46856 +++ linux-2.6.39.4/grsecurity/gracl_segv.c 2011-08-05 19:44:37.000000000 -0400
46857 @@ -0,0 +1,299 @@
46858 +#include <linux/kernel.h>
46859 +#include <linux/mm.h>
46860 +#include <asm/uaccess.h>
46861 +#include <asm/errno.h>
46862 +#include <asm/mman.h>
46863 +#include <net/sock.h>
46864 +#include <linux/file.h>
46865 +#include <linux/fs.h>
46866 +#include <linux/net.h>
46867 +#include <linux/in.h>
46868 +#include <linux/slab.h>
46869 +#include <linux/types.h>
46870 +#include <linux/sched.h>
46871 +#include <linux/timer.h>
46872 +#include <linux/gracl.h>
46873 +#include <linux/grsecurity.h>
46874 +#include <linux/grinternal.h>
46875 +
46876 +static struct crash_uid *uid_set;
46877 +static unsigned short uid_used;
46878 +static DEFINE_SPINLOCK(gr_uid_lock);
46879 +extern rwlock_t gr_inode_lock;
46880 +extern struct acl_subject_label *
46881 + lookup_acl_subj_label(const ino_t inode, const dev_t dev,
46882 + struct acl_role_label *role);
46883 +
46884 +#ifdef CONFIG_BTRFS_FS
46885 +extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
46886 +extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
46887 +#endif
46888 +
46889 +static inline dev_t __get_dev(const struct dentry *dentry)
46890 +{
46891 +#ifdef CONFIG_BTRFS_FS
46892 + if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
46893 + return get_btrfs_dev_from_inode(dentry->d_inode);
46894 + else
46895 +#endif
46896 + return dentry->d_inode->i_sb->s_dev;
46897 +}
46898 +
46899 +int
46900 +gr_init_uidset(void)
46901 +{
46902 + uid_set =
46903 + kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
46904 + uid_used = 0;
46905 +
46906 + return uid_set ? 1 : 0;
46907 +}
46908 +
46909 +void
46910 +gr_free_uidset(void)
46911 +{
46912 + if (uid_set)
46913 + kfree(uid_set);
46914 +
46915 + return;
46916 +}
46917 +
46918 +int
46919 +gr_find_uid(const uid_t uid)
46920 +{
46921 + struct crash_uid *tmp = uid_set;
46922 + uid_t buid;
46923 + int low = 0, high = uid_used - 1, mid;
46924 +
46925 + while (high >= low) {
46926 + mid = (low + high) >> 1;
46927 + buid = tmp[mid].uid;
46928 + if (buid == uid)
46929 + return mid;
46930 + if (buid > uid)
46931 + high = mid - 1;
46932 + if (buid < uid)
46933 + low = mid + 1;
46934 + }
46935 +
46936 + return -1;
46937 +}
46938 +
46939 +static __inline__ void
46940 +gr_insertsort(void)
46941 +{
46942 + unsigned short i, j;
46943 + struct crash_uid index;
46944 +
46945 + for (i = 1; i < uid_used; i++) {
46946 + index = uid_set[i];
46947 + j = i;
46948 + while ((j > 0) && uid_set[j - 1].uid > index.uid) {
46949 + uid_set[j] = uid_set[j - 1];
46950 + j--;
46951 + }
46952 + uid_set[j] = index;
46953 + }
46954 +
46955 + return;
46956 +}
46957 +
46958 +static __inline__ void
46959 +gr_insert_uid(const uid_t uid, const unsigned long expires)
46960 +{
46961 + int loc;
46962 +
46963 + if (uid_used == GR_UIDTABLE_MAX)
46964 + return;
46965 +
46966 + loc = gr_find_uid(uid);
46967 +
46968 + if (loc >= 0) {
46969 + uid_set[loc].expires = expires;
46970 + return;
46971 + }
46972 +
46973 + uid_set[uid_used].uid = uid;
46974 + uid_set[uid_used].expires = expires;
46975 + uid_used++;
46976 +
46977 + gr_insertsort();
46978 +
46979 + return;
46980 +}
46981 +
46982 +void
46983 +gr_remove_uid(const unsigned short loc)
46984 +{
46985 + unsigned short i;
46986 +
46987 + for (i = loc + 1; i < uid_used; i++)
46988 + uid_set[i - 1] = uid_set[i];
46989 +
46990 + uid_used--;
46991 +
46992 + return;
46993 +}
46994 +
46995 +int
46996 +gr_check_crash_uid(const uid_t uid)
46997 +{
46998 + int loc;
46999 + int ret = 0;
47000 +
47001 + if (unlikely(!gr_acl_is_enabled()))
47002 + return 0;
47003 +
47004 + spin_lock(&gr_uid_lock);
47005 + loc = gr_find_uid(uid);
47006 +
47007 + if (loc < 0)
47008 + goto out_unlock;
47009 +
47010 + if (time_before_eq(uid_set[loc].expires, get_seconds()))
47011 + gr_remove_uid(loc);
47012 + else
47013 + ret = 1;
47014 +
47015 +out_unlock:
47016 + spin_unlock(&gr_uid_lock);
47017 + return ret;
47018 +}
47019 +
47020 +static __inline__ int
47021 +proc_is_setxid(const struct cred *cred)
47022 +{
47023 + if (cred->uid != cred->euid || cred->uid != cred->suid ||
47024 + cred->uid != cred->fsuid)
47025 + return 1;
47026 + if (cred->gid != cred->egid || cred->gid != cred->sgid ||
47027 + cred->gid != cred->fsgid)
47028 + return 1;
47029 +
47030 + return 0;
47031 +}
47032 +
47033 +extern int gr_fake_force_sig(int sig, struct task_struct *t);
47034 +
47035 +void
47036 +gr_handle_crash(struct task_struct *task, const int sig)
47037 +{
47038 + struct acl_subject_label *curr;
47039 + struct acl_subject_label *curr2;
47040 + struct task_struct *tsk, *tsk2;
47041 + const struct cred *cred;
47042 + const struct cred *cred2;
47043 +
47044 + if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
47045 + return;
47046 +
47047 + if (unlikely(!gr_acl_is_enabled()))
47048 + return;
47049 +
47050 + curr = task->acl;
47051 +
47052 + if (!(curr->resmask & (1 << GR_CRASH_RES)))
47053 + return;
47054 +
47055 + if (time_before_eq(curr->expires, get_seconds())) {
47056 + curr->expires = 0;
47057 + curr->crashes = 0;
47058 + }
47059 +
47060 + curr->crashes++;
47061 +
47062 + if (!curr->expires)
47063 + curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
47064 +
47065 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
47066 + time_after(curr->expires, get_seconds())) {
47067 + rcu_read_lock();
47068 + cred = __task_cred(task);
47069 + if (cred->uid && proc_is_setxid(cred)) {
47070 + gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
47071 + spin_lock(&gr_uid_lock);
47072 + gr_insert_uid(cred->uid, curr->expires);
47073 + spin_unlock(&gr_uid_lock);
47074 + curr->expires = 0;
47075 + curr->crashes = 0;
47076 + read_lock(&tasklist_lock);
47077 + do_each_thread(tsk2, tsk) {
47078 + cred2 = __task_cred(tsk);
47079 + if (tsk != task && cred2->uid == cred->uid)
47080 + gr_fake_force_sig(SIGKILL, tsk);
47081 + } while_each_thread(tsk2, tsk);
47082 + read_unlock(&tasklist_lock);
47083 + } else {
47084 + gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
47085 + read_lock(&tasklist_lock);
47086 + do_each_thread(tsk2, tsk) {
47087 + if (likely(tsk != task)) {
47088 + curr2 = tsk->acl;
47089 +
47090 + if (curr2->device == curr->device &&
47091 + curr2->inode == curr->inode)
47092 + gr_fake_force_sig(SIGKILL, tsk);
47093 + }
47094 + } while_each_thread(tsk2, tsk);
47095 + read_unlock(&tasklist_lock);
47096 + }
47097 + rcu_read_unlock();
47098 + }
47099 +
47100 + return;
47101 +}
47102 +
47103 +int
47104 +gr_check_crash_exec(const struct file *filp)
47105 +{
47106 + struct acl_subject_label *curr;
47107 +
47108 + if (unlikely(!gr_acl_is_enabled()))
47109 + return 0;
47110 +
47111 + read_lock(&gr_inode_lock);
47112 + curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
47113 + __get_dev(filp->f_path.dentry),
47114 + current->role);
47115 + read_unlock(&gr_inode_lock);
47116 +
47117 + if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
47118 + (!curr->crashes && !curr->expires))
47119 + return 0;
47120 +
47121 + if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
47122 + time_after(curr->expires, get_seconds()))
47123 + return 1;
47124 + else if (time_before_eq(curr->expires, get_seconds())) {
47125 + curr->crashes = 0;
47126 + curr->expires = 0;
47127 + }
47128 +
47129 + return 0;
47130 +}
47131 +
47132 +void
47133 +gr_handle_alertkill(struct task_struct *task)
47134 +{
47135 + struct acl_subject_label *curracl;
47136 + __u32 curr_ip;
47137 + struct task_struct *p, *p2;
47138 +
47139 + if (unlikely(!gr_acl_is_enabled()))
47140 + return;
47141 +
47142 + curracl = task->acl;
47143 + curr_ip = task->signal->curr_ip;
47144 +
47145 + if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
47146 + read_lock(&tasklist_lock);
47147 + do_each_thread(p2, p) {
47148 + if (p->signal->curr_ip == curr_ip)
47149 + gr_fake_force_sig(SIGKILL, p);
47150 + } while_each_thread(p2, p);
47151 + read_unlock(&tasklist_lock);
47152 + } else if (curracl->mode & GR_KILLPROC)
47153 + gr_fake_force_sig(SIGKILL, task);
47154 +
47155 + return;
47156 +}
47157 diff -urNp linux-2.6.39.4/grsecurity/gracl_shm.c linux-2.6.39.4/grsecurity/gracl_shm.c
47158 --- linux-2.6.39.4/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
47159 +++ linux-2.6.39.4/grsecurity/gracl_shm.c 2011-08-05 19:44:37.000000000 -0400
47160 @@ -0,0 +1,40 @@
47161 +#include <linux/kernel.h>
47162 +#include <linux/mm.h>
47163 +#include <linux/sched.h>
47164 +#include <linux/file.h>
47165 +#include <linux/ipc.h>
47166 +#include <linux/gracl.h>
47167 +#include <linux/grsecurity.h>
47168 +#include <linux/grinternal.h>
47169 +
47170 +int
47171 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47172 + const time_t shm_createtime, const uid_t cuid, const int shmid)
47173 +{
47174 + struct task_struct *task;
47175 +
47176 + if (!gr_acl_is_enabled())
47177 + return 1;
47178 +
47179 + rcu_read_lock();
47180 + read_lock(&tasklist_lock);
47181 +
47182 + task = find_task_by_vpid(shm_cprid);
47183 +
47184 + if (unlikely(!task))
47185 + task = find_task_by_vpid(shm_lapid);
47186 +
47187 + if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
47188 + (task->pid == shm_lapid)) &&
47189 + (task->acl->mode & GR_PROTSHM) &&
47190 + (task->acl != current->acl))) {
47191 + read_unlock(&tasklist_lock);
47192 + rcu_read_unlock();
47193 + gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
47194 + return 0;
47195 + }
47196 + read_unlock(&tasklist_lock);
47197 + rcu_read_unlock();
47198 +
47199 + return 1;
47200 +}
47201 diff -urNp linux-2.6.39.4/grsecurity/grsec_chdir.c linux-2.6.39.4/grsecurity/grsec_chdir.c
47202 --- linux-2.6.39.4/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
47203 +++ linux-2.6.39.4/grsecurity/grsec_chdir.c 2011-08-05 19:44:37.000000000 -0400
47204 @@ -0,0 +1,19 @@
47205 +#include <linux/kernel.h>
47206 +#include <linux/sched.h>
47207 +#include <linux/fs.h>
47208 +#include <linux/file.h>
47209 +#include <linux/grsecurity.h>
47210 +#include <linux/grinternal.h>
47211 +
47212 +void
47213 +gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
47214 +{
47215 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
47216 + if ((grsec_enable_chdir && grsec_enable_group &&
47217 + in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
47218 + !grsec_enable_group)) {
47219 + gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
47220 + }
47221 +#endif
47222 + return;
47223 +}
47224 diff -urNp linux-2.6.39.4/grsecurity/grsec_chroot.c linux-2.6.39.4/grsecurity/grsec_chroot.c
47225 --- linux-2.6.39.4/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
47226 +++ linux-2.6.39.4/grsecurity/grsec_chroot.c 2011-08-05 19:44:37.000000000 -0400
47227 @@ -0,0 +1,349 @@
47228 +#include <linux/kernel.h>
47229 +#include <linux/module.h>
47230 +#include <linux/sched.h>
47231 +#include <linux/file.h>
47232 +#include <linux/fs.h>
47233 +#include <linux/mount.h>
47234 +#include <linux/types.h>
47235 +#include <linux/pid_namespace.h>
47236 +#include <linux/grsecurity.h>
47237 +#include <linux/grinternal.h>
47238 +
47239 +void gr_set_chroot_entries(struct task_struct *task, struct path *path)
47240 +{
47241 +#ifdef CONFIG_GRKERNSEC
47242 + if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
47243 + path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
47244 + task->gr_is_chrooted = 1;
47245 + else
47246 + task->gr_is_chrooted = 0;
47247 +
47248 + task->gr_chroot_dentry = path->dentry;
47249 +#endif
47250 + return;
47251 +}
47252 +
47253 +void gr_clear_chroot_entries(struct task_struct *task)
47254 +{
47255 +#ifdef CONFIG_GRKERNSEC
47256 + task->gr_is_chrooted = 0;
47257 + task->gr_chroot_dentry = NULL;
47258 +#endif
47259 + return;
47260 +}
47261 +
47262 +int
47263 +gr_handle_chroot_unix(const pid_t pid)
47264 +{
47265 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
47266 + struct task_struct *p;
47267 +
47268 + if (unlikely(!grsec_enable_chroot_unix))
47269 + return 1;
47270 +
47271 + if (likely(!proc_is_chrooted(current)))
47272 + return 1;
47273 +
47274 + rcu_read_lock();
47275 + read_lock(&tasklist_lock);
47276 + p = find_task_by_vpid_unrestricted(pid);
47277 + if (unlikely(p && !have_same_root(current, p))) {
47278 + read_unlock(&tasklist_lock);
47279 + rcu_read_unlock();
47280 + gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
47281 + return 0;
47282 + }
47283 + read_unlock(&tasklist_lock);
47284 + rcu_read_unlock();
47285 +#endif
47286 + return 1;
47287 +}
47288 +
47289 +int
47290 +gr_handle_chroot_nice(void)
47291 +{
47292 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47293 + if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
47294 + gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
47295 + return -EPERM;
47296 + }
47297 +#endif
47298 + return 0;
47299 +}
47300 +
47301 +int
47302 +gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
47303 +{
47304 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
47305 + if (grsec_enable_chroot_nice && (niceval < task_nice(p))
47306 + && proc_is_chrooted(current)) {
47307 + gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
47308 + return -EACCES;
47309 + }
47310 +#endif
47311 + return 0;
47312 +}
47313 +
47314 +int
47315 +gr_handle_chroot_rawio(const struct inode *inode)
47316 +{
47317 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47318 + if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47319 + inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
47320 + return 1;
47321 +#endif
47322 + return 0;
47323 +}
47324 +
47325 +int
47326 +gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
47327 +{
47328 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47329 + struct task_struct *p;
47330 + int ret = 0;
47331 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
47332 + return ret;
47333 +
47334 + read_lock(&tasklist_lock);
47335 + do_each_pid_task(pid, type, p) {
47336 + if (!have_same_root(current, p)) {
47337 + ret = 1;
47338 + goto out;
47339 + }
47340 + } while_each_pid_task(pid, type, p);
47341 +out:
47342 + read_unlock(&tasklist_lock);
47343 + return ret;
47344 +#endif
47345 + return 0;
47346 +}
47347 +
47348 +int
47349 +gr_pid_is_chrooted(struct task_struct *p)
47350 +{
47351 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
47352 + if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
47353 + return 0;
47354 +
47355 + if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
47356 + !have_same_root(current, p)) {
47357 + return 1;
47358 + }
47359 +#endif
47360 + return 0;
47361 +}
47362 +
47363 +EXPORT_SYMBOL(gr_pid_is_chrooted);
47364 +
47365 +#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
47366 +int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
47367 +{
47368 + struct path path, currentroot;
47369 + int ret = 0;
47370 +
47371 + path.dentry = (struct dentry *)u_dentry;
47372 + path.mnt = (struct vfsmount *)u_mnt;
47373 + get_fs_root(current->fs, &currentroot);
47374 + if (path_is_under(&path, &currentroot))
47375 + ret = 1;
47376 + path_put(&currentroot);
47377 +
47378 + return ret;
47379 +}
47380 +#endif
47381 +
47382 +int
47383 +gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
47384 +{
47385 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
47386 + if (!grsec_enable_chroot_fchdir)
47387 + return 1;
47388 +
47389 + if (!proc_is_chrooted(current))
47390 + return 1;
47391 + else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
47392 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
47393 + return 0;
47394 + }
47395 +#endif
47396 + return 1;
47397 +}
47398 +
47399 +int
47400 +gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47401 + const time_t shm_createtime)
47402 +{
47403 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
47404 + struct task_struct *p;
47405 + time_t starttime;
47406 +
47407 + if (unlikely(!grsec_enable_chroot_shmat))
47408 + return 1;
47409 +
47410 + if (likely(!proc_is_chrooted(current)))
47411 + return 1;
47412 +
47413 + rcu_read_lock();
47414 + read_lock(&tasklist_lock);
47415 +
47416 + if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
47417 + starttime = p->start_time.tv_sec;
47418 + if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
47419 + if (have_same_root(current, p)) {
47420 + goto allow;
47421 + } else {
47422 + read_unlock(&tasklist_lock);
47423 + rcu_read_unlock();
47424 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47425 + return 0;
47426 + }
47427 + }
47428 + /* creator exited, pid reuse, fall through to next check */
47429 + }
47430 + if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
47431 + if (unlikely(!have_same_root(current, p))) {
47432 + read_unlock(&tasklist_lock);
47433 + rcu_read_unlock();
47434 + gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
47435 + return 0;
47436 + }
47437 + }
47438 +
47439 +allow:
47440 + read_unlock(&tasklist_lock);
47441 + rcu_read_unlock();
47442 +#endif
47443 + return 1;
47444 +}
47445 +
47446 +void
47447 +gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
47448 +{
47449 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
47450 + if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
47451 + gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
47452 +#endif
47453 + return;
47454 +}
47455 +
47456 +int
47457 +gr_handle_chroot_mknod(const struct dentry *dentry,
47458 + const struct vfsmount *mnt, const int mode)
47459 +{
47460 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
47461 + if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
47462 + proc_is_chrooted(current)) {
47463 + gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
47464 + return -EPERM;
47465 + }
47466 +#endif
47467 + return 0;
47468 +}
47469 +
47470 +int
47471 +gr_handle_chroot_mount(const struct dentry *dentry,
47472 + const struct vfsmount *mnt, const char *dev_name)
47473 +{
47474 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
47475 + if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
47476 + gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
47477 + return -EPERM;
47478 + }
47479 +#endif
47480 + return 0;
47481 +}
47482 +
47483 +int
47484 +gr_handle_chroot_pivot(void)
47485 +{
47486 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
47487 + if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
47488 + gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
47489 + return -EPERM;
47490 + }
47491 +#endif
47492 + return 0;
47493 +}
47494 +
47495 +int
47496 +gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
47497 +{
47498 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
47499 + if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
47500 + !gr_is_outside_chroot(dentry, mnt)) {
47501 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
47502 + return -EPERM;
47503 + }
47504 +#endif
47505 + return 0;
47506 +}
47507 +
47508 +int
47509 +gr_handle_chroot_caps(struct path *path)
47510 +{
47511 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47512 + if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
47513 + (init_task.fs->root.dentry != path->dentry) &&
47514 + (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
47515 +
47516 + kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
47517 + const struct cred *old = current_cred();
47518 + struct cred *new = prepare_creds();
47519 + if (new == NULL)
47520 + return 1;
47521 +
47522 + new->cap_permitted = cap_drop(old->cap_permitted,
47523 + chroot_caps);
47524 + new->cap_inheritable = cap_drop(old->cap_inheritable,
47525 + chroot_caps);
47526 + new->cap_effective = cap_drop(old->cap_effective,
47527 + chroot_caps);
47528 +
47529 + commit_creds(new);
47530 +
47531 + return 0;
47532 + }
47533 +#endif
47534 + return 0;
47535 +}
47536 +
47537 +int
47538 +gr_handle_chroot_sysctl(const int op)
47539 +{
47540 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
47541 + if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
47542 + proc_is_chrooted(current))
47543 + return -EACCES;
47544 +#endif
47545 + return 0;
47546 +}
47547 +
47548 +void
47549 +gr_handle_chroot_chdir(struct path *path)
47550 +{
47551 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
47552 + if (grsec_enable_chroot_chdir)
47553 + set_fs_pwd(current->fs, path);
47554 +#endif
47555 + return;
47556 +}
47557 +
47558 +int
47559 +gr_handle_chroot_chmod(const struct dentry *dentry,
47560 + const struct vfsmount *mnt, const int mode)
47561 +{
47562 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
47563 + /* allow chmod +s on directories, but not files */
47564 + if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
47565 + ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
47566 + proc_is_chrooted(current)) {
47567 + gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
47568 + return -EPERM;
47569 + }
47570 +#endif
47571 + return 0;
47572 +}
47573 +
47574 +#ifdef CONFIG_SECURITY
47575 +EXPORT_SYMBOL(gr_handle_chroot_caps);
47576 +#endif
47577 diff -urNp linux-2.6.39.4/grsecurity/grsec_disabled.c linux-2.6.39.4/grsecurity/grsec_disabled.c
47578 --- linux-2.6.39.4/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
47579 +++ linux-2.6.39.4/grsecurity/grsec_disabled.c 2011-08-05 19:44:37.000000000 -0400
47580 @@ -0,0 +1,447 @@
47581 +#include <linux/kernel.h>
47582 +#include <linux/module.h>
47583 +#include <linux/sched.h>
47584 +#include <linux/file.h>
47585 +#include <linux/fs.h>
47586 +#include <linux/kdev_t.h>
47587 +#include <linux/net.h>
47588 +#include <linux/in.h>
47589 +#include <linux/ip.h>
47590 +#include <linux/skbuff.h>
47591 +#include <linux/sysctl.h>
47592 +
47593 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
47594 +void
47595 +pax_set_initial_flags(struct linux_binprm *bprm)
47596 +{
47597 + return;
47598 +}
47599 +#endif
47600 +
47601 +#ifdef CONFIG_SYSCTL
47602 +__u32
47603 +gr_handle_sysctl(const struct ctl_table * table, const int op)
47604 +{
47605 + return 0;
47606 +}
47607 +#endif
47608 +
47609 +#ifdef CONFIG_TASKSTATS
47610 +int gr_is_taskstats_denied(int pid)
47611 +{
47612 + return 0;
47613 +}
47614 +#endif
47615 +
47616 +int
47617 +gr_acl_is_enabled(void)
47618 +{
47619 + return 0;
47620 +}
47621 +
47622 +int
47623 +gr_handle_rawio(const struct inode *inode)
47624 +{
47625 + return 0;
47626 +}
47627 +
47628 +void
47629 +gr_acl_handle_psacct(struct task_struct *task, const long code)
47630 +{
47631 + return;
47632 +}
47633 +
47634 +int
47635 +gr_handle_ptrace(struct task_struct *task, const long request)
47636 +{
47637 + return 0;
47638 +}
47639 +
47640 +int
47641 +gr_handle_proc_ptrace(struct task_struct *task)
47642 +{
47643 + return 0;
47644 +}
47645 +
47646 +void
47647 +gr_learn_resource(const struct task_struct *task,
47648 + const int res, const unsigned long wanted, const int gt)
47649 +{
47650 + return;
47651 +}
47652 +
47653 +int
47654 +gr_set_acls(const int type)
47655 +{
47656 + return 0;
47657 +}
47658 +
47659 +int
47660 +gr_check_hidden_task(const struct task_struct *tsk)
47661 +{
47662 + return 0;
47663 +}
47664 +
47665 +int
47666 +gr_check_protected_task(const struct task_struct *task)
47667 +{
47668 + return 0;
47669 +}
47670 +
47671 +int
47672 +gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
47673 +{
47674 + return 0;
47675 +}
47676 +
47677 +void
47678 +gr_copy_label(struct task_struct *tsk)
47679 +{
47680 + return;
47681 +}
47682 +
47683 +void
47684 +gr_set_pax_flags(struct task_struct *task)
47685 +{
47686 + return;
47687 +}
47688 +
47689 +int
47690 +gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
47691 + const int unsafe_share)
47692 +{
47693 + return 0;
47694 +}
47695 +
47696 +void
47697 +gr_handle_delete(const ino_t ino, const dev_t dev)
47698 +{
47699 + return;
47700 +}
47701 +
47702 +void
47703 +gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
47704 +{
47705 + return;
47706 +}
47707 +
47708 +void
47709 +gr_handle_crash(struct task_struct *task, const int sig)
47710 +{
47711 + return;
47712 +}
47713 +
47714 +int
47715 +gr_check_crash_exec(const struct file *filp)
47716 +{
47717 + return 0;
47718 +}
47719 +
47720 +int
47721 +gr_check_crash_uid(const uid_t uid)
47722 +{
47723 + return 0;
47724 +}
47725 +
47726 +void
47727 +gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
47728 + struct dentry *old_dentry,
47729 + struct dentry *new_dentry,
47730 + struct vfsmount *mnt, const __u8 replace)
47731 +{
47732 + return;
47733 +}
47734 +
47735 +int
47736 +gr_search_socket(const int family, const int type, const int protocol)
47737 +{
47738 + return 1;
47739 +}
47740 +
47741 +int
47742 +gr_search_connectbind(const int mode, const struct socket *sock,
47743 + const struct sockaddr_in *addr)
47744 +{
47745 + return 0;
47746 +}
47747 +
47748 +int
47749 +gr_is_capable(const int cap)
47750 +{
47751 + return 1;
47752 +}
47753 +
47754 +int
47755 +gr_is_capable_nolog(const int cap)
47756 +{
47757 + return 1;
47758 +}
47759 +
47760 +void
47761 +gr_handle_alertkill(struct task_struct *task)
47762 +{
47763 + return;
47764 +}
47765 +
47766 +__u32
47767 +gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
47768 +{
47769 + return 1;
47770 +}
47771 +
47772 +__u32
47773 +gr_acl_handle_hidden_file(const struct dentry * dentry,
47774 + const struct vfsmount * mnt)
47775 +{
47776 + return 1;
47777 +}
47778 +
47779 +__u32
47780 +gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
47781 + const int fmode)
47782 +{
47783 + return 1;
47784 +}
47785 +
47786 +__u32
47787 +gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
47788 +{
47789 + return 1;
47790 +}
47791 +
47792 +__u32
47793 +gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
47794 +{
47795 + return 1;
47796 +}
47797 +
47798 +int
47799 +gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
47800 + unsigned int *vm_flags)
47801 +{
47802 + return 1;
47803 +}
47804 +
47805 +__u32
47806 +gr_acl_handle_truncate(const struct dentry * dentry,
47807 + const struct vfsmount * mnt)
47808 +{
47809 + return 1;
47810 +}
47811 +
47812 +__u32
47813 +gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
47814 +{
47815 + return 1;
47816 +}
47817 +
47818 +__u32
47819 +gr_acl_handle_access(const struct dentry * dentry,
47820 + const struct vfsmount * mnt, const int fmode)
47821 +{
47822 + return 1;
47823 +}
47824 +
47825 +__u32
47826 +gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
47827 + mode_t mode)
47828 +{
47829 + return 1;
47830 +}
47831 +
47832 +__u32
47833 +gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
47834 + mode_t mode)
47835 +{
47836 + return 1;
47837 +}
47838 +
47839 +__u32
47840 +gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
47841 +{
47842 + return 1;
47843 +}
47844 +
47845 +__u32
47846 +gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
47847 +{
47848 + return 1;
47849 +}
47850 +
47851 +void
47852 +grsecurity_init(void)
47853 +{
47854 + return;
47855 +}
47856 +
47857 +__u32
47858 +gr_acl_handle_mknod(const struct dentry * new_dentry,
47859 + const struct dentry * parent_dentry,
47860 + const struct vfsmount * parent_mnt,
47861 + const int mode)
47862 +{
47863 + return 1;
47864 +}
47865 +
47866 +__u32
47867 +gr_acl_handle_mkdir(const struct dentry * new_dentry,
47868 + const struct dentry * parent_dentry,
47869 + const struct vfsmount * parent_mnt)
47870 +{
47871 + return 1;
47872 +}
47873 +
47874 +__u32
47875 +gr_acl_handle_symlink(const struct dentry * new_dentry,
47876 + const struct dentry * parent_dentry,
47877 + const struct vfsmount * parent_mnt, const char *from)
47878 +{
47879 + return 1;
47880 +}
47881 +
47882 +__u32
47883 +gr_acl_handle_link(const struct dentry * new_dentry,
47884 + const struct dentry * parent_dentry,
47885 + const struct vfsmount * parent_mnt,
47886 + const struct dentry * old_dentry,
47887 + const struct vfsmount * old_mnt, const char *to)
47888 +{
47889 + return 1;
47890 +}
47891 +
47892 +int
47893 +gr_acl_handle_rename(const struct dentry *new_dentry,
47894 + const struct dentry *parent_dentry,
47895 + const struct vfsmount *parent_mnt,
47896 + const struct dentry *old_dentry,
47897 + const struct inode *old_parent_inode,
47898 + const struct vfsmount *old_mnt, const char *newname)
47899 +{
47900 + return 0;
47901 +}
47902 +
47903 +int
47904 +gr_acl_handle_filldir(const struct file *file, const char *name,
47905 + const int namelen, const ino_t ino)
47906 +{
47907 + return 1;
47908 +}
47909 +
47910 +int
47911 +gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
47912 + const time_t shm_createtime, const uid_t cuid, const int shmid)
47913 +{
47914 + return 1;
47915 +}
47916 +
47917 +int
47918 +gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
47919 +{
47920 + return 0;
47921 +}
47922 +
47923 +int
47924 +gr_search_accept(const struct socket *sock)
47925 +{
47926 + return 0;
47927 +}
47928 +
47929 +int
47930 +gr_search_listen(const struct socket *sock)
47931 +{
47932 + return 0;
47933 +}
47934 +
47935 +int
47936 +gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
47937 +{
47938 + return 0;
47939 +}
47940 +
47941 +__u32
47942 +gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
47943 +{
47944 + return 1;
47945 +}
47946 +
47947 +__u32
47948 +gr_acl_handle_creat(const struct dentry * dentry,
47949 + const struct dentry * p_dentry,
47950 + const struct vfsmount * p_mnt, const int fmode,
47951 + const int imode)
47952 +{
47953 + return 1;
47954 +}
47955 +
47956 +void
47957 +gr_acl_handle_exit(void)
47958 +{
47959 + return;
47960 +}
47961 +
47962 +int
47963 +gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
47964 +{
47965 + return 1;
47966 +}
47967 +
47968 +void
47969 +gr_set_role_label(const uid_t uid, const gid_t gid)
47970 +{
47971 + return;
47972 +}
47973 +
47974 +int
47975 +gr_acl_handle_procpidmem(const struct task_struct *task)
47976 +{
47977 + return 0;
47978 +}
47979 +
47980 +int
47981 +gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
47982 +{
47983 + return 0;
47984 +}
47985 +
47986 +int
47987 +gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
47988 +{
47989 + return 0;
47990 +}
47991 +
47992 +void
47993 +gr_set_kernel_label(struct task_struct *task)
47994 +{
47995 + return;
47996 +}
47997 +
47998 +int
47999 +gr_check_user_change(int real, int effective, int fs)
48000 +{
48001 + return 0;
48002 +}
48003 +
48004 +int
48005 +gr_check_group_change(int real, int effective, int fs)
48006 +{
48007 + return 0;
48008 +}
48009 +
48010 +int gr_acl_enable_at_secure(void)
48011 +{
48012 + return 0;
48013 +}
48014 +
48015 +dev_t gr_get_dev_from_dentry(struct dentry *dentry)
48016 +{
48017 + return dentry->d_inode->i_sb->s_dev;
48018 +}
48019 +
48020 +EXPORT_SYMBOL(gr_is_capable);
48021 +EXPORT_SYMBOL(gr_is_capable_nolog);
48022 +EXPORT_SYMBOL(gr_learn_resource);
48023 +EXPORT_SYMBOL(gr_set_kernel_label);
48024 +#ifdef CONFIG_SECURITY
48025 +EXPORT_SYMBOL(gr_check_user_change);
48026 +EXPORT_SYMBOL(gr_check_group_change);
48027 +#endif
48028 diff -urNp linux-2.6.39.4/grsecurity/grsec_exec.c linux-2.6.39.4/grsecurity/grsec_exec.c
48029 --- linux-2.6.39.4/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
48030 +++ linux-2.6.39.4/grsecurity/grsec_exec.c 2011-08-05 19:44:37.000000000 -0400
48031 @@ -0,0 +1,146 @@
48032 +#include <linux/kernel.h>
48033 +#include <linux/sched.h>
48034 +#include <linux/file.h>
48035 +#include <linux/binfmts.h>
48036 +#include <linux/fs.h>
48037 +#include <linux/types.h>
48038 +#include <linux/grdefs.h>
48039 +#include <linux/grinternal.h>
48040 +#include <linux/capability.h>
48041 +#include <linux/compat.h>
48042 +
48043 +#include <asm/uaccess.h>
48044 +
48045 +#ifdef CONFIG_GRKERNSEC_EXECLOG
48046 +static char gr_exec_arg_buf[132];
48047 +static DEFINE_MUTEX(gr_exec_arg_mutex);
48048 +#endif
48049 +
48050 +int
48051 +gr_handle_nproc(void)
48052 +{
48053 +#ifdef CONFIG_GRKERNSEC_EXECVE
48054 + const struct cred *cred = current_cred();
48055 + if (grsec_enable_execve && cred->user &&
48056 + (atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) &&
48057 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
48058 + gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
48059 + return -EAGAIN;
48060 + }
48061 +#endif
48062 + return 0;
48063 +}
48064 +
48065 +void
48066 +gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
48067 +{
48068 +#ifdef CONFIG_GRKERNSEC_EXECLOG
48069 + char *grarg = gr_exec_arg_buf;
48070 + unsigned int i, x, execlen = 0;
48071 + char c;
48072 +
48073 + if (!((grsec_enable_execlog && grsec_enable_group &&
48074 + in_group_p(grsec_audit_gid))
48075 + || (grsec_enable_execlog && !grsec_enable_group)))
48076 + return;
48077 +
48078 + mutex_lock(&gr_exec_arg_mutex);
48079 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
48080 +
48081 + if (unlikely(argv == NULL))
48082 + goto log;
48083 +
48084 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
48085 + const char __user *p;
48086 + unsigned int len;
48087 +
48088 + if (copy_from_user(&p, argv + i, sizeof(p)))
48089 + goto log;
48090 + if (!p)
48091 + goto log;
48092 + len = strnlen_user(p, 128 - execlen);
48093 + if (len > 128 - execlen)
48094 + len = 128 - execlen;
48095 + else if (len > 0)
48096 + len--;
48097 + if (copy_from_user(grarg + execlen, p, len))
48098 + goto log;
48099 +
48100 + /* rewrite unprintable characters */
48101 + for (x = 0; x < len; x++) {
48102 + c = *(grarg + execlen + x);
48103 + if (c < 32 || c > 126)
48104 + *(grarg + execlen + x) = ' ';
48105 + }
48106 +
48107 + execlen += len;
48108 + *(grarg + execlen) = ' ';
48109 + *(grarg + execlen + 1) = '\0';
48110 + execlen++;
48111 + }
48112 +
48113 + log:
48114 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
48115 + bprm->file->f_path.mnt, grarg);
48116 + mutex_unlock(&gr_exec_arg_mutex);
48117 +#endif
48118 + return;
48119 +}
48120 +
48121 +#ifdef CONFIG_COMPAT
48122 +void
48123 +gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
48124 +{
48125 +#ifdef CONFIG_GRKERNSEC_EXECLOG
48126 + char *grarg = gr_exec_arg_buf;
48127 + unsigned int i, x, execlen = 0;
48128 + char c;
48129 +
48130 + if (!((grsec_enable_execlog && grsec_enable_group &&
48131 + in_group_p(grsec_audit_gid))
48132 + || (grsec_enable_execlog && !grsec_enable_group)))
48133 + return;
48134 +
48135 + mutex_lock(&gr_exec_arg_mutex);
48136 + memset(grarg, 0, sizeof(gr_exec_arg_buf));
48137 +
48138 + if (unlikely(argv == NULL))
48139 + goto log;
48140 +
48141 + for (i = 0; i < bprm->argc && execlen < 128; i++) {
48142 + compat_uptr_t p;
48143 + unsigned int len;
48144 +
48145 + if (get_user(p, argv + i))
48146 + goto log;
48147 + len = strnlen_user(compat_ptr(p), 128 - execlen);
48148 + if (len > 128 - execlen)
48149 + len = 128 - execlen;
48150 + else if (len > 0)
48151 + len--;
48152 + else
48153 + goto log;
48154 + if (copy_from_user(grarg + execlen, compat_ptr(p), len))
48155 + goto log;
48156 +
48157 + /* rewrite unprintable characters */
48158 + for (x = 0; x < len; x++) {
48159 + c = *(grarg + execlen + x);
48160 + if (c < 32 || c > 126)
48161 + *(grarg + execlen + x) = ' ';
48162 + }
48163 +
48164 + execlen += len;
48165 + *(grarg + execlen) = ' ';
48166 + *(grarg + execlen + 1) = '\0';
48167 + execlen++;
48168 + }
48169 +
48170 + log:
48171 + gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
48172 + bprm->file->f_path.mnt, grarg);
48173 + mutex_unlock(&gr_exec_arg_mutex);
48174 +#endif
48175 + return;
48176 +}
48177 +#endif
48178 diff -urNp linux-2.6.39.4/grsecurity/grsec_fifo.c linux-2.6.39.4/grsecurity/grsec_fifo.c
48179 --- linux-2.6.39.4/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
48180 +++ linux-2.6.39.4/grsecurity/grsec_fifo.c 2011-08-05 19:44:37.000000000 -0400
48181 @@ -0,0 +1,24 @@
48182 +#include <linux/kernel.h>
48183 +#include <linux/sched.h>
48184 +#include <linux/fs.h>
48185 +#include <linux/file.h>
48186 +#include <linux/grinternal.h>
48187 +
48188 +int
48189 +gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
48190 + const struct dentry *dir, const int flag, const int acc_mode)
48191 +{
48192 +#ifdef CONFIG_GRKERNSEC_FIFO
48193 + const struct cred *cred = current_cred();
48194 +
48195 + if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
48196 + !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
48197 + (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
48198 + (cred->fsuid != dentry->d_inode->i_uid)) {
48199 + if (!inode_permission(dentry->d_inode, acc_mode))
48200 + gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
48201 + return -EACCES;
48202 + }
48203 +#endif
48204 + return 0;
48205 +}
48206 diff -urNp linux-2.6.39.4/grsecurity/grsec_fork.c linux-2.6.39.4/grsecurity/grsec_fork.c
48207 --- linux-2.6.39.4/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
48208 +++ linux-2.6.39.4/grsecurity/grsec_fork.c 2011-08-05 19:44:37.000000000 -0400
48209 @@ -0,0 +1,23 @@
48210 +#include <linux/kernel.h>
48211 +#include <linux/sched.h>
48212 +#include <linux/grsecurity.h>
48213 +#include <linux/grinternal.h>
48214 +#include <linux/errno.h>
48215 +
48216 +void
48217 +gr_log_forkfail(const int retval)
48218 +{
48219 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
48220 + if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
48221 + switch (retval) {
48222 + case -EAGAIN:
48223 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
48224 + break;
48225 + case -ENOMEM:
48226 + gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
48227 + break;
48228 + }
48229 + }
48230 +#endif
48231 + return;
48232 +}
48233 diff -urNp linux-2.6.39.4/grsecurity/grsec_init.c linux-2.6.39.4/grsecurity/grsec_init.c
48234 --- linux-2.6.39.4/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
48235 +++ linux-2.6.39.4/grsecurity/grsec_init.c 2011-08-05 19:44:37.000000000 -0400
48236 @@ -0,0 +1,273 @@
48237 +#include <linux/kernel.h>
48238 +#include <linux/sched.h>
48239 +#include <linux/mm.h>
48240 +#include <linux/gracl.h>
48241 +#include <linux/slab.h>
48242 +#include <linux/vmalloc.h>
48243 +#include <linux/percpu.h>
48244 +#include <linux/module.h>
48245 +
48246 +int grsec_enable_brute;
48247 +int grsec_enable_link;
48248 +int grsec_enable_dmesg;
48249 +int grsec_enable_harden_ptrace;
48250 +int grsec_enable_fifo;
48251 +int grsec_enable_execve;
48252 +int grsec_enable_execlog;
48253 +int grsec_enable_signal;
48254 +int grsec_enable_forkfail;
48255 +int grsec_enable_audit_ptrace;
48256 +int grsec_enable_time;
48257 +int grsec_enable_audit_textrel;
48258 +int grsec_enable_group;
48259 +int grsec_audit_gid;
48260 +int grsec_enable_chdir;
48261 +int grsec_enable_mount;
48262 +int grsec_enable_rofs;
48263 +int grsec_enable_chroot_findtask;
48264 +int grsec_enable_chroot_mount;
48265 +int grsec_enable_chroot_shmat;
48266 +int grsec_enable_chroot_fchdir;
48267 +int grsec_enable_chroot_double;
48268 +int grsec_enable_chroot_pivot;
48269 +int grsec_enable_chroot_chdir;
48270 +int grsec_enable_chroot_chmod;
48271 +int grsec_enable_chroot_mknod;
48272 +int grsec_enable_chroot_nice;
48273 +int grsec_enable_chroot_execlog;
48274 +int grsec_enable_chroot_caps;
48275 +int grsec_enable_chroot_sysctl;
48276 +int grsec_enable_chroot_unix;
48277 +int grsec_enable_tpe;
48278 +int grsec_tpe_gid;
48279 +int grsec_enable_blackhole;
48280 +#ifdef CONFIG_IPV6_MODULE
48281 +EXPORT_SYMBOL(grsec_enable_blackhole);
48282 +#endif
48283 +int grsec_lastack_retries;
48284 +int grsec_enable_tpe_all;
48285 +int grsec_enable_tpe_invert;
48286 +int grsec_enable_socket_all;
48287 +int grsec_socket_all_gid;
48288 +int grsec_enable_socket_client;
48289 +int grsec_socket_client_gid;
48290 +int grsec_enable_socket_server;
48291 +int grsec_socket_server_gid;
48292 +int grsec_resource_logging;
48293 +int grsec_disable_privio;
48294 +int grsec_enable_log_rwxmaps;
48295 +int grsec_lock;
48296 +
48297 +DEFINE_SPINLOCK(grsec_alert_lock);
48298 +unsigned long grsec_alert_wtime = 0;
48299 +unsigned long grsec_alert_fyet = 0;
48300 +
48301 +DEFINE_SPINLOCK(grsec_audit_lock);
48302 +
48303 +DEFINE_RWLOCK(grsec_exec_file_lock);
48304 +
48305 +char *gr_shared_page[4];
48306 +
48307 +char *gr_alert_log_fmt;
48308 +char *gr_audit_log_fmt;
48309 +char *gr_alert_log_buf;
48310 +char *gr_audit_log_buf;
48311 +
48312 +extern struct gr_arg *gr_usermode;
48313 +extern unsigned char *gr_system_salt;
48314 +extern unsigned char *gr_system_sum;
48315 +
48316 +void __init
48317 +grsecurity_init(void)
48318 +{
48319 + int j;
48320 + /* create the per-cpu shared pages */
48321 +
48322 +#ifdef CONFIG_X86
48323 + memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
48324 +#endif
48325 +
48326 + for (j = 0; j < 4; j++) {
48327 + gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
48328 + if (gr_shared_page[j] == NULL) {
48329 + panic("Unable to allocate grsecurity shared page");
48330 + return;
48331 + }
48332 + }
48333 +
48334 + /* allocate log buffers */
48335 + gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
48336 + if (!gr_alert_log_fmt) {
48337 + panic("Unable to allocate grsecurity alert log format buffer");
48338 + return;
48339 + }
48340 + gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
48341 + if (!gr_audit_log_fmt) {
48342 + panic("Unable to allocate grsecurity audit log format buffer");
48343 + return;
48344 + }
48345 + gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
48346 + if (!gr_alert_log_buf) {
48347 + panic("Unable to allocate grsecurity alert log buffer");
48348 + return;
48349 + }
48350 + gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
48351 + if (!gr_audit_log_buf) {
48352 + panic("Unable to allocate grsecurity audit log buffer");
48353 + return;
48354 + }
48355 +
48356 + /* allocate memory for authentication structure */
48357 + gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
48358 + gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
48359 + gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
48360 +
48361 + if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
48362 + panic("Unable to allocate grsecurity authentication structure");
48363 + return;
48364 + }
48365 +
48366 +
48367 +#ifdef CONFIG_GRKERNSEC_IO
48368 +#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
48369 + grsec_disable_privio = 1;
48370 +#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
48371 + grsec_disable_privio = 1;
48372 +#else
48373 + grsec_disable_privio = 0;
48374 +#endif
48375 +#endif
48376 +
48377 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
48378 + /* for backward compatibility, tpe_invert always defaults to on if
48379 + enabled in the kernel
48380 + */
48381 + grsec_enable_tpe_invert = 1;
48382 +#endif
48383 +
48384 +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
48385 +#ifndef CONFIG_GRKERNSEC_SYSCTL
48386 + grsec_lock = 1;
48387 +#endif
48388 +
48389 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48390 + grsec_enable_audit_textrel = 1;
48391 +#endif
48392 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48393 + grsec_enable_log_rwxmaps = 1;
48394 +#endif
48395 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
48396 + grsec_enable_group = 1;
48397 + grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
48398 +#endif
48399 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
48400 + grsec_enable_chdir = 1;
48401 +#endif
48402 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
48403 + grsec_enable_harden_ptrace = 1;
48404 +#endif
48405 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48406 + grsec_enable_mount = 1;
48407 +#endif
48408 +#ifdef CONFIG_GRKERNSEC_LINK
48409 + grsec_enable_link = 1;
48410 +#endif
48411 +#ifdef CONFIG_GRKERNSEC_BRUTE
48412 + grsec_enable_brute = 1;
48413 +#endif
48414 +#ifdef CONFIG_GRKERNSEC_DMESG
48415 + grsec_enable_dmesg = 1;
48416 +#endif
48417 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
48418 + grsec_enable_blackhole = 1;
48419 + grsec_lastack_retries = 4;
48420 +#endif
48421 +#ifdef CONFIG_GRKERNSEC_FIFO
48422 + grsec_enable_fifo = 1;
48423 +#endif
48424 +#ifdef CONFIG_GRKERNSEC_EXECVE
48425 + grsec_enable_execve = 1;
48426 +#endif
48427 +#ifdef CONFIG_GRKERNSEC_EXECLOG
48428 + grsec_enable_execlog = 1;
48429 +#endif
48430 +#ifdef CONFIG_GRKERNSEC_SIGNAL
48431 + grsec_enable_signal = 1;
48432 +#endif
48433 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
48434 + grsec_enable_forkfail = 1;
48435 +#endif
48436 +#ifdef CONFIG_GRKERNSEC_TIME
48437 + grsec_enable_time = 1;
48438 +#endif
48439 +#ifdef CONFIG_GRKERNSEC_RESLOG
48440 + grsec_resource_logging = 1;
48441 +#endif
48442 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
48443 + grsec_enable_chroot_findtask = 1;
48444 +#endif
48445 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
48446 + grsec_enable_chroot_unix = 1;
48447 +#endif
48448 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
48449 + grsec_enable_chroot_mount = 1;
48450 +#endif
48451 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
48452 + grsec_enable_chroot_fchdir = 1;
48453 +#endif
48454 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
48455 + grsec_enable_chroot_shmat = 1;
48456 +#endif
48457 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
48458 + grsec_enable_audit_ptrace = 1;
48459 +#endif
48460 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
48461 + grsec_enable_chroot_double = 1;
48462 +#endif
48463 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
48464 + grsec_enable_chroot_pivot = 1;
48465 +#endif
48466 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
48467 + grsec_enable_chroot_chdir = 1;
48468 +#endif
48469 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
48470 + grsec_enable_chroot_chmod = 1;
48471 +#endif
48472 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
48473 + grsec_enable_chroot_mknod = 1;
48474 +#endif
48475 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
48476 + grsec_enable_chroot_nice = 1;
48477 +#endif
48478 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
48479 + grsec_enable_chroot_execlog = 1;
48480 +#endif
48481 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
48482 + grsec_enable_chroot_caps = 1;
48483 +#endif
48484 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
48485 + grsec_enable_chroot_sysctl = 1;
48486 +#endif
48487 +#ifdef CONFIG_GRKERNSEC_TPE
48488 + grsec_enable_tpe = 1;
48489 + grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
48490 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
48491 + grsec_enable_tpe_all = 1;
48492 +#endif
48493 +#endif
48494 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
48495 + grsec_enable_socket_all = 1;
48496 + grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
48497 +#endif
48498 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
48499 + grsec_enable_socket_client = 1;
48500 + grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
48501 +#endif
48502 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
48503 + grsec_enable_socket_server = 1;
48504 + grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
48505 +#endif
48506 +#endif
48507 +
48508 + return;
48509 +}
48510 diff -urNp linux-2.6.39.4/grsecurity/grsec_link.c linux-2.6.39.4/grsecurity/grsec_link.c
48511 --- linux-2.6.39.4/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
48512 +++ linux-2.6.39.4/grsecurity/grsec_link.c 2011-08-05 19:44:37.000000000 -0400
48513 @@ -0,0 +1,43 @@
48514 +#include <linux/kernel.h>
48515 +#include <linux/sched.h>
48516 +#include <linux/fs.h>
48517 +#include <linux/file.h>
48518 +#include <linux/grinternal.h>
48519 +
48520 +int
48521 +gr_handle_follow_link(const struct inode *parent,
48522 + const struct inode *inode,
48523 + const struct dentry *dentry, const struct vfsmount *mnt)
48524 +{
48525 +#ifdef CONFIG_GRKERNSEC_LINK
48526 + const struct cred *cred = current_cred();
48527 +
48528 + if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
48529 + (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
48530 + (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
48531 + gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
48532 + return -EACCES;
48533 + }
48534 +#endif
48535 + return 0;
48536 +}
48537 +
48538 +int
48539 +gr_handle_hardlink(const struct dentry *dentry,
48540 + const struct vfsmount *mnt,
48541 + struct inode *inode, const int mode, const char *to)
48542 +{
48543 +#ifdef CONFIG_GRKERNSEC_LINK
48544 + const struct cred *cred = current_cred();
48545 +
48546 + if (grsec_enable_link && cred->fsuid != inode->i_uid &&
48547 + (!S_ISREG(mode) || (mode & S_ISUID) ||
48548 + ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
48549 + (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
48550 + !capable(CAP_FOWNER) && cred->uid) {
48551 + gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
48552 + return -EPERM;
48553 + }
48554 +#endif
48555 + return 0;
48556 +}
48557 diff -urNp linux-2.6.39.4/grsecurity/grsec_log.c linux-2.6.39.4/grsecurity/grsec_log.c
48558 --- linux-2.6.39.4/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
48559 +++ linux-2.6.39.4/grsecurity/grsec_log.c 2011-08-05 19:44:37.000000000 -0400
48560 @@ -0,0 +1,310 @@
48561 +#include <linux/kernel.h>
48562 +#include <linux/sched.h>
48563 +#include <linux/file.h>
48564 +#include <linux/tty.h>
48565 +#include <linux/fs.h>
48566 +#include <linux/grinternal.h>
48567 +
48568 +#ifdef CONFIG_TREE_PREEMPT_RCU
48569 +#define DISABLE_PREEMPT() preempt_disable()
48570 +#define ENABLE_PREEMPT() preempt_enable()
48571 +#else
48572 +#define DISABLE_PREEMPT()
48573 +#define ENABLE_PREEMPT()
48574 +#endif
48575 +
48576 +#define BEGIN_LOCKS(x) \
48577 + DISABLE_PREEMPT(); \
48578 + rcu_read_lock(); \
48579 + read_lock(&tasklist_lock); \
48580 + read_lock(&grsec_exec_file_lock); \
48581 + if (x != GR_DO_AUDIT) \
48582 + spin_lock(&grsec_alert_lock); \
48583 + else \
48584 + spin_lock(&grsec_audit_lock)
48585 +
48586 +#define END_LOCKS(x) \
48587 + if (x != GR_DO_AUDIT) \
48588 + spin_unlock(&grsec_alert_lock); \
48589 + else \
48590 + spin_unlock(&grsec_audit_lock); \
48591 + read_unlock(&grsec_exec_file_lock); \
48592 + read_unlock(&tasklist_lock); \
48593 + rcu_read_unlock(); \
48594 + ENABLE_PREEMPT(); \
48595 + if (x == GR_DONT_AUDIT) \
48596 + gr_handle_alertkill(current)
48597 +
48598 +enum {
48599 + FLOODING,
48600 + NO_FLOODING
48601 +};
48602 +
48603 +extern char *gr_alert_log_fmt;
48604 +extern char *gr_audit_log_fmt;
48605 +extern char *gr_alert_log_buf;
48606 +extern char *gr_audit_log_buf;
48607 +
48608 +static int gr_log_start(int audit)
48609 +{
48610 + char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
48611 + char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
48612 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48613 +
48614 + if (audit == GR_DO_AUDIT)
48615 + goto set_fmt;
48616 +
48617 + if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
48618 + grsec_alert_wtime = jiffies;
48619 + grsec_alert_fyet = 0;
48620 + } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
48621 + grsec_alert_fyet++;
48622 + } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
48623 + grsec_alert_wtime = jiffies;
48624 + grsec_alert_fyet++;
48625 + printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
48626 + return FLOODING;
48627 + } else return FLOODING;
48628 +
48629 +set_fmt:
48630 + memset(buf, 0, PAGE_SIZE);
48631 + if (current->signal->curr_ip && gr_acl_is_enabled()) {
48632 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
48633 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48634 + } else if (current->signal->curr_ip) {
48635 + sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
48636 + snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
48637 + } else if (gr_acl_is_enabled()) {
48638 + sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
48639 + snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
48640 + } else {
48641 + sprintf(fmt, "%s%s", loglevel, "grsec: ");
48642 + strcpy(buf, fmt);
48643 + }
48644 +
48645 + return NO_FLOODING;
48646 +}
48647 +
48648 +static void gr_log_middle(int audit, const char *msg, va_list ap)
48649 + __attribute__ ((format (printf, 2, 0)));
48650 +
48651 +static void gr_log_middle(int audit, const char *msg, va_list ap)
48652 +{
48653 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48654 + unsigned int len = strlen(buf);
48655 +
48656 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48657 +
48658 + return;
48659 +}
48660 +
48661 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
48662 + __attribute__ ((format (printf, 2, 3)));
48663 +
48664 +static void gr_log_middle_varargs(int audit, const char *msg, ...)
48665 +{
48666 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48667 + unsigned int len = strlen(buf);
48668 + va_list ap;
48669 +
48670 + va_start(ap, msg);
48671 + vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
48672 + va_end(ap);
48673 +
48674 + return;
48675 +}
48676 +
48677 +static void gr_log_end(int audit)
48678 +{
48679 + char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
48680 + unsigned int len = strlen(buf);
48681 +
48682 + snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
48683 + printk("%s\n", buf);
48684 +
48685 + return;
48686 +}
48687 +
48688 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
48689 +{
48690 + int logtype;
48691 + char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
48692 + char *str1 = NULL, *str2 = NULL, *str3 = NULL;
48693 + void *voidptr = NULL;
48694 + int num1 = 0, num2 = 0;
48695 + unsigned long ulong1 = 0, ulong2 = 0;
48696 + struct dentry *dentry = NULL;
48697 + struct vfsmount *mnt = NULL;
48698 + struct file *file = NULL;
48699 + struct task_struct *task = NULL;
48700 + const struct cred *cred, *pcred;
48701 + va_list ap;
48702 +
48703 + BEGIN_LOCKS(audit);
48704 + logtype = gr_log_start(audit);
48705 + if (logtype == FLOODING) {
48706 + END_LOCKS(audit);
48707 + return;
48708 + }
48709 + va_start(ap, argtypes);
48710 + switch (argtypes) {
48711 + case GR_TTYSNIFF:
48712 + task = va_arg(ap, struct task_struct *);
48713 + gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
48714 + break;
48715 + case GR_SYSCTL_HIDDEN:
48716 + str1 = va_arg(ap, char *);
48717 + gr_log_middle_varargs(audit, msg, result, str1);
48718 + break;
48719 + case GR_RBAC:
48720 + dentry = va_arg(ap, struct dentry *);
48721 + mnt = va_arg(ap, struct vfsmount *);
48722 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
48723 + break;
48724 + case GR_RBAC_STR:
48725 + dentry = va_arg(ap, struct dentry *);
48726 + mnt = va_arg(ap, struct vfsmount *);
48727 + str1 = va_arg(ap, char *);
48728 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
48729 + break;
48730 + case GR_STR_RBAC:
48731 + str1 = va_arg(ap, char *);
48732 + dentry = va_arg(ap, struct dentry *);
48733 + mnt = va_arg(ap, struct vfsmount *);
48734 + gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
48735 + break;
48736 + case GR_RBAC_MODE2:
48737 + dentry = va_arg(ap, struct dentry *);
48738 + mnt = va_arg(ap, struct vfsmount *);
48739 + str1 = va_arg(ap, char *);
48740 + str2 = va_arg(ap, char *);
48741 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
48742 + break;
48743 + case GR_RBAC_MODE3:
48744 + dentry = va_arg(ap, struct dentry *);
48745 + mnt = va_arg(ap, struct vfsmount *);
48746 + str1 = va_arg(ap, char *);
48747 + str2 = va_arg(ap, char *);
48748 + str3 = va_arg(ap, char *);
48749 + gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
48750 + break;
48751 + case GR_FILENAME:
48752 + dentry = va_arg(ap, struct dentry *);
48753 + mnt = va_arg(ap, struct vfsmount *);
48754 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
48755 + break;
48756 + case GR_STR_FILENAME:
48757 + str1 = va_arg(ap, char *);
48758 + dentry = va_arg(ap, struct dentry *);
48759 + mnt = va_arg(ap, struct vfsmount *);
48760 + gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
48761 + break;
48762 + case GR_FILENAME_STR:
48763 + dentry = va_arg(ap, struct dentry *);
48764 + mnt = va_arg(ap, struct vfsmount *);
48765 + str1 = va_arg(ap, char *);
48766 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
48767 + break;
48768 + case GR_FILENAME_TWO_INT:
48769 + dentry = va_arg(ap, struct dentry *);
48770 + mnt = va_arg(ap, struct vfsmount *);
48771 + num1 = va_arg(ap, int);
48772 + num2 = va_arg(ap, int);
48773 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
48774 + break;
48775 + case GR_FILENAME_TWO_INT_STR:
48776 + dentry = va_arg(ap, struct dentry *);
48777 + mnt = va_arg(ap, struct vfsmount *);
48778 + num1 = va_arg(ap, int);
48779 + num2 = va_arg(ap, int);
48780 + str1 = va_arg(ap, char *);
48781 + gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
48782 + break;
48783 + case GR_TEXTREL:
48784 + file = va_arg(ap, struct file *);
48785 + ulong1 = va_arg(ap, unsigned long);
48786 + ulong2 = va_arg(ap, unsigned long);
48787 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
48788 + break;
48789 + case GR_PTRACE:
48790 + task = va_arg(ap, struct task_struct *);
48791 + gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
48792 + break;
48793 + case GR_RESOURCE:
48794 + task = va_arg(ap, struct task_struct *);
48795 + cred = __task_cred(task);
48796 + pcred = __task_cred(task->real_parent);
48797 + ulong1 = va_arg(ap, unsigned long);
48798 + str1 = va_arg(ap, char *);
48799 + ulong2 = va_arg(ap, unsigned long);
48800 + gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48801 + break;
48802 + case GR_CAP:
48803 + task = va_arg(ap, struct task_struct *);
48804 + cred = __task_cred(task);
48805 + pcred = __task_cred(task->real_parent);
48806 + str1 = va_arg(ap, char *);
48807 + gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48808 + break;
48809 + case GR_SIG:
48810 + str1 = va_arg(ap, char *);
48811 + voidptr = va_arg(ap, void *);
48812 + gr_log_middle_varargs(audit, msg, str1, voidptr);
48813 + break;
48814 + case GR_SIG2:
48815 + task = va_arg(ap, struct task_struct *);
48816 + cred = __task_cred(task);
48817 + pcred = __task_cred(task->real_parent);
48818 + num1 = va_arg(ap, int);
48819 + gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48820 + break;
48821 + case GR_CRASH1:
48822 + task = va_arg(ap, struct task_struct *);
48823 + cred = __task_cred(task);
48824 + pcred = __task_cred(task->real_parent);
48825 + ulong1 = va_arg(ap, unsigned long);
48826 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
48827 + break;
48828 + case GR_CRASH2:
48829 + task = va_arg(ap, struct task_struct *);
48830 + cred = __task_cred(task);
48831 + pcred = __task_cred(task->real_parent);
48832 + ulong1 = va_arg(ap, unsigned long);
48833 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
48834 + break;
48835 + case GR_RWXMAP:
48836 + file = va_arg(ap, struct file *);
48837 + gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
48838 + break;
48839 + case GR_PSACCT:
48840 + {
48841 + unsigned int wday, cday;
48842 + __u8 whr, chr;
48843 + __u8 wmin, cmin;
48844 + __u8 wsec, csec;
48845 + char cur_tty[64] = { 0 };
48846 + char parent_tty[64] = { 0 };
48847 +
48848 + task = va_arg(ap, struct task_struct *);
48849 + wday = va_arg(ap, unsigned int);
48850 + cday = va_arg(ap, unsigned int);
48851 + whr = va_arg(ap, int);
48852 + chr = va_arg(ap, int);
48853 + wmin = va_arg(ap, int);
48854 + cmin = va_arg(ap, int);
48855 + wsec = va_arg(ap, int);
48856 + csec = va_arg(ap, int);
48857 + ulong1 = va_arg(ap, unsigned long);
48858 + cred = __task_cred(task);
48859 + pcred = __task_cred(task->real_parent);
48860 +
48861 + gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
48862 + }
48863 + break;
48864 + default:
48865 + gr_log_middle(audit, msg, ap);
48866 + }
48867 + va_end(ap);
48868 + gr_log_end(audit);
48869 + END_LOCKS(audit);
48870 +}
48871 diff -urNp linux-2.6.39.4/grsecurity/grsec_mem.c linux-2.6.39.4/grsecurity/grsec_mem.c
48872 --- linux-2.6.39.4/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
48873 +++ linux-2.6.39.4/grsecurity/grsec_mem.c 2011-08-05 19:44:37.000000000 -0400
48874 @@ -0,0 +1,33 @@
48875 +#include <linux/kernel.h>
48876 +#include <linux/sched.h>
48877 +#include <linux/mm.h>
48878 +#include <linux/mman.h>
48879 +#include <linux/grinternal.h>
48880 +
48881 +void
48882 +gr_handle_ioperm(void)
48883 +{
48884 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
48885 + return;
48886 +}
48887 +
48888 +void
48889 +gr_handle_iopl(void)
48890 +{
48891 + gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
48892 + return;
48893 +}
48894 +
48895 +void
48896 +gr_handle_mem_readwrite(u64 from, u64 to)
48897 +{
48898 + gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
48899 + return;
48900 +}
48901 +
48902 +void
48903 +gr_handle_vm86(void)
48904 +{
48905 + gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
48906 + return;
48907 +}
48908 diff -urNp linux-2.6.39.4/grsecurity/grsec_mount.c linux-2.6.39.4/grsecurity/grsec_mount.c
48909 --- linux-2.6.39.4/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
48910 +++ linux-2.6.39.4/grsecurity/grsec_mount.c 2011-08-05 19:44:37.000000000 -0400
48911 @@ -0,0 +1,62 @@
48912 +#include <linux/kernel.h>
48913 +#include <linux/sched.h>
48914 +#include <linux/mount.h>
48915 +#include <linux/grsecurity.h>
48916 +#include <linux/grinternal.h>
48917 +
48918 +void
48919 +gr_log_remount(const char *devname, const int retval)
48920 +{
48921 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48922 + if (grsec_enable_mount && (retval >= 0))
48923 + gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
48924 +#endif
48925 + return;
48926 +}
48927 +
48928 +void
48929 +gr_log_unmount(const char *devname, const int retval)
48930 +{
48931 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48932 + if (grsec_enable_mount && (retval >= 0))
48933 + gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
48934 +#endif
48935 + return;
48936 +}
48937 +
48938 +void
48939 +gr_log_mount(const char *from, const char *to, const int retval)
48940 +{
48941 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
48942 + if (grsec_enable_mount && (retval >= 0))
48943 + gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
48944 +#endif
48945 + return;
48946 +}
48947 +
48948 +int
48949 +gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
48950 +{
48951 +#ifdef CONFIG_GRKERNSEC_ROFS
48952 + if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
48953 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
48954 + return -EPERM;
48955 + } else
48956 + return 0;
48957 +#endif
48958 + return 0;
48959 +}
48960 +
48961 +int
48962 +gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
48963 +{
48964 +#ifdef CONFIG_GRKERNSEC_ROFS
48965 + if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
48966 + dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
48967 + gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
48968 + return -EPERM;
48969 + } else
48970 + return 0;
48971 +#endif
48972 + return 0;
48973 +}
48974 diff -urNp linux-2.6.39.4/grsecurity/grsec_pax.c linux-2.6.39.4/grsecurity/grsec_pax.c
48975 --- linux-2.6.39.4/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
48976 +++ linux-2.6.39.4/grsecurity/grsec_pax.c 2011-08-05 19:44:37.000000000 -0400
48977 @@ -0,0 +1,36 @@
48978 +#include <linux/kernel.h>
48979 +#include <linux/sched.h>
48980 +#include <linux/mm.h>
48981 +#include <linux/file.h>
48982 +#include <linux/grinternal.h>
48983 +#include <linux/grsecurity.h>
48984 +
48985 +void
48986 +gr_log_textrel(struct vm_area_struct * vma)
48987 +{
48988 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
48989 + if (grsec_enable_audit_textrel)
48990 + gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
48991 +#endif
48992 + return;
48993 +}
48994 +
48995 +void
48996 +gr_log_rwxmmap(struct file *file)
48997 +{
48998 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
48999 + if (grsec_enable_log_rwxmaps)
49000 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
49001 +#endif
49002 + return;
49003 +}
49004 +
49005 +void
49006 +gr_log_rwxmprotect(struct file *file)
49007 +{
49008 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
49009 + if (grsec_enable_log_rwxmaps)
49010 + gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
49011 +#endif
49012 + return;
49013 +}
49014 diff -urNp linux-2.6.39.4/grsecurity/grsec_ptrace.c linux-2.6.39.4/grsecurity/grsec_ptrace.c
49015 --- linux-2.6.39.4/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
49016 +++ linux-2.6.39.4/grsecurity/grsec_ptrace.c 2011-08-05 19:44:37.000000000 -0400
49017 @@ -0,0 +1,14 @@
49018 +#include <linux/kernel.h>
49019 +#include <linux/sched.h>
49020 +#include <linux/grinternal.h>
49021 +#include <linux/grsecurity.h>
49022 +
49023 +void
49024 +gr_audit_ptrace(struct task_struct *task)
49025 +{
49026 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
49027 + if (grsec_enable_audit_ptrace)
49028 + gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
49029 +#endif
49030 + return;
49031 +}
49032 diff -urNp linux-2.6.39.4/grsecurity/grsec_sig.c linux-2.6.39.4/grsecurity/grsec_sig.c
49033 --- linux-2.6.39.4/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
49034 +++ linux-2.6.39.4/grsecurity/grsec_sig.c 2011-08-05 19:44:37.000000000 -0400
49035 @@ -0,0 +1,206 @@
49036 +#include <linux/kernel.h>
49037 +#include <linux/sched.h>
49038 +#include <linux/delay.h>
49039 +#include <linux/grsecurity.h>
49040 +#include <linux/grinternal.h>
49041 +#include <linux/hardirq.h>
49042 +
49043 +char *signames[] = {
49044 + [SIGSEGV] = "Segmentation fault",
49045 + [SIGILL] = "Illegal instruction",
49046 + [SIGABRT] = "Abort",
49047 + [SIGBUS] = "Invalid alignment/Bus error"
49048 +};
49049 +
49050 +void
49051 +gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
49052 +{
49053 +#ifdef CONFIG_GRKERNSEC_SIGNAL
49054 + if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
49055 + (sig == SIGABRT) || (sig == SIGBUS))) {
49056 + if (t->pid == current->pid) {
49057 + gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
49058 + } else {
49059 + gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
49060 + }
49061 + }
49062 +#endif
49063 + return;
49064 +}
49065 +
49066 +int
49067 +gr_handle_signal(const struct task_struct *p, const int sig)
49068 +{
49069 +#ifdef CONFIG_GRKERNSEC
49070 + if (current->pid > 1 && gr_check_protected_task(p)) {
49071 + gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
49072 + return -EPERM;
49073 + } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
49074 + return -EPERM;
49075 + }
49076 +#endif
49077 + return 0;
49078 +}
49079 +
49080 +#ifdef CONFIG_GRKERNSEC
49081 +extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
49082 +
49083 +int gr_fake_force_sig(int sig, struct task_struct *t)
49084 +{
49085 + unsigned long int flags;
49086 + int ret, blocked, ignored;
49087 + struct k_sigaction *action;
49088 +
49089 + spin_lock_irqsave(&t->sighand->siglock, flags);
49090 + action = &t->sighand->action[sig-1];
49091 + ignored = action->sa.sa_handler == SIG_IGN;
49092 + blocked = sigismember(&t->blocked, sig);
49093 + if (blocked || ignored) {
49094 + action->sa.sa_handler = SIG_DFL;
49095 + if (blocked) {
49096 + sigdelset(&t->blocked, sig);
49097 + recalc_sigpending_and_wake(t);
49098 + }
49099 + }
49100 + if (action->sa.sa_handler == SIG_DFL)
49101 + t->signal->flags &= ~SIGNAL_UNKILLABLE;
49102 + ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
49103 +
49104 + spin_unlock_irqrestore(&t->sighand->siglock, flags);
49105 +
49106 + return ret;
49107 +}
49108 +#endif
49109 +
49110 +#ifdef CONFIG_GRKERNSEC_BRUTE
49111 +#define GR_USER_BAN_TIME (15 * 60)
49112 +
49113 +static int __get_dumpable(unsigned long mm_flags)
49114 +{
49115 + int ret;
49116 +
49117 + ret = mm_flags & MMF_DUMPABLE_MASK;
49118 + return (ret >= 2) ? 2 : ret;
49119 +}
49120 +#endif
49121 +
49122 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
49123 +{
49124 +#ifdef CONFIG_GRKERNSEC_BRUTE
49125 + uid_t uid = 0;
49126 +
49127 + if (!grsec_enable_brute)
49128 + return;
49129 +
49130 + rcu_read_lock();
49131 + read_lock(&tasklist_lock);
49132 + read_lock(&grsec_exec_file_lock);
49133 + if (p->real_parent && p->real_parent->exec_file == p->exec_file)
49134 + p->real_parent->brute = 1;
49135 + else {
49136 + const struct cred *cred = __task_cred(p), *cred2;
49137 + struct task_struct *tsk, *tsk2;
49138 +
49139 + if (!__get_dumpable(mm_flags) && cred->uid) {
49140 + struct user_struct *user;
49141 +
49142 + uid = cred->uid;
49143 +
49144 + /* this is put upon execution past expiration */
49145 + user = find_user(uid);
49146 + if (user == NULL)
49147 + goto unlock;
49148 + user->banned = 1;
49149 + user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
49150 + if (user->ban_expires == ~0UL)
49151 + user->ban_expires--;
49152 +
49153 + do_each_thread(tsk2, tsk) {
49154 + cred2 = __task_cred(tsk);
49155 + if (tsk != p && cred2->uid == uid)
49156 + gr_fake_force_sig(SIGKILL, tsk);
49157 + } while_each_thread(tsk2, tsk);
49158 + }
49159 + }
49160 +unlock:
49161 + read_unlock(&grsec_exec_file_lock);
49162 + read_unlock(&tasklist_lock);
49163 + rcu_read_unlock();
49164 +
49165 + if (uid)
49166 + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
49167 +
49168 +#endif
49169 + return;
49170 +}
49171 +
49172 +void gr_handle_brute_check(void)
49173 +{
49174 +#ifdef CONFIG_GRKERNSEC_BRUTE
49175 + if (current->brute)
49176 + msleep(30 * 1000);
49177 +#endif
49178 + return;
49179 +}
49180 +
49181 +void gr_handle_kernel_exploit(void)
49182 +{
49183 +#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
49184 + const struct cred *cred;
49185 + struct task_struct *tsk, *tsk2;
49186 + struct user_struct *user;
49187 + uid_t uid;
49188 +
49189 + if (in_irq() || in_serving_softirq() || in_nmi())
49190 + panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
49191 +
49192 + uid = current_uid();
49193 +
49194 + if (uid == 0)
49195 + panic("grsec: halting the system due to suspicious kernel crash caused by root");
49196 + else {
49197 + /* kill all the processes of this user, hold a reference
49198 + to their creds struct, and prevent them from creating
49199 + another process until system reset
49200 + */
49201 + printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
49202 + /* we intentionally leak this ref */
49203 + user = get_uid(current->cred->user);
49204 + if (user) {
49205 + user->banned = 1;
49206 + user->ban_expires = ~0UL;
49207 + }
49208 +
49209 + read_lock(&tasklist_lock);
49210 + do_each_thread(tsk2, tsk) {
49211 + cred = __task_cred(tsk);
49212 + if (cred->uid == uid)
49213 + gr_fake_force_sig(SIGKILL, tsk);
49214 + } while_each_thread(tsk2, tsk);
49215 + read_unlock(&tasklist_lock);
49216 + }
49217 +#endif
49218 +}
49219 +
49220 +int __gr_process_user_ban(struct user_struct *user)
49221 +{
49222 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49223 + if (unlikely(user->banned)) {
49224 + if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
49225 + user->banned = 0;
49226 + user->ban_expires = 0;
49227 + free_uid(user);
49228 + } else
49229 + return -EPERM;
49230 + }
49231 +#endif
49232 + return 0;
49233 +}
49234 +
49235 +int gr_process_user_ban(void)
49236 +{
49237 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49238 + return __gr_process_user_ban(current->cred->user);
49239 +#endif
49240 + return 0;
49241 +}
49242 diff -urNp linux-2.6.39.4/grsecurity/grsec_sock.c linux-2.6.39.4/grsecurity/grsec_sock.c
49243 --- linux-2.6.39.4/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
49244 +++ linux-2.6.39.4/grsecurity/grsec_sock.c 2011-08-05 19:44:37.000000000 -0400
49245 @@ -0,0 +1,244 @@
49246 +#include <linux/kernel.h>
49247 +#include <linux/module.h>
49248 +#include <linux/sched.h>
49249 +#include <linux/file.h>
49250 +#include <linux/net.h>
49251 +#include <linux/in.h>
49252 +#include <linux/ip.h>
49253 +#include <net/sock.h>
49254 +#include <net/inet_sock.h>
49255 +#include <linux/grsecurity.h>
49256 +#include <linux/grinternal.h>
49257 +#include <linux/gracl.h>
49258 +
49259 +extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
49260 +extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
49261 +
49262 +EXPORT_SYMBOL(gr_search_udp_recvmsg);
49263 +EXPORT_SYMBOL(gr_search_udp_sendmsg);
49264 +
49265 +#ifdef CONFIG_UNIX_MODULE
49266 +EXPORT_SYMBOL(gr_acl_handle_unix);
49267 +EXPORT_SYMBOL(gr_acl_handle_mknod);
49268 +EXPORT_SYMBOL(gr_handle_chroot_unix);
49269 +EXPORT_SYMBOL(gr_handle_create);
49270 +#endif
49271 +
49272 +#ifdef CONFIG_GRKERNSEC
49273 +#define gr_conn_table_size 32749
49274 +struct conn_table_entry {
49275 + struct conn_table_entry *next;
49276 + struct signal_struct *sig;
49277 +};
49278 +
49279 +struct conn_table_entry *gr_conn_table[gr_conn_table_size];
49280 +DEFINE_SPINLOCK(gr_conn_table_lock);
49281 +
49282 +extern const char * gr_socktype_to_name(unsigned char type);
49283 +extern const char * gr_proto_to_name(unsigned char proto);
49284 +extern const char * gr_sockfamily_to_name(unsigned char family);
49285 +
49286 +static __inline__ int
49287 +conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
49288 +{
49289 + return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
49290 +}
49291 +
49292 +static __inline__ int
49293 +conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
49294 + __u16 sport, __u16 dport)
49295 +{
49296 + if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
49297 + sig->gr_sport == sport && sig->gr_dport == dport))
49298 + return 1;
49299 + else
49300 + return 0;
49301 +}
49302 +
49303 +static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
49304 +{
49305 + struct conn_table_entry **match;
49306 + unsigned int index;
49307 +
49308 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
49309 + sig->gr_sport, sig->gr_dport,
49310 + gr_conn_table_size);
49311 +
49312 + newent->sig = sig;
49313 +
49314 + match = &gr_conn_table[index];
49315 + newent->next = *match;
49316 + *match = newent;
49317 +
49318 + return;
49319 +}
49320 +
49321 +static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
49322 +{
49323 + struct conn_table_entry *match, *last = NULL;
49324 + unsigned int index;
49325 +
49326 + index = conn_hash(sig->gr_saddr, sig->gr_daddr,
49327 + sig->gr_sport, sig->gr_dport,
49328 + gr_conn_table_size);
49329 +
49330 + match = gr_conn_table[index];
49331 + while (match && !conn_match(match->sig,
49332 + sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
49333 + sig->gr_dport)) {
49334 + last = match;
49335 + match = match->next;
49336 + }
49337 +
49338 + if (match) {
49339 + if (last)
49340 + last->next = match->next;
49341 + else
49342 + gr_conn_table[index] = NULL;
49343 + kfree(match);
49344 + }
49345 +
49346 + return;
49347 +}
49348 +
49349 +static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
49350 + __u16 sport, __u16 dport)
49351 +{
49352 + struct conn_table_entry *match;
49353 + unsigned int index;
49354 +
49355 + index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
49356 +
49357 + match = gr_conn_table[index];
49358 + while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
49359 + match = match->next;
49360 +
49361 + if (match)
49362 + return match->sig;
49363 + else
49364 + return NULL;
49365 +}
49366 +
49367 +#endif
49368 +
49369 +void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
49370 +{
49371 +#ifdef CONFIG_GRKERNSEC
49372 + struct signal_struct *sig = task->signal;
49373 + struct conn_table_entry *newent;
49374 +
49375 + newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
49376 + if (newent == NULL)
49377 + return;
49378 + /* no bh lock needed since we are called with bh disabled */
49379 + spin_lock(&gr_conn_table_lock);
49380 + gr_del_task_from_ip_table_nolock(sig);
49381 + sig->gr_saddr = inet->inet_rcv_saddr;
49382 + sig->gr_daddr = inet->inet_daddr;
49383 + sig->gr_sport = inet->inet_sport;
49384 + sig->gr_dport = inet->inet_dport;
49385 + gr_add_to_task_ip_table_nolock(sig, newent);
49386 + spin_unlock(&gr_conn_table_lock);
49387 +#endif
49388 + return;
49389 +}
49390 +
49391 +void gr_del_task_from_ip_table(struct task_struct *task)
49392 +{
49393 +#ifdef CONFIG_GRKERNSEC
49394 + spin_lock_bh(&gr_conn_table_lock);
49395 + gr_del_task_from_ip_table_nolock(task->signal);
49396 + spin_unlock_bh(&gr_conn_table_lock);
49397 +#endif
49398 + return;
49399 +}
49400 +
49401 +void
49402 +gr_attach_curr_ip(const struct sock *sk)
49403 +{
49404 +#ifdef CONFIG_GRKERNSEC
49405 + struct signal_struct *p, *set;
49406 + const struct inet_sock *inet = inet_sk(sk);
49407 +
49408 + if (unlikely(sk->sk_protocol != IPPROTO_TCP))
49409 + return;
49410 +
49411 + set = current->signal;
49412 +
49413 + spin_lock_bh(&gr_conn_table_lock);
49414 + p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
49415 + inet->inet_dport, inet->inet_sport);
49416 + if (unlikely(p != NULL)) {
49417 + set->curr_ip = p->curr_ip;
49418 + set->used_accept = 1;
49419 + gr_del_task_from_ip_table_nolock(p);
49420 + spin_unlock_bh(&gr_conn_table_lock);
49421 + return;
49422 + }
49423 + spin_unlock_bh(&gr_conn_table_lock);
49424 +
49425 + set->curr_ip = inet->inet_daddr;
49426 + set->used_accept = 1;
49427 +#endif
49428 + return;
49429 +}
49430 +
49431 +int
49432 +gr_handle_sock_all(const int family, const int type, const int protocol)
49433 +{
49434 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49435 + if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
49436 + (family != AF_UNIX)) {
49437 + if (family == AF_INET)
49438 + gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
49439 + else
49440 + gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
49441 + return -EACCES;
49442 + }
49443 +#endif
49444 + return 0;
49445 +}
49446 +
49447 +int
49448 +gr_handle_sock_server(const struct sockaddr *sck)
49449 +{
49450 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49451 + if (grsec_enable_socket_server &&
49452 + in_group_p(grsec_socket_server_gid) &&
49453 + sck && (sck->sa_family != AF_UNIX) &&
49454 + (sck->sa_family != AF_LOCAL)) {
49455 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
49456 + return -EACCES;
49457 + }
49458 +#endif
49459 + return 0;
49460 +}
49461 +
49462 +int
49463 +gr_handle_sock_server_other(const struct sock *sck)
49464 +{
49465 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49466 + if (grsec_enable_socket_server &&
49467 + in_group_p(grsec_socket_server_gid) &&
49468 + sck && (sck->sk_family != AF_UNIX) &&
49469 + (sck->sk_family != AF_LOCAL)) {
49470 + gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
49471 + return -EACCES;
49472 + }
49473 +#endif
49474 + return 0;
49475 +}
49476 +
49477 +int
49478 +gr_handle_sock_client(const struct sockaddr *sck)
49479 +{
49480 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49481 + if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
49482 + sck && (sck->sa_family != AF_UNIX) &&
49483 + (sck->sa_family != AF_LOCAL)) {
49484 + gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
49485 + return -EACCES;
49486 + }
49487 +#endif
49488 + return 0;
49489 +}
49490 diff -urNp linux-2.6.39.4/grsecurity/grsec_sysctl.c linux-2.6.39.4/grsecurity/grsec_sysctl.c
49491 --- linux-2.6.39.4/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
49492 +++ linux-2.6.39.4/grsecurity/grsec_sysctl.c 2011-08-05 19:44:37.000000000 -0400
49493 @@ -0,0 +1,442 @@
49494 +#include <linux/kernel.h>
49495 +#include <linux/sched.h>
49496 +#include <linux/sysctl.h>
49497 +#include <linux/grsecurity.h>
49498 +#include <linux/grinternal.h>
49499 +
49500 +int
49501 +gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
49502 +{
49503 +#ifdef CONFIG_GRKERNSEC_SYSCTL
49504 + if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
49505 + gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
49506 + return -EACCES;
49507 + }
49508 +#endif
49509 + return 0;
49510 +}
49511 +
49512 +#ifdef CONFIG_GRKERNSEC_ROFS
49513 +static int __maybe_unused one = 1;
49514 +#endif
49515 +
49516 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
49517 +struct ctl_table grsecurity_table[] = {
49518 +#ifdef CONFIG_GRKERNSEC_SYSCTL
49519 +#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
49520 +#ifdef CONFIG_GRKERNSEC_IO
49521 + {
49522 + .procname = "disable_priv_io",
49523 + .data = &grsec_disable_privio,
49524 + .maxlen = sizeof(int),
49525 + .mode = 0600,
49526 + .proc_handler = &proc_dointvec,
49527 + },
49528 +#endif
49529 +#endif
49530 +#ifdef CONFIG_GRKERNSEC_LINK
49531 + {
49532 + .procname = "linking_restrictions",
49533 + .data = &grsec_enable_link,
49534 + .maxlen = sizeof(int),
49535 + .mode = 0600,
49536 + .proc_handler = &proc_dointvec,
49537 + },
49538 +#endif
49539 +#ifdef CONFIG_GRKERNSEC_BRUTE
49540 + {
49541 + .procname = "deter_bruteforce",
49542 + .data = &grsec_enable_brute,
49543 + .maxlen = sizeof(int),
49544 + .mode = 0600,
49545 + .proc_handler = &proc_dointvec,
49546 + },
49547 +#endif
49548 +#ifdef CONFIG_GRKERNSEC_FIFO
49549 + {
49550 + .procname = "fifo_restrictions",
49551 + .data = &grsec_enable_fifo,
49552 + .maxlen = sizeof(int),
49553 + .mode = 0600,
49554 + .proc_handler = &proc_dointvec,
49555 + },
49556 +#endif
49557 +#ifdef CONFIG_GRKERNSEC_EXECVE
49558 + {
49559 + .procname = "execve_limiting",
49560 + .data = &grsec_enable_execve,
49561 + .maxlen = sizeof(int),
49562 + .mode = 0600,
49563 + .proc_handler = &proc_dointvec,
49564 + },
49565 +#endif
49566 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
49567 + {
49568 + .procname = "ip_blackhole",
49569 + .data = &grsec_enable_blackhole,
49570 + .maxlen = sizeof(int),
49571 + .mode = 0600,
49572 + .proc_handler = &proc_dointvec,
49573 + },
49574 + {
49575 + .procname = "lastack_retries",
49576 + .data = &grsec_lastack_retries,
49577 + .maxlen = sizeof(int),
49578 + .mode = 0600,
49579 + .proc_handler = &proc_dointvec,
49580 + },
49581 +#endif
49582 +#ifdef CONFIG_GRKERNSEC_EXECLOG
49583 + {
49584 + .procname = "exec_logging",
49585 + .data = &grsec_enable_execlog,
49586 + .maxlen = sizeof(int),
49587 + .mode = 0600,
49588 + .proc_handler = &proc_dointvec,
49589 + },
49590 +#endif
49591 +#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
49592 + {
49593 + .procname = "rwxmap_logging",
49594 + .data = &grsec_enable_log_rwxmaps,
49595 + .maxlen = sizeof(int),
49596 + .mode = 0600,
49597 + .proc_handler = &proc_dointvec,
49598 + },
49599 +#endif
49600 +#ifdef CONFIG_GRKERNSEC_SIGNAL
49601 + {
49602 + .procname = "signal_logging",
49603 + .data = &grsec_enable_signal,
49604 + .maxlen = sizeof(int),
49605 + .mode = 0600,
49606 + .proc_handler = &proc_dointvec,
49607 + },
49608 +#endif
49609 +#ifdef CONFIG_GRKERNSEC_FORKFAIL
49610 + {
49611 + .procname = "forkfail_logging",
49612 + .data = &grsec_enable_forkfail,
49613 + .maxlen = sizeof(int),
49614 + .mode = 0600,
49615 + .proc_handler = &proc_dointvec,
49616 + },
49617 +#endif
49618 +#ifdef CONFIG_GRKERNSEC_TIME
49619 + {
49620 + .procname = "timechange_logging",
49621 + .data = &grsec_enable_time,
49622 + .maxlen = sizeof(int),
49623 + .mode = 0600,
49624 + .proc_handler = &proc_dointvec,
49625 + },
49626 +#endif
49627 +#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
49628 + {
49629 + .procname = "chroot_deny_shmat",
49630 + .data = &grsec_enable_chroot_shmat,
49631 + .maxlen = sizeof(int),
49632 + .mode = 0600,
49633 + .proc_handler = &proc_dointvec,
49634 + },
49635 +#endif
49636 +#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
49637 + {
49638 + .procname = "chroot_deny_unix",
49639 + .data = &grsec_enable_chroot_unix,
49640 + .maxlen = sizeof(int),
49641 + .mode = 0600,
49642 + .proc_handler = &proc_dointvec,
49643 + },
49644 +#endif
49645 +#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
49646 + {
49647 + .procname = "chroot_deny_mount",
49648 + .data = &grsec_enable_chroot_mount,
49649 + .maxlen = sizeof(int),
49650 + .mode = 0600,
49651 + .proc_handler = &proc_dointvec,
49652 + },
49653 +#endif
49654 +#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
49655 + {
49656 + .procname = "chroot_deny_fchdir",
49657 + .data = &grsec_enable_chroot_fchdir,
49658 + .maxlen = sizeof(int),
49659 + .mode = 0600,
49660 + .proc_handler = &proc_dointvec,
49661 + },
49662 +#endif
49663 +#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
49664 + {
49665 + .procname = "chroot_deny_chroot",
49666 + .data = &grsec_enable_chroot_double,
49667 + .maxlen = sizeof(int),
49668 + .mode = 0600,
49669 + .proc_handler = &proc_dointvec,
49670 + },
49671 +#endif
49672 +#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
49673 + {
49674 + .procname = "chroot_deny_pivot",
49675 + .data = &grsec_enable_chroot_pivot,
49676 + .maxlen = sizeof(int),
49677 + .mode = 0600,
49678 + .proc_handler = &proc_dointvec,
49679 + },
49680 +#endif
49681 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
49682 + {
49683 + .procname = "chroot_enforce_chdir",
49684 + .data = &grsec_enable_chroot_chdir,
49685 + .maxlen = sizeof(int),
49686 + .mode = 0600,
49687 + .proc_handler = &proc_dointvec,
49688 + },
49689 +#endif
49690 +#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
49691 + {
49692 + .procname = "chroot_deny_chmod",
49693 + .data = &grsec_enable_chroot_chmod,
49694 + .maxlen = sizeof(int),
49695 + .mode = 0600,
49696 + .proc_handler = &proc_dointvec,
49697 + },
49698 +#endif
49699 +#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
49700 + {
49701 + .procname = "chroot_deny_mknod",
49702 + .data = &grsec_enable_chroot_mknod,
49703 + .maxlen = sizeof(int),
49704 + .mode = 0600,
49705 + .proc_handler = &proc_dointvec,
49706 + },
49707 +#endif
49708 +#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
49709 + {
49710 + .procname = "chroot_restrict_nice",
49711 + .data = &grsec_enable_chroot_nice,
49712 + .maxlen = sizeof(int),
49713 + .mode = 0600,
49714 + .proc_handler = &proc_dointvec,
49715 + },
49716 +#endif
49717 +#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
49718 + {
49719 + .procname = "chroot_execlog",
49720 + .data = &grsec_enable_chroot_execlog,
49721 + .maxlen = sizeof(int),
49722 + .mode = 0600,
49723 + .proc_handler = &proc_dointvec,
49724 + },
49725 +#endif
49726 +#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
49727 + {
49728 + .procname = "chroot_caps",
49729 + .data = &grsec_enable_chroot_caps,
49730 + .maxlen = sizeof(int),
49731 + .mode = 0600,
49732 + .proc_handler = &proc_dointvec,
49733 + },
49734 +#endif
49735 +#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
49736 + {
49737 + .procname = "chroot_deny_sysctl",
49738 + .data = &grsec_enable_chroot_sysctl,
49739 + .maxlen = sizeof(int),
49740 + .mode = 0600,
49741 + .proc_handler = &proc_dointvec,
49742 + },
49743 +#endif
49744 +#ifdef CONFIG_GRKERNSEC_TPE
49745 + {
49746 + .procname = "tpe",
49747 + .data = &grsec_enable_tpe,
49748 + .maxlen = sizeof(int),
49749 + .mode = 0600,
49750 + .proc_handler = &proc_dointvec,
49751 + },
49752 + {
49753 + .procname = "tpe_gid",
49754 + .data = &grsec_tpe_gid,
49755 + .maxlen = sizeof(int),
49756 + .mode = 0600,
49757 + .proc_handler = &proc_dointvec,
49758 + },
49759 +#endif
49760 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49761 + {
49762 + .procname = "tpe_invert",
49763 + .data = &grsec_enable_tpe_invert,
49764 + .maxlen = sizeof(int),
49765 + .mode = 0600,
49766 + .proc_handler = &proc_dointvec,
49767 + },
49768 +#endif
49769 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
49770 + {
49771 + .procname = "tpe_restrict_all",
49772 + .data = &grsec_enable_tpe_all,
49773 + .maxlen = sizeof(int),
49774 + .mode = 0600,
49775 + .proc_handler = &proc_dointvec,
49776 + },
49777 +#endif
49778 +#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
49779 + {
49780 + .procname = "socket_all",
49781 + .data = &grsec_enable_socket_all,
49782 + .maxlen = sizeof(int),
49783 + .mode = 0600,
49784 + .proc_handler = &proc_dointvec,
49785 + },
49786 + {
49787 + .procname = "socket_all_gid",
49788 + .data = &grsec_socket_all_gid,
49789 + .maxlen = sizeof(int),
49790 + .mode = 0600,
49791 + .proc_handler = &proc_dointvec,
49792 + },
49793 +#endif
49794 +#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
49795 + {
49796 + .procname = "socket_client",
49797 + .data = &grsec_enable_socket_client,
49798 + .maxlen = sizeof(int),
49799 + .mode = 0600,
49800 + .proc_handler = &proc_dointvec,
49801 + },
49802 + {
49803 + .procname = "socket_client_gid",
49804 + .data = &grsec_socket_client_gid,
49805 + .maxlen = sizeof(int),
49806 + .mode = 0600,
49807 + .proc_handler = &proc_dointvec,
49808 + },
49809 +#endif
49810 +#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
49811 + {
49812 + .procname = "socket_server",
49813 + .data = &grsec_enable_socket_server,
49814 + .maxlen = sizeof(int),
49815 + .mode = 0600,
49816 + .proc_handler = &proc_dointvec,
49817 + },
49818 + {
49819 + .procname = "socket_server_gid",
49820 + .data = &grsec_socket_server_gid,
49821 + .maxlen = sizeof(int),
49822 + .mode = 0600,
49823 + .proc_handler = &proc_dointvec,
49824 + },
49825 +#endif
49826 +#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
49827 + {
49828 + .procname = "audit_group",
49829 + .data = &grsec_enable_group,
49830 + .maxlen = sizeof(int),
49831 + .mode = 0600,
49832 + .proc_handler = &proc_dointvec,
49833 + },
49834 + {
49835 + .procname = "audit_gid",
49836 + .data = &grsec_audit_gid,
49837 + .maxlen = sizeof(int),
49838 + .mode = 0600,
49839 + .proc_handler = &proc_dointvec,
49840 + },
49841 +#endif
49842 +#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
49843 + {
49844 + .procname = "audit_chdir",
49845 + .data = &grsec_enable_chdir,
49846 + .maxlen = sizeof(int),
49847 + .mode = 0600,
49848 + .proc_handler = &proc_dointvec,
49849 + },
49850 +#endif
49851 +#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
49852 + {
49853 + .procname = "audit_mount",
49854 + .data = &grsec_enable_mount,
49855 + .maxlen = sizeof(int),
49856 + .mode = 0600,
49857 + .proc_handler = &proc_dointvec,
49858 + },
49859 +#endif
49860 +#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
49861 + {
49862 + .procname = "audit_textrel",
49863 + .data = &grsec_enable_audit_textrel,
49864 + .maxlen = sizeof(int),
49865 + .mode = 0600,
49866 + .proc_handler = &proc_dointvec,
49867 + },
49868 +#endif
49869 +#ifdef CONFIG_GRKERNSEC_DMESG
49870 + {
49871 + .procname = "dmesg",
49872 + .data = &grsec_enable_dmesg,
49873 + .maxlen = sizeof(int),
49874 + .mode = 0600,
49875 + .proc_handler = &proc_dointvec,
49876 + },
49877 +#endif
49878 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
49879 + {
49880 + .procname = "chroot_findtask",
49881 + .data = &grsec_enable_chroot_findtask,
49882 + .maxlen = sizeof(int),
49883 + .mode = 0600,
49884 + .proc_handler = &proc_dointvec,
49885 + },
49886 +#endif
49887 +#ifdef CONFIG_GRKERNSEC_RESLOG
49888 + {
49889 + .procname = "resource_logging",
49890 + .data = &grsec_resource_logging,
49891 + .maxlen = sizeof(int),
49892 + .mode = 0600,
49893 + .proc_handler = &proc_dointvec,
49894 + },
49895 +#endif
49896 +#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
49897 + {
49898 + .procname = "audit_ptrace",
49899 + .data = &grsec_enable_audit_ptrace,
49900 + .maxlen = sizeof(int),
49901 + .mode = 0600,
49902 + .proc_handler = &proc_dointvec,
49903 + },
49904 +#endif
49905 +#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
49906 + {
49907 + .procname = "harden_ptrace",
49908 + .data = &grsec_enable_harden_ptrace,
49909 + .maxlen = sizeof(int),
49910 + .mode = 0600,
49911 + .proc_handler = &proc_dointvec,
49912 + },
49913 +#endif
49914 + {
49915 + .procname = "grsec_lock",
49916 + .data = &grsec_lock,
49917 + .maxlen = sizeof(int),
49918 + .mode = 0600,
49919 + .proc_handler = &proc_dointvec,
49920 + },
49921 +#endif
49922 +#ifdef CONFIG_GRKERNSEC_ROFS
49923 + {
49924 + .procname = "romount_protect",
49925 + .data = &grsec_enable_rofs,
49926 + .maxlen = sizeof(int),
49927 + .mode = 0600,
49928 + .proc_handler = &proc_dointvec_minmax,
49929 + .extra1 = &one,
49930 + .extra2 = &one,
49931 + },
49932 +#endif
49933 + { }
49934 +};
49935 +#endif
49936 diff -urNp linux-2.6.39.4/grsecurity/grsec_time.c linux-2.6.39.4/grsecurity/grsec_time.c
49937 --- linux-2.6.39.4/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
49938 +++ linux-2.6.39.4/grsecurity/grsec_time.c 2011-08-05 19:44:37.000000000 -0400
49939 @@ -0,0 +1,16 @@
49940 +#include <linux/kernel.h>
49941 +#include <linux/sched.h>
49942 +#include <linux/grinternal.h>
49943 +#include <linux/module.h>
49944 +
49945 +void
49946 +gr_log_timechange(void)
49947 +{
49948 +#ifdef CONFIG_GRKERNSEC_TIME
49949 + if (grsec_enable_time)
49950 + gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
49951 +#endif
49952 + return;
49953 +}
49954 +
49955 +EXPORT_SYMBOL(gr_log_timechange);
49956 diff -urNp linux-2.6.39.4/grsecurity/grsec_tpe.c linux-2.6.39.4/grsecurity/grsec_tpe.c
49957 --- linux-2.6.39.4/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
49958 +++ linux-2.6.39.4/grsecurity/grsec_tpe.c 2011-08-05 19:44:37.000000000 -0400
49959 @@ -0,0 +1,39 @@
49960 +#include <linux/kernel.h>
49961 +#include <linux/sched.h>
49962 +#include <linux/file.h>
49963 +#include <linux/fs.h>
49964 +#include <linux/grinternal.h>
49965 +
49966 +extern int gr_acl_tpe_check(void);
49967 +
49968 +int
49969 +gr_tpe_allow(const struct file *file)
49970 +{
49971 +#ifdef CONFIG_GRKERNSEC
49972 + struct inode *inode = file->f_path.dentry->d_parent->d_inode;
49973 + const struct cred *cred = current_cred();
49974 +
49975 + if (cred->uid && ((grsec_enable_tpe &&
49976 +#ifdef CONFIG_GRKERNSEC_TPE_INVERT
49977 + ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
49978 + (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
49979 +#else
49980 + in_group_p(grsec_tpe_gid)
49981 +#endif
49982 + ) || gr_acl_tpe_check()) &&
49983 + (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
49984 + (inode->i_mode & S_IWOTH))))) {
49985 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49986 + return 0;
49987 + }
49988 +#ifdef CONFIG_GRKERNSEC_TPE_ALL
49989 + if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
49990 + ((inode->i_uid && (inode->i_uid != cred->uid)) ||
49991 + (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
49992 + gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
49993 + return 0;
49994 + }
49995 +#endif
49996 +#endif
49997 + return 1;
49998 +}
49999 diff -urNp linux-2.6.39.4/grsecurity/grsum.c linux-2.6.39.4/grsecurity/grsum.c
50000 --- linux-2.6.39.4/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
50001 +++ linux-2.6.39.4/grsecurity/grsum.c 2011-08-05 19:44:37.000000000 -0400
50002 @@ -0,0 +1,61 @@
50003 +#include <linux/err.h>
50004 +#include <linux/kernel.h>
50005 +#include <linux/sched.h>
50006 +#include <linux/mm.h>
50007 +#include <linux/scatterlist.h>
50008 +#include <linux/crypto.h>
50009 +#include <linux/gracl.h>
50010 +
50011 +
50012 +#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
50013 +#error "crypto and sha256 must be built into the kernel"
50014 +#endif
50015 +
50016 +int
50017 +chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
50018 +{
50019 + char *p;
50020 + struct crypto_hash *tfm;
50021 + struct hash_desc desc;
50022 + struct scatterlist sg;
50023 + unsigned char temp_sum[GR_SHA_LEN];
50024 + volatile int retval = 0;
50025 + volatile int dummy = 0;
50026 + unsigned int i;
50027 +
50028 + sg_init_table(&sg, 1);
50029 +
50030 + tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
50031 + if (IS_ERR(tfm)) {
50032 + /* should never happen, since sha256 should be built in */
50033 + return 1;
50034 + }
50035 +
50036 + desc.tfm = tfm;
50037 + desc.flags = 0;
50038 +
50039 + crypto_hash_init(&desc);
50040 +
50041 + p = salt;
50042 + sg_set_buf(&sg, p, GR_SALT_LEN);
50043 + crypto_hash_update(&desc, &sg, sg.length);
50044 +
50045 + p = entry->pw;
50046 + sg_set_buf(&sg, p, strlen(p));
50047 +
50048 + crypto_hash_update(&desc, &sg, sg.length);
50049 +
50050 + crypto_hash_final(&desc, temp_sum);
50051 +
50052 + memset(entry->pw, 0, GR_PW_LEN);
50053 +
50054 + for (i = 0; i < GR_SHA_LEN; i++)
50055 + if (sum[i] != temp_sum[i])
50056 + retval = 1;
50057 + else
50058 + dummy = 1; // waste a cycle
50059 +
50060 + crypto_free_hash(tfm);
50061 +
50062 + return retval;
50063 +}
50064 diff -urNp linux-2.6.39.4/grsecurity/Kconfig linux-2.6.39.4/grsecurity/Kconfig
50065 --- linux-2.6.39.4/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
50066 +++ linux-2.6.39.4/grsecurity/Kconfig 2011-08-17 19:04:52.000000000 -0400
50067 @@ -0,0 +1,1050 @@
50068 +#
50069 +# grsecurity configuration
50070 +#
50071 +
50072 +menu "Grsecurity"
50073 +
50074 +config GRKERNSEC
50075 + bool "Grsecurity"
50076 + select CRYPTO
50077 + select CRYPTO_SHA256
50078 + help
50079 + If you say Y here, you will be able to configure many features
50080 + that will enhance the security of your system. It is highly
50081 + recommended that you say Y here and read through the help
50082 + for each option so that you fully understand the features and
50083 + can evaluate their usefulness for your machine.
50084 +
50085 +choice
50086 + prompt "Security Level"
50087 + depends on GRKERNSEC
50088 + default GRKERNSEC_CUSTOM
50089 +
50090 +config GRKERNSEC_LOW
50091 + bool "Low"
50092 + select GRKERNSEC_LINK
50093 + select GRKERNSEC_FIFO
50094 + select GRKERNSEC_EXECVE
50095 + select GRKERNSEC_RANDNET
50096 + select GRKERNSEC_DMESG
50097 + select GRKERNSEC_CHROOT
50098 + select GRKERNSEC_CHROOT_CHDIR
50099 +
50100 + help
50101 + If you choose this option, several of the grsecurity options will
50102 + be enabled that will give you greater protection against a number
50103 + of attacks, while assuring that none of your software will have any
50104 + conflicts with the additional security measures. If you run a lot
50105 + of unusual software, or you are having problems with the higher
50106 + security levels, you should say Y here. With this option, the
50107 + following features are enabled:
50108 +
50109 + - Linking restrictions
50110 + - FIFO restrictions
50111 + - Enforcing RLIMIT_NPROC on execve
50112 + - Restricted dmesg
50113 + - Enforced chdir("/") on chroot
50114 + - Runtime module disabling
50115 +
50116 +config GRKERNSEC_MEDIUM
50117 + bool "Medium"
50118 + select PAX
50119 + select PAX_EI_PAX
50120 + select PAX_PT_PAX_FLAGS
50121 + select PAX_HAVE_ACL_FLAGS
50122 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
50123 + select GRKERNSEC_CHROOT
50124 + select GRKERNSEC_CHROOT_SYSCTL
50125 + select GRKERNSEC_LINK
50126 + select GRKERNSEC_FIFO
50127 + select GRKERNSEC_EXECVE
50128 + select GRKERNSEC_DMESG
50129 + select GRKERNSEC_RANDNET
50130 + select GRKERNSEC_FORKFAIL
50131 + select GRKERNSEC_TIME
50132 + select GRKERNSEC_SIGNAL
50133 + select GRKERNSEC_CHROOT
50134 + select GRKERNSEC_CHROOT_UNIX
50135 + select GRKERNSEC_CHROOT_MOUNT
50136 + select GRKERNSEC_CHROOT_PIVOT
50137 + select GRKERNSEC_CHROOT_DOUBLE
50138 + select GRKERNSEC_CHROOT_CHDIR
50139 + select GRKERNSEC_CHROOT_MKNOD
50140 + select GRKERNSEC_PROC
50141 + select GRKERNSEC_PROC_USERGROUP
50142 + select PAX_RANDUSTACK
50143 + select PAX_ASLR
50144 + select PAX_RANDMMAP
50145 + select PAX_REFCOUNT if (X86 || SPARC64)
50146 + select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
50147 +
50148 + help
50149 + If you say Y here, several features in addition to those included
50150 + in the low additional security level will be enabled. These
50151 + features provide even more security to your system, though in rare
50152 + cases they may be incompatible with very old or poorly written
50153 + software. If you enable this option, make sure that your auth
50154 + service (identd) is running as gid 1001. With this option,
50155 + the following features (in addition to those provided in the
50156 + low additional security level) will be enabled:
50157 +
50158 + - Failed fork logging
50159 + - Time change logging
50160 + - Signal logging
50161 + - Deny mounts in chroot
50162 + - Deny double chrooting
50163 + - Deny sysctl writes in chroot
50164 + - Deny mknod in chroot
50165 + - Deny access to abstract AF_UNIX sockets out of chroot
50166 + - Deny pivot_root in chroot
50167 + - Denied writes of /dev/kmem, /dev/mem, and /dev/port
50168 + - /proc restrictions with special GID set to 10 (usually wheel)
50169 + - Address Space Layout Randomization (ASLR)
50170 + - Prevent exploitation of most refcount overflows
50171 + - Bounds checking of copying between the kernel and userland
50172 +
50173 +config GRKERNSEC_HIGH
50174 + bool "High"
50175 + select GRKERNSEC_LINK
50176 + select GRKERNSEC_FIFO
50177 + select GRKERNSEC_EXECVE
50178 + select GRKERNSEC_DMESG
50179 + select GRKERNSEC_FORKFAIL
50180 + select GRKERNSEC_TIME
50181 + select GRKERNSEC_SIGNAL
50182 + select GRKERNSEC_CHROOT
50183 + select GRKERNSEC_CHROOT_SHMAT
50184 + select GRKERNSEC_CHROOT_UNIX
50185 + select GRKERNSEC_CHROOT_MOUNT
50186 + select GRKERNSEC_CHROOT_FCHDIR
50187 + select GRKERNSEC_CHROOT_PIVOT
50188 + select GRKERNSEC_CHROOT_DOUBLE
50189 + select GRKERNSEC_CHROOT_CHDIR
50190 + select GRKERNSEC_CHROOT_MKNOD
50191 + select GRKERNSEC_CHROOT_CAPS
50192 + select GRKERNSEC_CHROOT_SYSCTL
50193 + select GRKERNSEC_CHROOT_FINDTASK
50194 + select GRKERNSEC_SYSFS_RESTRICT
50195 + select GRKERNSEC_PROC
50196 + select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
50197 + select GRKERNSEC_HIDESYM
50198 + select GRKERNSEC_BRUTE
50199 + select GRKERNSEC_PROC_USERGROUP
50200 + select GRKERNSEC_KMEM
50201 + select GRKERNSEC_RESLOG
50202 + select GRKERNSEC_RANDNET
50203 + select GRKERNSEC_PROC_ADD
50204 + select GRKERNSEC_CHROOT_CHMOD
50205 + select GRKERNSEC_CHROOT_NICE
50206 + select GRKERNSEC_AUDIT_MOUNT
50207 + select GRKERNSEC_MODHARDEN if (MODULES)
50208 + select GRKERNSEC_HARDEN_PTRACE
50209 + select GRKERNSEC_VM86 if (X86_32)
50210 + select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
50211 + select PAX
50212 + select PAX_RANDUSTACK
50213 + select PAX_ASLR
50214 + select PAX_RANDMMAP
50215 + select PAX_NOEXEC
50216 + select PAX_MPROTECT
50217 + select PAX_EI_PAX
50218 + select PAX_PT_PAX_FLAGS
50219 + select PAX_HAVE_ACL_FLAGS
50220 + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
50221 + select PAX_MEMORY_UDEREF if (X86 && !XEN)
50222 + select PAX_RANDKSTACK if (X86_TSC && X86)
50223 + select PAX_SEGMEXEC if (X86_32)
50224 + select PAX_PAGEEXEC
50225 + select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
50226 + select PAX_EMUTRAMP if (PARISC)
50227 + select PAX_EMUSIGRT if (PARISC)
50228 + select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
50229 + select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
50230 + select PAX_REFCOUNT if (X86 || SPARC64)
50231 + select PAX_USERCOPY if ((X86 || PPC || SPARC || ARM) && (SLAB || SLUB || SLOB))
50232 + help
50233 + If you say Y here, many of the features of grsecurity will be
50234 + enabled, which will protect you against many kinds of attacks
50235 + against your system. The heightened security comes at a cost
50236 + of an increased chance of incompatibilities with rare software
50237 + on your machine. Since this security level enables PaX, you should
50238 + view <http://pax.grsecurity.net> and read about the PaX
50239 + project. While you are there, download chpax and run it on
50240 + binaries that cause problems with PaX. Also remember that
50241 + since the /proc restrictions are enabled, you must run your
50242 + identd as gid 1001. This security level enables the following
50243 + features in addition to those listed in the low and medium
50244 + security levels:
50245 +
50246 + - Additional /proc restrictions
50247 + - Chmod restrictions in chroot
50248 + - No signals, ptrace, or viewing of processes outside of chroot
50249 + - Capability restrictions in chroot
50250 + - Deny fchdir out of chroot
50251 + - Priority restrictions in chroot
50252 + - Segmentation-based implementation of PaX
50253 + - Mprotect restrictions
50254 + - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
50255 + - Kernel stack randomization
50256 + - Mount/unmount/remount logging
50257 + - Kernel symbol hiding
50258 + - Prevention of memory exhaustion-based exploits
50259 + - Hardening of module auto-loading
50260 + - Ptrace restrictions
50261 + - Restricted vm86 mode
50262 + - Restricted sysfs/debugfs
50263 + - Active kernel exploit response
50264 +
50265 +config GRKERNSEC_CUSTOM
50266 + bool "Custom"
50267 + help
50268 + If you say Y here, you will be able to configure every grsecurity
50269 + option, which allows you to enable many more features that aren't
50270 + covered in the basic security levels. These additional features
50271 + include TPE, socket restrictions, and the sysctl system for
50272 + grsecurity. It is advised that you read through the help for
50273 + each option to determine its usefulness in your situation.
50274 +
50275 +endchoice
50276 +
50277 +menu "Address Space Protection"
50278 +depends on GRKERNSEC
50279 +
50280 +config GRKERNSEC_KMEM
50281 + bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
50282 + select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
50283 + help
50284 + If you say Y here, /dev/kmem and /dev/mem won't be allowed to
50285 + be written to via mmap or otherwise to modify the running kernel.
50286 + /dev/port will also not be allowed to be opened. If you have module
50287 + support disabled, enabling this will close up four ways that are
50288 + currently used to insert malicious code into the running kernel.
50289 + Even with all these features enabled, we still highly recommend that
50290 + you use the RBAC system, as it is still possible for an attacker to
50291 + modify the running kernel through privileged I/O granted by ioperm/iopl.
50292 + If you are not using XFree86, you may be able to stop this additional
50293 + case by enabling the 'Disable privileged I/O' option. Though nothing
50294 + legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
50295 + but only to video memory, which is the only writing we allow in this
50296 + case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
50297 + not be allowed to mprotect it with PROT_WRITE later.
50298 + It is highly recommended that you say Y here if you meet all the
50299 + conditions above.
50300 +
50301 +config GRKERNSEC_VM86
50302 + bool "Restrict VM86 mode"
50303 + depends on X86_32
50304 +
50305 + help
50306 + If you say Y here, only processes with CAP_SYS_RAWIO will be able to
50307 + make use of a special execution mode on 32bit x86 processors called
50308 + Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
50309 + video cards and will still work with this option enabled. The purpose
50310 + of the option is to prevent exploitation of emulation errors in
50311 + virtualization of vm86 mode like the one discovered in VMWare in 2009.
50312 + Nearly all users should be able to enable this option.
50313 +
50314 +config GRKERNSEC_IO
50315 + bool "Disable privileged I/O"
50316 + depends on X86
50317 + select RTC_CLASS
50318 + select RTC_INTF_DEV
50319 + select RTC_DRV_CMOS
50320 +
50321 + help
50322 + If you say Y here, all ioperm and iopl calls will return an error.
50323 + Ioperm and iopl can be used to modify the running kernel.
50324 + Unfortunately, some programs need this access to operate properly,
50325 + the most notable of which are XFree86 and hwclock. hwclock can be
50326 + remedied by having RTC support in the kernel, so real-time
50327 + clock support is enabled if this option is enabled, to ensure
50328 + that hwclock operates correctly. XFree86 still will not
50329 + operate correctly with this option enabled, so DO NOT CHOOSE Y
50330 + IF YOU USE XFree86. If you use XFree86 and you still want to
50331 + protect your kernel against modification, use the RBAC system.
50332 +
50333 +config GRKERNSEC_PROC_MEMMAP
50334 + bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
50335 + default y if (PAX_NOEXEC || PAX_ASLR)
50336 + depends on PAX_NOEXEC || PAX_ASLR
50337 + help
50338 + If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
50339 + give no information about the addresses of its mappings if
50340 + PaX features that rely on random addresses are enabled on the task.
50341 + If you use PaX it is greatly recommended that you say Y here as it
50342 + closes up a hole that makes the full ASLR useless for suid
50343 + binaries.
50344 +
50345 +config GRKERNSEC_BRUTE
50346 + bool "Deter exploit bruteforcing"
50347 + help
50348 + If you say Y here, attempts to bruteforce exploits against forking
50349 + daemons such as apache or sshd, as well as against suid/sgid binaries
50350 + will be deterred. When a child of a forking daemon is killed by PaX
50351 + or crashes due to an illegal instruction or other suspicious signal,
50352 + the parent process will be delayed 30 seconds upon every subsequent
50353 + fork until the administrator is able to assess the situation and
50354 + restart the daemon.
50355 + In the suid/sgid case, the attempt is logged, the user has all their
50356 + processes terminated, and they are prevented from executing any further
50357 + processes for 15 minutes.
50358 + It is recommended that you also enable signal logging in the auditing
50359 + section so that logs are generated when a process triggers a suspicious
50360 + signal.
50361 + If the sysctl option is enabled, a sysctl option with name
50362 + "deter_bruteforce" is created.
50363 +
50364 +
50365 +config GRKERNSEC_MODHARDEN
50366 + bool "Harden module auto-loading"
50367 + depends on MODULES
50368 + help
50369 + If you say Y here, module auto-loading in response to use of some
50370 + feature implemented by an unloaded module will be restricted to
50371 + root users. Enabling this option helps defend against attacks
50372 + by unprivileged users who abuse the auto-loading behavior to
50373 + cause a vulnerable module to load that is then exploited.
50374 +
50375 + If this option prevents a legitimate use of auto-loading for a
50376 + non-root user, the administrator can execute modprobe manually
50377 + with the exact name of the module mentioned in the alert log.
50378 + Alternatively, the administrator can add the module to the list
50379 + of modules loaded at boot by modifying init scripts.
50380 +
50381 + Modification of init scripts will most likely be needed on
50382 + Ubuntu servers with encrypted home directory support enabled,
50383 + as the first non-root user logging in will cause the ecb(aes),
50384 + ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
50385 +
50386 +config GRKERNSEC_HIDESYM
50387 + bool "Hide kernel symbols"
50388 + help
50389 + If you say Y here, getting information on loaded modules, and
50390 + displaying all kernel symbols through a syscall will be restricted
50391 + to users with CAP_SYS_MODULE. For software compatibility reasons,
50392 + /proc/kallsyms will be restricted to the root user. The RBAC
50393 + system can hide that entry even from root.
50394 +
50395 + This option also prevents leaking of kernel addresses through
50396 + several /proc entries.
50397 +
50398 + Note that this option is only effective provided the following
50399 + conditions are met:
50400 + 1) The kernel using grsecurity is not precompiled by some distribution
50401 + 2) You have also enabled GRKERNSEC_DMESG
50402 + 3) You are using the RBAC system and hiding other files such as your
50403 + kernel image and System.map. Alternatively, enabling this option
50404 + causes the permissions on /boot, /lib/modules, and the kernel
50405 + source directory to change at compile time to prevent
50406 + reading by non-root users.
50407 + If the above conditions are met, this option will aid in providing a
50408 + useful protection against local kernel exploitation of overflows
50409 + and arbitrary read/write vulnerabilities.
50410 +
50411 +config GRKERNSEC_KERN_LOCKOUT
50412 + bool "Active kernel exploit response"
50413 + depends on X86 || ARM || PPC || SPARC
50414 + help
50415 + If you say Y here, when a PaX alert is triggered due to suspicious
50416 + activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
50417 + or an OOPs occurs due to bad memory accesses, instead of just
50418 + terminating the offending process (and potentially allowing
50419 + a subsequent exploit from the same user), we will take one of two
50420 + actions:
50421 + If the user was root, we will panic the system
50422 + If the user was non-root, we will log the attempt, terminate
50423 + all processes owned by the user, then prevent them from creating
50424 + any new processes until the system is restarted
50425 + This deters repeated kernel exploitation/bruteforcing attempts
50426 + and is useful for later forensics.
50427 +
50428 +endmenu
50429 +menu "Role Based Access Control Options"
50430 +depends on GRKERNSEC
50431 +
50432 +config GRKERNSEC_RBAC_DEBUG
50433 + bool
50434 +
50435 +config GRKERNSEC_NO_RBAC
50436 + bool "Disable RBAC system"
50437 + help
50438 + If you say Y here, the /dev/grsec device will be removed from the kernel,
50439 + preventing the RBAC system from being enabled. You should only say Y
50440 + here if you have no intention of using the RBAC system, so as to prevent
50441 + an attacker with root access from misusing the RBAC system to hide files
50442 + and processes when loadable module support and /dev/[k]mem have been
50443 + locked down.
50444 +
50445 +config GRKERNSEC_ACL_HIDEKERN
50446 + bool "Hide kernel processes"
50447 + help
50448 + If you say Y here, all kernel threads will be hidden to all
50449 + processes but those whose subject has the "view hidden processes"
50450 + flag.
50451 +
50452 +config GRKERNSEC_ACL_MAXTRIES
50453 + int "Maximum tries before password lockout"
50454 + default 3
50455 + help
50456 + This option enforces the maximum number of times a user can attempt
50457 + to authorize themselves with the grsecurity RBAC system before being
50458 + denied the ability to attempt authorization again for a specified time.
50459 + The lower the number, the harder it will be to brute-force a password.
50460 +
50461 +config GRKERNSEC_ACL_TIMEOUT
50462 + int "Time to wait after max password tries, in seconds"
50463 + default 30
50464 + help
50465 + This option specifies the time the user must wait after attempting to
50466 + authorize to the RBAC system with the maximum number of invalid
50467 + passwords. The higher the number, the harder it will be to brute-force
50468 + a password.
50469 +
50470 +endmenu
50471 +menu "Filesystem Protections"
50472 +depends on GRKERNSEC
50473 +
50474 +config GRKERNSEC_PROC
50475 + bool "Proc restrictions"
50476 + help
50477 + If you say Y here, the permissions of the /proc filesystem
50478 + will be altered to enhance system security and privacy. You MUST
50479 + choose either a user only restriction or a user and group restriction.
50480 + Depending upon the option you choose, you can either restrict users to
50481 + see only the processes they themselves run, or choose a group that can
50482 + view all processes and files normally restricted to root if you choose
50483 + the "restrict to user only" option. NOTE: If you're running identd as
50484 + a non-root user, you will have to run it as the group you specify here.
50485 +
50486 +config GRKERNSEC_PROC_USER
50487 + bool "Restrict /proc to user only"
50488 + depends on GRKERNSEC_PROC
50489 + help
50490 + If you say Y here, non-root users will only be able to view their own
50491 + processes, and restricts them from viewing network-related information,
50492 + and viewing kernel symbol and module information.
50493 +
50494 +config GRKERNSEC_PROC_USERGROUP
50495 + bool "Allow special group"
50496 + depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
50497 + help
50498 + If you say Y here, you will be able to select a group that will be
50499 + able to view all processes and network-related information. If you've
50500 + enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
50501 + remain hidden. This option is useful if you want to run identd as
50502 + a non-root user.
50503 +
50504 +config GRKERNSEC_PROC_GID
50505 + int "GID for special group"
50506 + depends on GRKERNSEC_PROC_USERGROUP
50507 + default 1001
50508 +
50509 +config GRKERNSEC_PROC_ADD
50510 + bool "Additional restrictions"
50511 + depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
50512 + help
50513 + If you say Y here, additional restrictions will be placed on
50514 + /proc that keep normal users from viewing device information and
50515 + slabinfo information that could be useful for exploits.
50516 +
50517 +config GRKERNSEC_LINK
50518 + bool "Linking restrictions"
50519 + help
50520 + If you say Y here, /tmp race exploits will be prevented, since users
50521 + will no longer be able to follow symlinks owned by other users in
50522 + world-writable +t directories (e.g. /tmp), unless the owner of the
50523 + symlink is the owner of the directory. Users will also not be
50524 + able to hardlink to files they do not own. If the sysctl option is
50525 + enabled, a sysctl option with name "linking_restrictions" is created.
50526 +
50527 +config GRKERNSEC_FIFO
50528 + bool "FIFO restrictions"
50529 + help
50530 + If you say Y here, users will not be able to write to FIFOs they don't
50531 + own in world-writable +t directories (e.g. /tmp), unless the owner of
50532 + the FIFO is the same as the owner of the directory it's held in. If the sysctl
50533 + option is enabled, a sysctl option with name "fifo_restrictions" is
50534 + created.
50535 +
50536 +config GRKERNSEC_SYSFS_RESTRICT
50537 + bool "Sysfs/debugfs restriction"
50538 + depends on SYSFS
50539 + help
50540 + If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
50541 + any filesystem normally mounted under it (e.g. debugfs) will only
50542 + be accessible by root. These filesystems generally provide access
50543 + to hardware and debug information that isn't appropriate for unprivileged
50544 + users of the system. Sysfs and debugfs have also become a large source
50545 + of new vulnerabilities, ranging from infoleaks to local compromise.
50546 + There has been very little oversight with an eye toward security involved
50547 + in adding new exporters of information to these filesystems, so their
50548 + use is discouraged.
50549 + This option is equivalent to a chmod 0700 of the mount paths.
50550 +
50551 +config GRKERNSEC_ROFS
50552 + bool "Runtime read-only mount protection"
50553 + help
50554 + If you say Y here, a sysctl option with name "romount_protect" will
50555 + be created. By setting this option to 1 at runtime, filesystems
50556 + will be protected in the following ways:
50557 + * No new writable mounts will be allowed
50558 + * Existing read-only mounts won't be able to be remounted read/write
50559 + * Write operations will be denied on all block devices
50560 + This option acts independently of grsec_lock: once it is set to 1,
50561 + it cannot be turned off. Therefore, please be mindful of the resulting
50562 + behavior if this option is enabled in an init script on a read-only
50563 + filesystem. This feature is mainly intended for secure embedded systems.
50564 +
50565 +config GRKERNSEC_CHROOT
50566 + bool "Chroot jail restrictions"
50567 + help
50568 + If you say Y here, you will be able to choose several options that will
50569 + make breaking out of a chrooted jail much more difficult. If you
50570 + encounter no software incompatibilities with the following options, it
50571 + is recommended that you enable each one.
50572 +
50573 +config GRKERNSEC_CHROOT_MOUNT
50574 + bool "Deny mounts"
50575 + depends on GRKERNSEC_CHROOT
50576 + help
50577 + If you say Y here, processes inside a chroot will not be able to
50578 + mount or remount filesystems. If the sysctl option is enabled, a
50579 + sysctl option with name "chroot_deny_mount" is created.
50580 +
50581 +config GRKERNSEC_CHROOT_DOUBLE
50582 + bool "Deny double-chroots"
50583 + depends on GRKERNSEC_CHROOT
50584 + help
50585 + If you say Y here, processes inside a chroot will not be able to chroot
50586 + again outside the chroot. This is a widely used method of breaking
50587 + out of a chroot jail and should not be allowed. If the sysctl
50588 + option is enabled, a sysctl option with name
50589 + "chroot_deny_chroot" is created.
50590 +
50591 +config GRKERNSEC_CHROOT_PIVOT
50592 + bool "Deny pivot_root in chroot"
50593 + depends on GRKERNSEC_CHROOT
50594 + help
50595 + If you say Y here, processes inside a chroot will not be able to use
50596 + a function called pivot_root() that was introduced in Linux 2.3.41. It
50597 + works similar to chroot in that it changes the root filesystem. This
50598 + function could be misused in a chrooted process to attempt to break out
50599 + of the chroot, and therefore should not be allowed. If the sysctl
50600 + option is enabled, a sysctl option with name "chroot_deny_pivot" is
50601 + created.
50602 +
50603 +config GRKERNSEC_CHROOT_CHDIR
50604 + bool "Enforce chdir(\"/\") on all chroots"
50605 + depends on GRKERNSEC_CHROOT
50606 + help
50607 + If you say Y here, the current working directory of all newly-chrooted
50608 + applications will be set to the root directory of the chroot.
50609 + The man page on chroot(2) states:
50610 + Note that this call does not change the current working
50611 + directory, so that `.' can be outside the tree rooted at
50612 + `/'. In particular, the super-user can escape from a
50613 + `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
50614 +
50615 + It is recommended that you say Y here, since it's not known to break
50616 + any software. If the sysctl option is enabled, a sysctl option with
50617 + name "chroot_enforce_chdir" is created.
50618 +
50619 +config GRKERNSEC_CHROOT_CHMOD
50620 + bool "Deny (f)chmod +s"
50621 + depends on GRKERNSEC_CHROOT
50622 + help
50623 + If you say Y here, processes inside a chroot will not be able to chmod
50624 + or fchmod files to make them have suid or sgid bits. This protects
50625 + against another published method of breaking a chroot. If the sysctl
50626 + option is enabled, a sysctl option with name "chroot_deny_chmod" is
50627 + created.
50628 +
50629 +config GRKERNSEC_CHROOT_FCHDIR
50630 + bool "Deny fchdir out of chroot"
50631 + depends on GRKERNSEC_CHROOT
50632 + help
50633 + If you say Y here, a well-known method of breaking chroots by fchdir'ing
50634 + to a file descriptor of the chrooting process that points to a directory
50635 + outside the filesystem will be stopped. If the sysctl option
50636 + is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
50637 +
50638 +config GRKERNSEC_CHROOT_MKNOD
50639 + bool "Deny mknod"
50640 + depends on GRKERNSEC_CHROOT
50641 + help
50642 + If you say Y here, processes inside a chroot will not be allowed to
50643 + mknod. The problem with using mknod inside a chroot is that it
50644 + would allow an attacker to create a device entry that is the same
50645 + as one on the physical root of your system, which could range from
50646 + the console device to a device for your harddrive (which
50647 + they could then use to wipe the drive or steal data). It is recommended
50648 + that you say Y here, unless you run into software incompatibilities.
50649 + If the sysctl option is enabled, a sysctl option with name
50650 + "chroot_deny_mknod" is created.
50651 +
50652 +config GRKERNSEC_CHROOT_SHMAT
50653 + bool "Deny shmat() out of chroot"
50654 + depends on GRKERNSEC_CHROOT
50655 + help
50656 + If you say Y here, processes inside a chroot will not be able to attach
50657 + to shared memory segments that were created outside of the chroot jail.
50658 + It is recommended that you say Y here. If the sysctl option is enabled,
50659 + a sysctl option with name "chroot_deny_shmat" is created.
50660 +
50661 +config GRKERNSEC_CHROOT_UNIX
50662 + bool "Deny access to abstract AF_UNIX sockets out of chroot"
50663 + depends on GRKERNSEC_CHROOT
50664 + help
50665 + If you say Y here, processes inside a chroot will not be able to
50666 + connect to abstract (meaning not belonging to a filesystem) Unix
50667 + domain sockets that were bound outside of a chroot. It is recommended
50668 + that you say Y here. If the sysctl option is enabled, a sysctl option
50669 + with name "chroot_deny_unix" is created.
50670 +
50671 +config GRKERNSEC_CHROOT_FINDTASK
50672 + bool "Protect outside processes"
50673 + depends on GRKERNSEC_CHROOT
50674 + help
50675 + If you say Y here, processes inside a chroot will not be able to
50676 + kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
50677 + getsid, or view any process outside of the chroot. If the sysctl
50678 + option is enabled, a sysctl option with name "chroot_findtask" is
50679 + created.
50680 +
50681 +config GRKERNSEC_CHROOT_NICE
50682 + bool "Restrict priority changes"
50683 + depends on GRKERNSEC_CHROOT
50684 + help
50685 + If you say Y here, processes inside a chroot will not be able to raise
50686 + the priority of processes in the chroot, or alter the priority of
50687 + processes outside the chroot. This provides more security than simply
50688 + removing CAP_SYS_NICE from the process' capability set. If the
50689 + sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
50690 + is created.
50691 +
50692 +config GRKERNSEC_CHROOT_SYSCTL
50693 + bool "Deny sysctl writes"
50694 + depends on GRKERNSEC_CHROOT
50695 + help
50696 + If you say Y here, an attacker in a chroot will not be able to
50697 + write to sysctl entries, either by sysctl(2) or through a /proc
50698 + interface. It is strongly recommended that you say Y here. If the
50699 + sysctl option is enabled, a sysctl option with name
50700 + "chroot_deny_sysctl" is created.
50701 +
50702 +config GRKERNSEC_CHROOT_CAPS
50703 + bool "Capability restrictions"
50704 + depends on GRKERNSEC_CHROOT
50705 + help
50706 + If you say Y here, the capabilities on all root processes within a
50707 + chroot jail will be lowered to stop module insertion, raw i/o,
50708 + system and net admin tasks, rebooting the system, modifying immutable
50709 + files, modifying IPC owned by another, and changing the system time.
50710 + This is left an option because it can break some apps. Disable this
50711 + if your chrooted apps are having problems performing those kinds of
50712 + tasks. If the sysctl option is enabled, a sysctl option with
50713 + name "chroot_caps" is created.
50714 +
50715 +endmenu
50716 +menu "Kernel Auditing"
50717 +depends on GRKERNSEC
50718 +
50719 +config GRKERNSEC_AUDIT_GROUP
50720 + bool "Single group for auditing"
50721 + help
50722 + If you say Y here, the exec, chdir, and (un)mount logging features
50723 + will only operate on a group you specify. This option is recommended
50724 + if you only want to watch certain users instead of having a large
50725 + amount of logs from the entire system. If the sysctl option is enabled,
50726 + a sysctl option with name "audit_group" is created.
50727 +
50728 +config GRKERNSEC_AUDIT_GID
50729 + int "GID for auditing"
50730 + depends on GRKERNSEC_AUDIT_GROUP
50731 + default 1007
50732 +
50733 +config GRKERNSEC_EXECLOG
50734 + bool "Exec logging"
50735 + help
50736 + If you say Y here, all execve() calls will be logged (since the
50737 + other exec*() calls are frontends to execve(), all execution
50738 + will be logged). Useful for shell-servers that like to keep track
50739 + of their users. If the sysctl option is enabled, a sysctl option with
50740 + name "exec_logging" is created.
50741 + WARNING: This option when enabled will produce a LOT of logs, especially
50742 + on an active system.
50743 +
50744 +config GRKERNSEC_RESLOG
50745 + bool "Resource logging"
50746 + help
50747 + If you say Y here, all attempts to overstep resource limits will
50748 + be logged with the resource name, the requested size, and the current
50749 + limit. It is highly recommended that you say Y here. If the sysctl
50750 + option is enabled, a sysctl option with name "resource_logging" is
50751 + created. If the RBAC system is enabled, the sysctl value is ignored.
50752 +
50753 +config GRKERNSEC_CHROOT_EXECLOG
50754 + bool "Log execs within chroot"
50755 + help
50756 + If you say Y here, all executions inside a chroot jail will be logged
50757 + to syslog. This can cause a large amount of logs if certain
50758 + applications (eg. djb's daemontools) are installed on the system, and
50759 + is therefore left as an option. If the sysctl option is enabled, a
50760 + sysctl option with name "chroot_execlog" is created.
50761 +
50762 +config GRKERNSEC_AUDIT_PTRACE
50763 + bool "Ptrace logging"
50764 + help
50765 + If you say Y here, all attempts to attach to a process via ptrace
50766 + will be logged. If the sysctl option is enabled, a sysctl option
50767 + with name "audit_ptrace" is created.
50768 +
50769 +config GRKERNSEC_AUDIT_CHDIR
50770 + bool "Chdir logging"
50771 + help
50772 + If you say Y here, all chdir() calls will be logged. If the sysctl
50773 + option is enabled, a sysctl option with name "audit_chdir" is created.
50774 +
50775 +config GRKERNSEC_AUDIT_MOUNT
50776 + bool "(Un)Mount logging"
50777 + help
50778 + If you say Y here, all mounts and unmounts will be logged. If the
50779 + sysctl option is enabled, a sysctl option with name "audit_mount" is
50780 + created.
50781 +
50782 +config GRKERNSEC_SIGNAL
50783 + bool "Signal logging"
50784 + help
50785 + If you say Y here, certain important signals will be logged, such as
50786 + SIGSEGV, which will as a result inform you of when an error in a program
50787 + occurred, which in some cases could mean a possible exploit attempt.
50788 + If the sysctl option is enabled, a sysctl option with name
50789 + "signal_logging" is created.
50790 +
50791 +config GRKERNSEC_FORKFAIL
50792 + bool "Fork failure logging"
50793 + help
50794 + If you say Y here, all failed fork() attempts will be logged.
50795 + This could suggest a fork bomb, or someone attempting to overstep
50796 + their process limit. If the sysctl option is enabled, a sysctl option
50797 + with name "forkfail_logging" is created.
50798 +
50799 +config GRKERNSEC_TIME
50800 + bool "Time change logging"
50801 + help
50802 + If you say Y here, any changes of the system clock will be logged.
50803 + If the sysctl option is enabled, a sysctl option with name
50804 + "timechange_logging" is created.
50805 +
50806 +config GRKERNSEC_PROC_IPADDR
50807 + bool "/proc/<pid>/ipaddr support"
50808 + help
50809 + If you say Y here, a new entry will be added to each /proc/<pid>
50810 + directory that contains the IP address of the person using the task.
50811 + The IP is carried across local TCP and AF_UNIX stream sockets.
50812 + This information can be useful for IDS/IPSes to perform remote response
50813 + to a local attack. The entry is readable by only the owner of the
50814 + process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
50815 + the RBAC system), and thus does not create privacy concerns.
50816 +
50817 +config GRKERNSEC_RWXMAP_LOG
50818 + bool 'Denied RWX mmap/mprotect logging'
50819 + depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
50820 + help
50821 + If you say Y here, calls to mmap() and mprotect() with explicit
50822 + usage of PROT_WRITE and PROT_EXEC together will be logged when
50823 + denied by the PAX_MPROTECT feature. If the sysctl option is
50824 + enabled, a sysctl option with name "rwxmap_logging" is created.
50825 +
50826 +config GRKERNSEC_AUDIT_TEXTREL
50827 + bool 'ELF text relocations logging (READ HELP)'
50828 + depends on PAX_MPROTECT
50829 + help
50830 + If you say Y here, text relocations will be logged with the filename
50831 + of the offending library or binary. The purpose of the feature is
50832 + to help Linux distribution developers get rid of libraries and
50833 + binaries that need text relocations which hinder the future progress
50834 + of PaX. Only Linux distribution developers should say Y here, and
50835 + never on a production machine, as this option creates an information
50836 + leak that could aid an attacker in defeating the randomization of
50837 + a single memory region. If the sysctl option is enabled, a sysctl
50838 + option with name "audit_textrel" is created.
50839 +
50840 +endmenu
50841 +
50842 +menu "Executable Protections"
50843 +depends on GRKERNSEC
50844 +
50845 +config GRKERNSEC_EXECVE
50846 + bool "Enforce RLIMIT_NPROC on execs"
50847 + help
50848 + If you say Y here, users with a resource limit on processes will
50849 + have the value checked during execve() calls. The current system
50850 + only checks the system limit during fork() calls. If the sysctl option
50851 + is enabled, a sysctl option with name "execve_limiting" is created.
50852 +
50853 +config GRKERNSEC_DMESG
50854 + bool "Dmesg(8) restriction"
50855 + help
50856 + If you say Y here, non-root users will not be able to use dmesg(8)
50857 + to view up to the last 4kb of messages in the kernel's log buffer.
50858 + The kernel's log buffer often contains kernel addresses and other
50859 + identifying information useful to an attacker in fingerprinting a
50860 + system for a targeted exploit.
50861 + If the sysctl option is enabled, a sysctl option with name "dmesg" is
50862 + created.
50863 +
50864 +config GRKERNSEC_HARDEN_PTRACE
50865 + bool "Deter ptrace-based process snooping"
50866 + help
50867 + If you say Y here, TTY sniffers and other malicious monitoring
50868 + programs implemented through ptrace will be defeated. If you
50869 + have been using the RBAC system, this option has already been
50870 + enabled for several years for all users, with the ability to make
50871 + fine-grained exceptions.
50872 +
50873 + This option only affects the ability of non-root users to ptrace
50874 + processes that are not a descendent of the ptracing process.
50875 + This means that strace ./binary and gdb ./binary will still work,
50876 + but attaching to arbitrary processes will not. If the sysctl
50877 + option is enabled, a sysctl option with name "harden_ptrace" is
50878 + created.
50879 +
50880 +config GRKERNSEC_TPE
50881 + bool "Trusted Path Execution (TPE)"
50882 + help
50883 + If you say Y here, you will be able to choose a gid to add to the
50884 + supplementary groups of users you want to mark as "untrusted."
50885 + These users will not be able to execute any files that are not in
50886 + root-owned directories writable only by root. If the sysctl option
50887 + is enabled, a sysctl option with name "tpe" is created.
50888 +
50889 +config GRKERNSEC_TPE_ALL
50890 + bool "Partially restrict all non-root users"
50891 + depends on GRKERNSEC_TPE
50892 + help
50893 + If you say Y here, all non-root users will be covered under
50894 + a weaker TPE restriction. This is separate from, and in addition to,
50895 + the main TPE options that you have selected elsewhere. Thus, if a
50896 + "trusted" GID is chosen, this restriction applies to even that GID.
50897 + Under this restriction, all non-root users will only be allowed to
50898 + execute files in directories they own that are not group or
50899 + world-writable, or in directories owned by root and writable only by
50900 + root. If the sysctl option is enabled, a sysctl option with name
50901 + "tpe_restrict_all" is created.
50902 +
50903 +config GRKERNSEC_TPE_INVERT
50904 + bool "Invert GID option"
50905 + depends on GRKERNSEC_TPE
50906 + help
50907 + If you say Y here, the group you specify in the TPE configuration will
50908 + decide what group TPE restrictions will be *disabled* for. This
50909 + option is useful if you want TPE restrictions to be applied to most
50910 + users on the system. If the sysctl option is enabled, a sysctl option
50911 + with name "tpe_invert" is created. Unlike other sysctl options, this
50912 + entry will default to on for backward-compatibility.
50913 +
50914 +config GRKERNSEC_TPE_GID
50915 + int "GID for untrusted users"
50916 + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
50917 + default 1005
50918 + help
50919 + Setting this GID determines what group TPE restrictions will be
50920 + *enabled* for. If the sysctl option is enabled, a sysctl option
50921 + with name "tpe_gid" is created.
50922 +
50923 +config GRKERNSEC_TPE_GID
50924 + int "GID for trusted users"
50925 + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
50926 + default 1005
50927 + help
50928 + Setting this GID determines what group TPE restrictions will be
50929 + *disabled* for. If the sysctl option is enabled, a sysctl option
50930 + with name "tpe_gid" is created.
50931 +
50932 +endmenu
50933 +menu "Network Protections"
50934 +depends on GRKERNSEC
50935 +
50936 +config GRKERNSEC_RANDNET
50937 + bool "Larger entropy pools"
50938 + help
50939 + If you say Y here, the entropy pools used for many features of Linux
50940 + and grsecurity will be doubled in size. Since several grsecurity
50941 + features use additional randomness, it is recommended that you say Y
50942 + here. Saying Y here has a similar effect as modifying
50943 + /proc/sys/kernel/random/poolsize.
50944 +
50945 +config GRKERNSEC_BLACKHOLE
50946 + bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
50947 + depends on NET
50948 + help
50949 + If you say Y here, neither TCP resets nor ICMP
50950 + destination-unreachable packets will be sent in response to packets
50951 + sent to ports for which no associated listening process exists.
50952 + This feature supports both IPV4 and IPV6 and exempts the
50953 + loopback interface from blackholing. Enabling this feature
50954 + makes a host more resilient to DoS attacks and reduces network
50955 + visibility against scanners.
50956 +
50957 + The blackhole feature as-implemented is equivalent to the FreeBSD
50958 + blackhole feature, as it prevents RST responses to all packets, not
50959 + just SYNs. Under most application behavior this causes no
50960 + problems, but applications (like haproxy) may not close certain
50961 + connections in a way that cleanly terminates them on the remote
50962 + end, leaving the remote host in LAST_ACK state. Because of this
50963 + side-effect and to prevent intentional LAST_ACK DoSes, this
50964 + feature also adds automatic mitigation against such attacks.
50965 + The mitigation drastically reduces the amount of time a socket
50966 + can spend in LAST_ACK state. If you're using haproxy and not
50967 + all servers it connects to have this option enabled, consider
50968 + disabling this feature on the haproxy host.
50969 +
50970 + If the sysctl option is enabled, two sysctl options with names
50971 + "ip_blackhole" and "lastack_retries" will be created.
50972 + While "ip_blackhole" takes the standard zero/non-zero on/off
50973 + toggle, "lastack_retries" uses the same kinds of values as
50974 + "tcp_retries1" and "tcp_retries2". The default value of 4
50975 + prevents a socket from lasting more than 45 seconds in LAST_ACK
50976 + state.
50977 +
50978 +config GRKERNSEC_SOCKET
50979 + bool "Socket restrictions"
50980 + depends on NET
50981 + help
50982 + If you say Y here, you will be able to choose from several options.
50983 + If you assign a GID on your system and add it to the supplementary
50984 + groups of users you want to restrict socket access to, this patch
50985 + will perform up to three things, based on the option(s) you choose.
50986 +
50987 +config GRKERNSEC_SOCKET_ALL
50988 + bool "Deny any sockets to group"
50989 + depends on GRKERNSEC_SOCKET
50990 + help
50991 + If you say Y here, you will be able to choose a GID of whose users will
50992 + be unable to connect to other hosts from your machine or run server
50993 + applications from your machine. If the sysctl option is enabled, a
50994 + sysctl option with name "socket_all" is created.
50995 +
50996 +config GRKERNSEC_SOCKET_ALL_GID
50997 + int "GID to deny all sockets for"
50998 + depends on GRKERNSEC_SOCKET_ALL
50999 + default 1004
51000 + help
51001 + Here you can choose the GID to disable socket access for. Remember to
51002 + add the users you want socket access disabled for to the GID
51003 + specified here. If the sysctl option is enabled, a sysctl option
51004 + with name "socket_all_gid" is created.
51005 +
51006 +config GRKERNSEC_SOCKET_CLIENT
51007 + bool "Deny client sockets to group"
51008 + depends on GRKERNSEC_SOCKET
51009 + help
51010 + If you say Y here, you will be able to choose a GID of whose users will
51011 + be unable to connect to other hosts from your machine, but will be
51012 + able to run servers. If this option is enabled, all users in the group
51013 + you specify will have to use passive mode when initiating ftp transfers
51014 + from the shell on your machine. If the sysctl option is enabled, a
51015 + sysctl option with name "socket_client" is created.
51016 +
51017 +config GRKERNSEC_SOCKET_CLIENT_GID
51018 + int "GID to deny client sockets for"
51019 + depends on GRKERNSEC_SOCKET_CLIENT
51020 + default 1003
51021 + help
51022 + Here you can choose the GID to disable client socket access for.
51023 + Remember to add the users you want client socket access disabled for to
51024 + the GID specified here. If the sysctl option is enabled, a sysctl
51025 + option with name "socket_client_gid" is created.
51026 +
51027 +config GRKERNSEC_SOCKET_SERVER
51028 + bool "Deny server sockets to group"
51029 + depends on GRKERNSEC_SOCKET
51030 + help
51031 + If you say Y here, you will be able to choose a GID of whose users will
51032 + be unable to run server applications from your machine. If the sysctl
51033 + option is enabled, a sysctl option with name "socket_server" is created.
51034 +
51035 +config GRKERNSEC_SOCKET_SERVER_GID
51036 + int "GID to deny server sockets for"
51037 + depends on GRKERNSEC_SOCKET_SERVER
51038 + default 1002
51039 + help
51040 + Here you can choose the GID to disable server socket access for.
51041 + Remember to add the users you want server socket access disabled for to
51042 + the GID specified here. If the sysctl option is enabled, a sysctl
51043 + option with name "socket_server_gid" is created.
51044 +
51045 +endmenu
51046 +menu "Sysctl support"
51047 +depends on GRKERNSEC && SYSCTL
51048 +
51049 +config GRKERNSEC_SYSCTL
51050 + bool "Sysctl support"
51051 + help
51052 + If you say Y here, you will be able to change the options that
51053 + grsecurity runs with at bootup, without having to recompile your
51054 + kernel. You can echo values to files in /proc/sys/kernel/grsecurity
51055 + to enable (1) or disable (0) various features. All the sysctl entries
51056 + are mutable until the "grsec_lock" entry is set to a non-zero value.
51057 + All features enabled in the kernel configuration are disabled at boot
51058 + if you do not say Y to the "Turn on features by default" option.
51059 + All options should be set at startup, and the grsec_lock entry should
51060 + be set to a non-zero value after all the options are set.
51061 + *THIS IS EXTREMELY IMPORTANT*
51062 +
51063 +config GRKERNSEC_SYSCTL_DISTRO
51064 + bool "Extra sysctl support for distro makers (READ HELP)"
51065 + depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
51066 + help
51067 + If you say Y here, additional sysctl options will be created
51068 + for features that affect processes running as root. Therefore,
51069 + it is critical when using this option that the grsec_lock entry be
51070 + enabled after boot. Only distros with prebuilt kernel packages
51071 + with this option enabled that can ensure grsec_lock is enabled
51072 + after boot should use this option.
51073 + *Failure to set grsec_lock after boot makes all grsec features
51074 + this option covers useless*
51075 +
51076 + Currently this option creates the following sysctl entries:
51077 + "Disable Privileged I/O": "disable_priv_io"
51078 +
51079 +config GRKERNSEC_SYSCTL_ON
51080 + bool "Turn on features by default"
51081 + depends on GRKERNSEC_SYSCTL
51082 + help
51083 + If you say Y here, instead of having all features enabled in the
51084 + kernel configuration disabled at boot time, the features will be
51085 + enabled at boot time. It is recommended you say Y here unless
51086 + there is some reason you would want all sysctl-tunable features to
51087 + be disabled by default. As mentioned elsewhere, it is important
51088 + to enable the grsec_lock entry once you have finished modifying
51089 + the sysctl entries.
51090 +
51091 +endmenu
51092 +menu "Logging Options"
51093 +depends on GRKERNSEC
51094 +
51095 +config GRKERNSEC_FLOODTIME
51096 + int "Seconds in between log messages (minimum)"
51097 + default 10
51098 + help
51099 + This option allows you to enforce the number of seconds between
51100 + grsecurity log messages. The default should be suitable for most
51101 + people, however, if you choose to change it, choose a value small enough
51102 + to allow informative logs to be produced, but large enough to
51103 + prevent flooding.
51104 +
51105 +config GRKERNSEC_FLOODBURST
51106 + int "Number of messages in a burst (maximum)"
51107 + default 4
51108 + help
51109 + This option allows you to choose the maximum number of messages allowed
51110 + within the flood time interval you chose in a separate option. The
51111 + default should be suitable for most people, however if you find that
51112 + many of your logs are being interpreted as flooding, you may want to
51113 + raise this value.
51114 +
51115 +endmenu
51116 +
51117 +endmenu
51118 diff -urNp linux-2.6.39.4/grsecurity/Makefile linux-2.6.39.4/grsecurity/Makefile
51119 --- linux-2.6.39.4/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
51120 +++ linux-2.6.39.4/grsecurity/Makefile 2011-08-17 19:03:10.000000000 -0400
51121 @@ -0,0 +1,33 @@
51122 +# grsecurity's ACL system was originally written in 2001 by Michael Dalton
51123 +# during 2001-2009 it has been completely redesigned by Brad Spengler
51124 +# into an RBAC system
51125 +#
51126 +# All code in this directory and various hooks inserted throughout the kernel
51127 +# are copyright Brad Spengler - Open Source Security, Inc., and released
51128 +# under the GPL v2 or higher
51129 +
51130 +obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
51131 + grsec_mount.o grsec_sig.o grsec_sysctl.o \
51132 + grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
51133 +
51134 +obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
51135 + gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
51136 + gracl_learn.o grsec_log.o
51137 +obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
51138 +
51139 +ifdef CONFIG_NET
51140 +obj-$(CONFIG_GRKERNSEC) += gracl_ip.o grsec_sock.o
51141 +endif
51142 +
51143 +ifndef CONFIG_GRKERNSEC
51144 +obj-y += grsec_disabled.o
51145 +endif
51146 +
51147 +ifdef CONFIG_GRKERNSEC_HIDESYM
51148 +extra-y := grsec_hidesym.o
51149 +$(obj)/grsec_hidesym.o:
51150 + @-chmod -f 500 /boot
51151 + @-chmod -f 500 /lib/modules
51152 + @-chmod -f 700 .
51153 + @echo ' grsec: protected kernel image paths'
51154 +endif
51155 diff -urNp linux-2.6.39.4/include/acpi/acpi_bus.h linux-2.6.39.4/include/acpi/acpi_bus.h
51156 --- linux-2.6.39.4/include/acpi/acpi_bus.h 2011-05-19 00:06:34.000000000 -0400
51157 +++ linux-2.6.39.4/include/acpi/acpi_bus.h 2011-08-05 20:34:06.000000000 -0400
51158 @@ -107,7 +107,7 @@ struct acpi_device_ops {
51159 acpi_op_bind bind;
51160 acpi_op_unbind unbind;
51161 acpi_op_notify notify;
51162 -};
51163 +} __no_const;
51164
51165 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
51166
51167 diff -urNp linux-2.6.39.4/include/asm-generic/atomic-long.h linux-2.6.39.4/include/asm-generic/atomic-long.h
51168 --- linux-2.6.39.4/include/asm-generic/atomic-long.h 2011-05-19 00:06:34.000000000 -0400
51169 +++ linux-2.6.39.4/include/asm-generic/atomic-long.h 2011-08-05 20:34:06.000000000 -0400
51170 @@ -22,6 +22,12 @@
51171
51172 typedef atomic64_t atomic_long_t;
51173
51174 +#ifdef CONFIG_PAX_REFCOUNT
51175 +typedef atomic64_unchecked_t atomic_long_unchecked_t;
51176 +#else
51177 +typedef atomic64_t atomic_long_unchecked_t;
51178 +#endif
51179 +
51180 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
51181
51182 static inline long atomic_long_read(atomic_long_t *l)
51183 @@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
51184 return (long)atomic64_read(v);
51185 }
51186
51187 +#ifdef CONFIG_PAX_REFCOUNT
51188 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
51189 +{
51190 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51191 +
51192 + return (long)atomic64_read_unchecked(v);
51193 +}
51194 +#endif
51195 +
51196 static inline void atomic_long_set(atomic_long_t *l, long i)
51197 {
51198 atomic64_t *v = (atomic64_t *)l;
51199 @@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
51200 atomic64_set(v, i);
51201 }
51202
51203 +#ifdef CONFIG_PAX_REFCOUNT
51204 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
51205 +{
51206 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51207 +
51208 + atomic64_set_unchecked(v, i);
51209 +}
51210 +#endif
51211 +
51212 static inline void atomic_long_inc(atomic_long_t *l)
51213 {
51214 atomic64_t *v = (atomic64_t *)l;
51215 @@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
51216 atomic64_inc(v);
51217 }
51218
51219 +#ifdef CONFIG_PAX_REFCOUNT
51220 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
51221 +{
51222 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51223 +
51224 + atomic64_inc_unchecked(v);
51225 +}
51226 +#endif
51227 +
51228 static inline void atomic_long_dec(atomic_long_t *l)
51229 {
51230 atomic64_t *v = (atomic64_t *)l;
51231 @@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
51232 atomic64_dec(v);
51233 }
51234
51235 +#ifdef CONFIG_PAX_REFCOUNT
51236 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
51237 +{
51238 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51239 +
51240 + atomic64_dec_unchecked(v);
51241 +}
51242 +#endif
51243 +
51244 static inline void atomic_long_add(long i, atomic_long_t *l)
51245 {
51246 atomic64_t *v = (atomic64_t *)l;
51247 @@ -59,6 +101,15 @@ static inline void atomic_long_add(long
51248 atomic64_add(i, v);
51249 }
51250
51251 +#ifdef CONFIG_PAX_REFCOUNT
51252 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
51253 +{
51254 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51255 +
51256 + atomic64_add_unchecked(i, v);
51257 +}
51258 +#endif
51259 +
51260 static inline void atomic_long_sub(long i, atomic_long_t *l)
51261 {
51262 atomic64_t *v = (atomic64_t *)l;
51263 @@ -66,6 +117,15 @@ static inline void atomic_long_sub(long
51264 atomic64_sub(i, v);
51265 }
51266
51267 +#ifdef CONFIG_PAX_REFCOUNT
51268 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
51269 +{
51270 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51271 +
51272 + atomic64_sub_unchecked(i, v);
51273 +}
51274 +#endif
51275 +
51276 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
51277 {
51278 atomic64_t *v = (atomic64_t *)l;
51279 @@ -115,6 +175,15 @@ static inline long atomic_long_inc_retur
51280 return (long)atomic64_inc_return(v);
51281 }
51282
51283 +#ifdef CONFIG_PAX_REFCOUNT
51284 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
51285 +{
51286 + atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
51287 +
51288 + return (long)atomic64_inc_return_unchecked(v);
51289 +}
51290 +#endif
51291 +
51292 static inline long atomic_long_dec_return(atomic_long_t *l)
51293 {
51294 atomic64_t *v = (atomic64_t *)l;
51295 @@ -140,6 +209,12 @@ static inline long atomic_long_add_unles
51296
51297 typedef atomic_t atomic_long_t;
51298
51299 +#ifdef CONFIG_PAX_REFCOUNT
51300 +typedef atomic_unchecked_t atomic_long_unchecked_t;
51301 +#else
51302 +typedef atomic_t atomic_long_unchecked_t;
51303 +#endif
51304 +
51305 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
51306 static inline long atomic_long_read(atomic_long_t *l)
51307 {
51308 @@ -148,6 +223,15 @@ static inline long atomic_long_read(atom
51309 return (long)atomic_read(v);
51310 }
51311
51312 +#ifdef CONFIG_PAX_REFCOUNT
51313 +static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
51314 +{
51315 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51316 +
51317 + return (long)atomic_read_unchecked(v);
51318 +}
51319 +#endif
51320 +
51321 static inline void atomic_long_set(atomic_long_t *l, long i)
51322 {
51323 atomic_t *v = (atomic_t *)l;
51324 @@ -155,6 +239,15 @@ static inline void atomic_long_set(atomi
51325 atomic_set(v, i);
51326 }
51327
51328 +#ifdef CONFIG_PAX_REFCOUNT
51329 +static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
51330 +{
51331 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51332 +
51333 + atomic_set_unchecked(v, i);
51334 +}
51335 +#endif
51336 +
51337 static inline void atomic_long_inc(atomic_long_t *l)
51338 {
51339 atomic_t *v = (atomic_t *)l;
51340 @@ -162,6 +255,15 @@ static inline void atomic_long_inc(atomi
51341 atomic_inc(v);
51342 }
51343
51344 +#ifdef CONFIG_PAX_REFCOUNT
51345 +static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
51346 +{
51347 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51348 +
51349 + atomic_inc_unchecked(v);
51350 +}
51351 +#endif
51352 +
51353 static inline void atomic_long_dec(atomic_long_t *l)
51354 {
51355 atomic_t *v = (atomic_t *)l;
51356 @@ -169,6 +271,15 @@ static inline void atomic_long_dec(atomi
51357 atomic_dec(v);
51358 }
51359
51360 +#ifdef CONFIG_PAX_REFCOUNT
51361 +static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
51362 +{
51363 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51364 +
51365 + atomic_dec_unchecked(v);
51366 +}
51367 +#endif
51368 +
51369 static inline void atomic_long_add(long i, atomic_long_t *l)
51370 {
51371 atomic_t *v = (atomic_t *)l;
51372 @@ -176,6 +287,15 @@ static inline void atomic_long_add(long
51373 atomic_add(i, v);
51374 }
51375
51376 +#ifdef CONFIG_PAX_REFCOUNT
51377 +static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
51378 +{
51379 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51380 +
51381 + atomic_add_unchecked(i, v);
51382 +}
51383 +#endif
51384 +
51385 static inline void atomic_long_sub(long i, atomic_long_t *l)
51386 {
51387 atomic_t *v = (atomic_t *)l;
51388 @@ -183,6 +303,15 @@ static inline void atomic_long_sub(long
51389 atomic_sub(i, v);
51390 }
51391
51392 +#ifdef CONFIG_PAX_REFCOUNT
51393 +static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
51394 +{
51395 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51396 +
51397 + atomic_sub_unchecked(i, v);
51398 +}
51399 +#endif
51400 +
51401 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
51402 {
51403 atomic_t *v = (atomic_t *)l;
51404 @@ -232,6 +361,15 @@ static inline long atomic_long_inc_retur
51405 return (long)atomic_inc_return(v);
51406 }
51407
51408 +#ifdef CONFIG_PAX_REFCOUNT
51409 +static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
51410 +{
51411 + atomic_unchecked_t *v = (atomic_unchecked_t *)l;
51412 +
51413 + return (long)atomic_inc_return_unchecked(v);
51414 +}
51415 +#endif
51416 +
51417 static inline long atomic_long_dec_return(atomic_long_t *l)
51418 {
51419 atomic_t *v = (atomic_t *)l;
51420 @@ -255,4 +393,49 @@ static inline long atomic_long_add_unles
51421
51422 #endif /* BITS_PER_LONG == 64 */
51423
51424 +#ifdef CONFIG_PAX_REFCOUNT
51425 +static inline void pax_refcount_needs_these_functions(void)
51426 +{
51427 + atomic_read_unchecked((atomic_unchecked_t *)NULL);
51428 + atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
51429 + atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
51430 + atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
51431 + atomic_inc_unchecked((atomic_unchecked_t *)NULL);
51432 + (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
51433 + atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
51434 + atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
51435 + atomic_dec_unchecked((atomic_unchecked_t *)NULL);
51436 + atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
51437 + (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
51438 +
51439 + atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
51440 + atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
51441 + atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
51442 + atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
51443 + atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
51444 + atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
51445 + atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
51446 +}
51447 +#else
51448 +#define atomic_read_unchecked(v) atomic_read(v)
51449 +#define atomic_set_unchecked(v, i) atomic_set((v), (i))
51450 +#define atomic_add_unchecked(i, v) atomic_add((i), (v))
51451 +#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
51452 +#define atomic_inc_unchecked(v) atomic_inc(v)
51453 +#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
51454 +#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
51455 +#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
51456 +#define atomic_dec_unchecked(v) atomic_dec(v)
51457 +#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
51458 +#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
51459 +
51460 +#define atomic_long_read_unchecked(v) atomic_long_read(v)
51461 +#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
51462 +#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
51463 +#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
51464 +#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
51465 +#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
51466 +#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
51467 +#endif
51468 +
51469 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
51470 diff -urNp linux-2.6.39.4/include/asm-generic/cache.h linux-2.6.39.4/include/asm-generic/cache.h
51471 --- linux-2.6.39.4/include/asm-generic/cache.h 2011-05-19 00:06:34.000000000 -0400
51472 +++ linux-2.6.39.4/include/asm-generic/cache.h 2011-08-05 19:44:37.000000000 -0400
51473 @@ -6,7 +6,7 @@
51474 * cache lines need to provide their own cache.h.
51475 */
51476
51477 -#define L1_CACHE_SHIFT 5
51478 -#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
51479 +#define L1_CACHE_SHIFT 5UL
51480 +#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
51481
51482 #endif /* __ASM_GENERIC_CACHE_H */
51483 diff -urNp linux-2.6.39.4/include/asm-generic/int-l64.h linux-2.6.39.4/include/asm-generic/int-l64.h
51484 --- linux-2.6.39.4/include/asm-generic/int-l64.h 2011-05-19 00:06:34.000000000 -0400
51485 +++ linux-2.6.39.4/include/asm-generic/int-l64.h 2011-08-05 19:44:37.000000000 -0400
51486 @@ -46,6 +46,8 @@ typedef unsigned int u32;
51487 typedef signed long s64;
51488 typedef unsigned long u64;
51489
51490 +typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
51491 +
51492 #define S8_C(x) x
51493 #define U8_C(x) x ## U
51494 #define S16_C(x) x
51495 diff -urNp linux-2.6.39.4/include/asm-generic/int-ll64.h linux-2.6.39.4/include/asm-generic/int-ll64.h
51496 --- linux-2.6.39.4/include/asm-generic/int-ll64.h 2011-05-19 00:06:34.000000000 -0400
51497 +++ linux-2.6.39.4/include/asm-generic/int-ll64.h 2011-08-05 19:44:37.000000000 -0400
51498 @@ -51,6 +51,8 @@ typedef unsigned int u32;
51499 typedef signed long long s64;
51500 typedef unsigned long long u64;
51501
51502 +typedef unsigned long long intoverflow_t;
51503 +
51504 #define S8_C(x) x
51505 #define U8_C(x) x ## U
51506 #define S16_C(x) x
51507 diff -urNp linux-2.6.39.4/include/asm-generic/kmap_types.h linux-2.6.39.4/include/asm-generic/kmap_types.h
51508 --- linux-2.6.39.4/include/asm-generic/kmap_types.h 2011-05-19 00:06:34.000000000 -0400
51509 +++ linux-2.6.39.4/include/asm-generic/kmap_types.h 2011-08-05 19:44:37.000000000 -0400
51510 @@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE,
51511 KMAP_D(17) KM_NMI,
51512 KMAP_D(18) KM_NMI_PTE,
51513 KMAP_D(19) KM_KDB,
51514 +KMAP_D(20) KM_CLEARPAGE,
51515 /*
51516 * Remember to update debug_kmap_atomic() when adding new kmap types!
51517 */
51518 -KMAP_D(20) KM_TYPE_NR
51519 +KMAP_D(21) KM_TYPE_NR
51520 };
51521
51522 #undef KMAP_D
51523 diff -urNp linux-2.6.39.4/include/asm-generic/pgtable.h linux-2.6.39.4/include/asm-generic/pgtable.h
51524 --- linux-2.6.39.4/include/asm-generic/pgtable.h 2011-05-19 00:06:34.000000000 -0400
51525 +++ linux-2.6.39.4/include/asm-generic/pgtable.h 2011-08-05 19:44:37.000000000 -0400
51526 @@ -447,6 +447,14 @@ static inline int pmd_write(pmd_t pmd)
51527 #endif /* __HAVE_ARCH_PMD_WRITE */
51528 #endif
51529
51530 +#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
51531 +static inline unsigned long pax_open_kernel(void) { return 0; }
51532 +#endif
51533 +
51534 +#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
51535 +static inline unsigned long pax_close_kernel(void) { return 0; }
51536 +#endif
51537 +
51538 #endif /* !__ASSEMBLY__ */
51539
51540 #endif /* _ASM_GENERIC_PGTABLE_H */
51541 diff -urNp linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h
51542 --- linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h 2011-05-19 00:06:34.000000000 -0400
51543 +++ linux-2.6.39.4/include/asm-generic/pgtable-nopmd.h 2011-08-05 19:44:37.000000000 -0400
51544 @@ -1,14 +1,19 @@
51545 #ifndef _PGTABLE_NOPMD_H
51546 #define _PGTABLE_NOPMD_H
51547
51548 -#ifndef __ASSEMBLY__
51549 -
51550 #include <asm-generic/pgtable-nopud.h>
51551
51552 -struct mm_struct;
51553 -
51554 #define __PAGETABLE_PMD_FOLDED
51555
51556 +#define PMD_SHIFT PUD_SHIFT
51557 +#define PTRS_PER_PMD 1
51558 +#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
51559 +#define PMD_MASK (~(PMD_SIZE-1))
51560 +
51561 +#ifndef __ASSEMBLY__
51562 +
51563 +struct mm_struct;
51564 +
51565 /*
51566 * Having the pmd type consist of a pud gets the size right, and allows
51567 * us to conceptually access the pud entry that this pmd is folded into
51568 @@ -16,11 +21,6 @@ struct mm_struct;
51569 */
51570 typedef struct { pud_t pud; } pmd_t;
51571
51572 -#define PMD_SHIFT PUD_SHIFT
51573 -#define PTRS_PER_PMD 1
51574 -#define PMD_SIZE (1UL << PMD_SHIFT)
51575 -#define PMD_MASK (~(PMD_SIZE-1))
51576 -
51577 /*
51578 * The "pud_xxx()" functions here are trivial for a folded two-level
51579 * setup: the pmd is never bad, and a pmd always exists (as it's folded
51580 diff -urNp linux-2.6.39.4/include/asm-generic/pgtable-nopud.h linux-2.6.39.4/include/asm-generic/pgtable-nopud.h
51581 --- linux-2.6.39.4/include/asm-generic/pgtable-nopud.h 2011-05-19 00:06:34.000000000 -0400
51582 +++ linux-2.6.39.4/include/asm-generic/pgtable-nopud.h 2011-08-05 19:44:37.000000000 -0400
51583 @@ -1,10 +1,15 @@
51584 #ifndef _PGTABLE_NOPUD_H
51585 #define _PGTABLE_NOPUD_H
51586
51587 -#ifndef __ASSEMBLY__
51588 -
51589 #define __PAGETABLE_PUD_FOLDED
51590
51591 +#define PUD_SHIFT PGDIR_SHIFT
51592 +#define PTRS_PER_PUD 1
51593 +#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
51594 +#define PUD_MASK (~(PUD_SIZE-1))
51595 +
51596 +#ifndef __ASSEMBLY__
51597 +
51598 /*
51599 * Having the pud type consist of a pgd gets the size right, and allows
51600 * us to conceptually access the pgd entry that this pud is folded into
51601 @@ -12,11 +17,6 @@
51602 */
51603 typedef struct { pgd_t pgd; } pud_t;
51604
51605 -#define PUD_SHIFT PGDIR_SHIFT
51606 -#define PTRS_PER_PUD 1
51607 -#define PUD_SIZE (1UL << PUD_SHIFT)
51608 -#define PUD_MASK (~(PUD_SIZE-1))
51609 -
51610 /*
51611 * The "pgd_xxx()" functions here are trivial for a folded two-level
51612 * setup: the pud is never bad, and a pud always exists (as it's folded
51613 diff -urNp linux-2.6.39.4/include/asm-generic/vmlinux.lds.h linux-2.6.39.4/include/asm-generic/vmlinux.lds.h
51614 --- linux-2.6.39.4/include/asm-generic/vmlinux.lds.h 2011-05-19 00:06:34.000000000 -0400
51615 +++ linux-2.6.39.4/include/asm-generic/vmlinux.lds.h 2011-08-05 19:44:37.000000000 -0400
51616 @@ -213,6 +213,7 @@
51617 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
51618 VMLINUX_SYMBOL(__start_rodata) = .; \
51619 *(.rodata) *(.rodata.*) \
51620 + *(.data..read_only) \
51621 *(__vermagic) /* Kernel version magic */ \
51622 . = ALIGN(8); \
51623 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
51624 @@ -707,14 +708,15 @@
51625 * section in the linker script will go there too. @phdr should have
51626 * a leading colon.
51627 *
51628 - * Note that this macros defines __per_cpu_load as an absolute symbol.
51629 + * Note that this macros defines per_cpu_load as an absolute symbol.
51630 * If there is no need to put the percpu section at a predetermined
51631 * address, use PERCPU().
51632 */
51633 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
51634 - VMLINUX_SYMBOL(__per_cpu_load) = .; \
51635 - .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
51636 + per_cpu_load = .; \
51637 + .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
51638 - LOAD_OFFSET) { \
51639 + VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
51640 VMLINUX_SYMBOL(__per_cpu_start) = .; \
51641 *(.data..percpu..first) \
51642 . = ALIGN(PAGE_SIZE); \
51643 @@ -726,7 +728,7 @@
51644 *(.data..percpu..shared_aligned) \
51645 VMLINUX_SYMBOL(__per_cpu_end) = .; \
51646 } phdr \
51647 - . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
51648 + . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
51649
51650 /**
51651 * PERCPU - define output section for percpu area, simple version
51652 diff -urNp linux-2.6.39.4/include/drm/drm_crtc_helper.h linux-2.6.39.4/include/drm/drm_crtc_helper.h
51653 --- linux-2.6.39.4/include/drm/drm_crtc_helper.h 2011-05-19 00:06:34.000000000 -0400
51654 +++ linux-2.6.39.4/include/drm/drm_crtc_helper.h 2011-08-05 20:34:06.000000000 -0400
51655 @@ -74,7 +74,7 @@ struct drm_crtc_helper_funcs {
51656
51657 /* disable crtc when not in use - more explicit than dpms off */
51658 void (*disable)(struct drm_crtc *crtc);
51659 -};
51660 +} __no_const;
51661
51662 struct drm_encoder_helper_funcs {
51663 void (*dpms)(struct drm_encoder *encoder, int mode);
51664 @@ -95,7 +95,7 @@ struct drm_encoder_helper_funcs {
51665 struct drm_connector *connector);
51666 /* disable encoder when not in use - more explicit than dpms off */
51667 void (*disable)(struct drm_encoder *encoder);
51668 -};
51669 +} __no_const;
51670
51671 struct drm_connector_helper_funcs {
51672 int (*get_modes)(struct drm_connector *connector);
51673 diff -urNp linux-2.6.39.4/include/drm/drmP.h linux-2.6.39.4/include/drm/drmP.h
51674 --- linux-2.6.39.4/include/drm/drmP.h 2011-05-19 00:06:34.000000000 -0400
51675 +++ linux-2.6.39.4/include/drm/drmP.h 2011-08-05 20:34:06.000000000 -0400
51676 @@ -73,6 +73,7 @@
51677 #include <linux/workqueue.h>
51678 #include <linux/poll.h>
51679 #include <asm/pgalloc.h>
51680 +#include <asm/local.h>
51681 #include "drm.h"
51682
51683 #include <linux/idr.h>
51684 @@ -1023,7 +1024,7 @@ struct drm_device {
51685
51686 /** \name Usage Counters */
51687 /*@{ */
51688 - int open_count; /**< Outstanding files open */
51689 + local_t open_count; /**< Outstanding files open */
51690 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
51691 atomic_t vma_count; /**< Outstanding vma areas open */
51692 int buf_use; /**< Buffers in use -- cannot alloc */
51693 @@ -1034,7 +1035,7 @@ struct drm_device {
51694 /*@{ */
51695 unsigned long counters;
51696 enum drm_stat_type types[15];
51697 - atomic_t counts[15];
51698 + atomic_unchecked_t counts[15];
51699 /*@} */
51700
51701 struct list_head filelist;
51702 diff -urNp linux-2.6.39.4/include/drm/ttm/ttm_memory.h linux-2.6.39.4/include/drm/ttm/ttm_memory.h
51703 --- linux-2.6.39.4/include/drm/ttm/ttm_memory.h 2011-05-19 00:06:34.000000000 -0400
51704 +++ linux-2.6.39.4/include/drm/ttm/ttm_memory.h 2011-08-05 20:34:06.000000000 -0400
51705 @@ -47,7 +47,7 @@
51706
51707 struct ttm_mem_shrink {
51708 int (*do_shrink) (struct ttm_mem_shrink *);
51709 -};
51710 +} __no_const;
51711
51712 /**
51713 * struct ttm_mem_global - Global memory accounting structure.
51714 diff -urNp linux-2.6.39.4/include/linux/a.out.h linux-2.6.39.4/include/linux/a.out.h
51715 --- linux-2.6.39.4/include/linux/a.out.h 2011-05-19 00:06:34.000000000 -0400
51716 +++ linux-2.6.39.4/include/linux/a.out.h 2011-08-05 19:44:37.000000000 -0400
51717 @@ -39,6 +39,14 @@ enum machine_type {
51718 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
51719 };
51720
51721 +/* Constants for the N_FLAGS field */
51722 +#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
51723 +#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
51724 +#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
51725 +#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
51726 +/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
51727 +#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
51728 +
51729 #if !defined (N_MAGIC)
51730 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
51731 #endif
51732 diff -urNp linux-2.6.39.4/include/linux/atmdev.h linux-2.6.39.4/include/linux/atmdev.h
51733 --- linux-2.6.39.4/include/linux/atmdev.h 2011-05-19 00:06:34.000000000 -0400
51734 +++ linux-2.6.39.4/include/linux/atmdev.h 2011-08-05 19:44:37.000000000 -0400
51735 @@ -237,7 +237,7 @@ struct compat_atm_iobuf {
51736 #endif
51737
51738 struct k_atm_aal_stats {
51739 -#define __HANDLE_ITEM(i) atomic_t i
51740 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
51741 __AAL_STAT_ITEMS
51742 #undef __HANDLE_ITEM
51743 };
51744 diff -urNp linux-2.6.39.4/include/linux/binfmts.h linux-2.6.39.4/include/linux/binfmts.h
51745 --- linux-2.6.39.4/include/linux/binfmts.h 2011-05-19 00:06:34.000000000 -0400
51746 +++ linux-2.6.39.4/include/linux/binfmts.h 2011-08-05 19:44:37.000000000 -0400
51747 @@ -92,6 +92,7 @@ struct linux_binfmt {
51748 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
51749 int (*load_shlib)(struct file *);
51750 int (*core_dump)(struct coredump_params *cprm);
51751 + void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
51752 unsigned long min_coredump; /* minimal dump size */
51753 };
51754
51755 diff -urNp linux-2.6.39.4/include/linux/blkdev.h linux-2.6.39.4/include/linux/blkdev.h
51756 --- linux-2.6.39.4/include/linux/blkdev.h 2011-06-03 00:04:14.000000000 -0400
51757 +++ linux-2.6.39.4/include/linux/blkdev.h 2011-08-05 20:34:06.000000000 -0400
51758 @@ -1307,7 +1307,7 @@ struct block_device_operations {
51759 int (*getgeo)(struct block_device *, struct hd_geometry *);
51760 /* this callback is with swap_lock and sometimes page table lock held */
51761 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
51762 - struct module *owner;
51763 + struct module * const owner;
51764 };
51765
51766 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
51767 diff -urNp linux-2.6.39.4/include/linux/blktrace_api.h linux-2.6.39.4/include/linux/blktrace_api.h
51768 --- linux-2.6.39.4/include/linux/blktrace_api.h 2011-05-19 00:06:34.000000000 -0400
51769 +++ linux-2.6.39.4/include/linux/blktrace_api.h 2011-08-05 19:44:37.000000000 -0400
51770 @@ -161,7 +161,7 @@ struct blk_trace {
51771 struct dentry *dir;
51772 struct dentry *dropped_file;
51773 struct dentry *msg_file;
51774 - atomic_t dropped;
51775 + atomic_unchecked_t dropped;
51776 };
51777
51778 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
51779 diff -urNp linux-2.6.39.4/include/linux/byteorder/little_endian.h linux-2.6.39.4/include/linux/byteorder/little_endian.h
51780 --- linux-2.6.39.4/include/linux/byteorder/little_endian.h 2011-05-19 00:06:34.000000000 -0400
51781 +++ linux-2.6.39.4/include/linux/byteorder/little_endian.h 2011-08-05 19:44:37.000000000 -0400
51782 @@ -42,51 +42,51 @@
51783
51784 static inline __le64 __cpu_to_le64p(const __u64 *p)
51785 {
51786 - return (__force __le64)*p;
51787 + return (__force const __le64)*p;
51788 }
51789 static inline __u64 __le64_to_cpup(const __le64 *p)
51790 {
51791 - return (__force __u64)*p;
51792 + return (__force const __u64)*p;
51793 }
51794 static inline __le32 __cpu_to_le32p(const __u32 *p)
51795 {
51796 - return (__force __le32)*p;
51797 + return (__force const __le32)*p;
51798 }
51799 static inline __u32 __le32_to_cpup(const __le32 *p)
51800 {
51801 - return (__force __u32)*p;
51802 + return (__force const __u32)*p;
51803 }
51804 static inline __le16 __cpu_to_le16p(const __u16 *p)
51805 {
51806 - return (__force __le16)*p;
51807 + return (__force const __le16)*p;
51808 }
51809 static inline __u16 __le16_to_cpup(const __le16 *p)
51810 {
51811 - return (__force __u16)*p;
51812 + return (__force const __u16)*p;
51813 }
51814 static inline __be64 __cpu_to_be64p(const __u64 *p)
51815 {
51816 - return (__force __be64)__swab64p(p);
51817 + return (__force const __be64)__swab64p(p);
51818 }
51819 static inline __u64 __be64_to_cpup(const __be64 *p)
51820 {
51821 - return __swab64p((__u64 *)p);
51822 + return __swab64p((const __u64 *)p);
51823 }
51824 static inline __be32 __cpu_to_be32p(const __u32 *p)
51825 {
51826 - return (__force __be32)__swab32p(p);
51827 + return (__force const __be32)__swab32p(p);
51828 }
51829 static inline __u32 __be32_to_cpup(const __be32 *p)
51830 {
51831 - return __swab32p((__u32 *)p);
51832 + return __swab32p((const __u32 *)p);
51833 }
51834 static inline __be16 __cpu_to_be16p(const __u16 *p)
51835 {
51836 - return (__force __be16)__swab16p(p);
51837 + return (__force const __be16)__swab16p(p);
51838 }
51839 static inline __u16 __be16_to_cpup(const __be16 *p)
51840 {
51841 - return __swab16p((__u16 *)p);
51842 + return __swab16p((const __u16 *)p);
51843 }
51844 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
51845 #define __le64_to_cpus(x) do { (void)(x); } while (0)
51846 diff -urNp linux-2.6.39.4/include/linux/cache.h linux-2.6.39.4/include/linux/cache.h
51847 --- linux-2.6.39.4/include/linux/cache.h 2011-05-19 00:06:34.000000000 -0400
51848 +++ linux-2.6.39.4/include/linux/cache.h 2011-08-05 19:44:37.000000000 -0400
51849 @@ -16,6 +16,10 @@
51850 #define __read_mostly
51851 #endif
51852
51853 +#ifndef __read_only
51854 +#define __read_only __read_mostly
51855 +#endif
51856 +
51857 #ifndef ____cacheline_aligned
51858 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
51859 #endif
51860 diff -urNp linux-2.6.39.4/include/linux/capability.h linux-2.6.39.4/include/linux/capability.h
51861 --- linux-2.6.39.4/include/linux/capability.h 2011-05-19 00:06:34.000000000 -0400
51862 +++ linux-2.6.39.4/include/linux/capability.h 2011-08-05 19:44:37.000000000 -0400
51863 @@ -547,6 +547,9 @@ extern bool capable(int cap);
51864 extern bool ns_capable(struct user_namespace *ns, int cap);
51865 extern bool task_ns_capable(struct task_struct *t, int cap);
51866 extern bool nsown_capable(int cap);
51867 +extern bool task_ns_capable_nolog(struct task_struct *t, int cap);
51868 +extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
51869 +extern bool capable_nolog(int cap);
51870
51871 /* audit system wants to get cap info from files as well */
51872 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
51873 diff -urNp linux-2.6.39.4/include/linux/compiler-gcc4.h linux-2.6.39.4/include/linux/compiler-gcc4.h
51874 --- linux-2.6.39.4/include/linux/compiler-gcc4.h 2011-05-19 00:06:34.000000000 -0400
51875 +++ linux-2.6.39.4/include/linux/compiler-gcc4.h 2011-08-05 20:34:06.000000000 -0400
51876 @@ -31,6 +31,9 @@
51877
51878
51879 #if __GNUC_MINOR__ >= 5
51880 +
51881 +#define __no_const __attribute__((no_const))
51882 +
51883 /*
51884 * Mark a position in code as unreachable. This can be used to
51885 * suppress control flow warnings after asm blocks that transfer
51886 @@ -46,6 +49,11 @@
51887 #define __noclone __attribute__((__noclone__))
51888
51889 #endif
51890 +
51891 +#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
51892 +#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
51893 +#define __bos0(ptr) __bos((ptr), 0)
51894 +#define __bos1(ptr) __bos((ptr), 1)
51895 #endif
51896
51897 #if __GNUC_MINOR__ > 0
51898 diff -urNp linux-2.6.39.4/include/linux/compiler.h linux-2.6.39.4/include/linux/compiler.h
51899 --- linux-2.6.39.4/include/linux/compiler.h 2011-05-19 00:06:34.000000000 -0400
51900 +++ linux-2.6.39.4/include/linux/compiler.h 2011-08-05 20:34:06.000000000 -0400
51901 @@ -264,6 +264,10 @@ void ftrace_likely_update(struct ftrace_
51902 # define __attribute_const__ /* unimplemented */
51903 #endif
51904
51905 +#ifndef __no_const
51906 +# define __no_const
51907 +#endif
51908 +
51909 /*
51910 * Tell gcc if a function is cold. The compiler will assume any path
51911 * directly leading to the call is unlikely.
51912 @@ -273,6 +277,22 @@ void ftrace_likely_update(struct ftrace_
51913 #define __cold
51914 #endif
51915
51916 +#ifndef __alloc_size
51917 +#define __alloc_size(...)
51918 +#endif
51919 +
51920 +#ifndef __bos
51921 +#define __bos(ptr, arg)
51922 +#endif
51923 +
51924 +#ifndef __bos0
51925 +#define __bos0(ptr)
51926 +#endif
51927 +
51928 +#ifndef __bos1
51929 +#define __bos1(ptr)
51930 +#endif
51931 +
51932 /* Simple shorthand for a section definition */
51933 #ifndef __section
51934 # define __section(S) __attribute__ ((__section__(#S)))
51935 @@ -306,6 +326,7 @@ void ftrace_likely_update(struct ftrace_
51936 * use is to mediate communication between process-level code and irq/NMI
51937 * handlers, all running on the same CPU.
51938 */
51939 -#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
51940 +#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
51941 +#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
51942
51943 #endif /* __LINUX_COMPILER_H */
51944 diff -urNp linux-2.6.39.4/include/linux/cpuset.h linux-2.6.39.4/include/linux/cpuset.h
51945 --- linux-2.6.39.4/include/linux/cpuset.h 2011-05-19 00:06:34.000000000 -0400
51946 +++ linux-2.6.39.4/include/linux/cpuset.h 2011-08-05 19:44:37.000000000 -0400
51947 @@ -118,7 +118,7 @@ static inline void put_mems_allowed(void
51948 * nodemask.
51949 */
51950 smp_mb();
51951 - --ACCESS_ONCE(current->mems_allowed_change_disable);
51952 + --ACCESS_ONCE_RW(current->mems_allowed_change_disable);
51953 }
51954
51955 static inline void set_mems_allowed(nodemask_t nodemask)
51956 diff -urNp linux-2.6.39.4/include/linux/crypto.h linux-2.6.39.4/include/linux/crypto.h
51957 --- linux-2.6.39.4/include/linux/crypto.h 2011-05-19 00:06:34.000000000 -0400
51958 +++ linux-2.6.39.4/include/linux/crypto.h 2011-08-05 20:34:06.000000000 -0400
51959 @@ -361,7 +361,7 @@ struct cipher_tfm {
51960 const u8 *key, unsigned int keylen);
51961 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51962 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
51963 -};
51964 +} __no_const;
51965
51966 struct hash_tfm {
51967 int (*init)(struct hash_desc *desc);
51968 @@ -382,13 +382,13 @@ struct compress_tfm {
51969 int (*cot_decompress)(struct crypto_tfm *tfm,
51970 const u8 *src, unsigned int slen,
51971 u8 *dst, unsigned int *dlen);
51972 -};
51973 +} __no_const;
51974
51975 struct rng_tfm {
51976 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
51977 unsigned int dlen);
51978 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
51979 -};
51980 +} __no_const;
51981
51982 #define crt_ablkcipher crt_u.ablkcipher
51983 #define crt_aead crt_u.aead
51984 diff -urNp linux-2.6.39.4/include/linux/decompress/mm.h linux-2.6.39.4/include/linux/decompress/mm.h
51985 --- linux-2.6.39.4/include/linux/decompress/mm.h 2011-05-19 00:06:34.000000000 -0400
51986 +++ linux-2.6.39.4/include/linux/decompress/mm.h 2011-08-05 19:44:37.000000000 -0400
51987 @@ -77,7 +77,7 @@ static void free(void *where)
51988 * warnings when not needed (indeed large_malloc / large_free are not
51989 * needed by inflate */
51990
51991 -#define malloc(a) kmalloc(a, GFP_KERNEL)
51992 +#define malloc(a) kmalloc((a), GFP_KERNEL)
51993 #define free(a) kfree(a)
51994
51995 #define large_malloc(a) vmalloc(a)
51996 diff -urNp linux-2.6.39.4/include/linux/dma-mapping.h linux-2.6.39.4/include/linux/dma-mapping.h
51997 --- linux-2.6.39.4/include/linux/dma-mapping.h 2011-05-19 00:06:34.000000000 -0400
51998 +++ linux-2.6.39.4/include/linux/dma-mapping.h 2011-08-05 20:34:06.000000000 -0400
51999 @@ -49,7 +49,7 @@ struct dma_map_ops {
52000 int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
52001 int (*dma_supported)(struct device *dev, u64 mask);
52002 int (*set_dma_mask)(struct device *dev, u64 mask);
52003 - int is_phys;
52004 + const int is_phys;
52005 };
52006
52007 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
52008 diff -urNp linux-2.6.39.4/include/linux/efi.h linux-2.6.39.4/include/linux/efi.h
52009 --- linux-2.6.39.4/include/linux/efi.h 2011-06-25 12:55:23.000000000 -0400
52010 +++ linux-2.6.39.4/include/linux/efi.h 2011-08-05 20:34:06.000000000 -0400
52011 @@ -409,7 +409,7 @@ struct efivar_operations {
52012 efi_get_variable_t *get_variable;
52013 efi_get_next_variable_t *get_next_variable;
52014 efi_set_variable_t *set_variable;
52015 -};
52016 +} __no_const;
52017
52018 struct efivars {
52019 /*
52020 diff -urNp linux-2.6.39.4/include/linux/elf.h linux-2.6.39.4/include/linux/elf.h
52021 --- linux-2.6.39.4/include/linux/elf.h 2011-05-19 00:06:34.000000000 -0400
52022 +++ linux-2.6.39.4/include/linux/elf.h 2011-08-05 19:44:37.000000000 -0400
52023 @@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
52024 #define PT_GNU_EH_FRAME 0x6474e550
52025
52026 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
52027 +#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
52028 +
52029 +#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
52030 +
52031 +/* Constants for the e_flags field */
52032 +#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
52033 +#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
52034 +#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
52035 +#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
52036 +/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
52037 +#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
52038
52039 /*
52040 * Extended Numbering
52041 @@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword;
52042 #define DT_DEBUG 21
52043 #define DT_TEXTREL 22
52044 #define DT_JMPREL 23
52045 +#define DT_FLAGS 30
52046 + #define DF_TEXTREL 0x00000004
52047 #define DT_ENCODING 32
52048 #define OLD_DT_LOOS 0x60000000
52049 #define DT_LOOS 0x6000000d
52050 @@ -252,6 +265,19 @@ typedef struct elf64_hdr {
52051 #define PF_W 0x2
52052 #define PF_X 0x1
52053
52054 +#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
52055 +#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
52056 +#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
52057 +#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
52058 +#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
52059 +#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
52060 +/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
52061 +/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
52062 +#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
52063 +#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
52064 +#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
52065 +#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
52066 +
52067 typedef struct elf32_phdr{
52068 Elf32_Word p_type;
52069 Elf32_Off p_offset;
52070 @@ -344,6 +370,8 @@ typedef struct elf64_shdr {
52071 #define EI_OSABI 7
52072 #define EI_PAD 8
52073
52074 +#define EI_PAX 14
52075 +
52076 #define ELFMAG0 0x7f /* EI_MAG */
52077 #define ELFMAG1 'E'
52078 #define ELFMAG2 'L'
52079 @@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC [];
52080 #define elf_note elf32_note
52081 #define elf_addr_t Elf32_Off
52082 #define Elf_Half Elf32_Half
52083 +#define elf_dyn Elf32_Dyn
52084
52085 #else
52086
52087 @@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC [];
52088 #define elf_note elf64_note
52089 #define elf_addr_t Elf64_Off
52090 #define Elf_Half Elf64_Half
52091 +#define elf_dyn Elf64_Dyn
52092
52093 #endif
52094
52095 diff -urNp linux-2.6.39.4/include/linux/firewire.h linux-2.6.39.4/include/linux/firewire.h
52096 --- linux-2.6.39.4/include/linux/firewire.h 2011-05-19 00:06:34.000000000 -0400
52097 +++ linux-2.6.39.4/include/linux/firewire.h 2011-08-05 20:34:06.000000000 -0400
52098 @@ -429,7 +429,7 @@ struct fw_iso_context {
52099 union {
52100 fw_iso_callback_t sc;
52101 fw_iso_mc_callback_t mc;
52102 - } callback;
52103 + } __no_const callback;
52104 void *callback_data;
52105 };
52106
52107 diff -urNp linux-2.6.39.4/include/linux/fscache-cache.h linux-2.6.39.4/include/linux/fscache-cache.h
52108 --- linux-2.6.39.4/include/linux/fscache-cache.h 2011-05-19 00:06:34.000000000 -0400
52109 +++ linux-2.6.39.4/include/linux/fscache-cache.h 2011-08-05 19:44:37.000000000 -0400
52110 @@ -113,7 +113,7 @@ struct fscache_operation {
52111 #endif
52112 };
52113
52114 -extern atomic_t fscache_op_debug_id;
52115 +extern atomic_unchecked_t fscache_op_debug_id;
52116 extern void fscache_op_work_func(struct work_struct *work);
52117
52118 extern void fscache_enqueue_operation(struct fscache_operation *);
52119 @@ -133,7 +133,7 @@ static inline void fscache_operation_ini
52120 {
52121 INIT_WORK(&op->work, fscache_op_work_func);
52122 atomic_set(&op->usage, 1);
52123 - op->debug_id = atomic_inc_return(&fscache_op_debug_id);
52124 + op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
52125 op->processor = processor;
52126 op->release = release;
52127 INIT_LIST_HEAD(&op->pend_link);
52128 diff -urNp linux-2.6.39.4/include/linux/fs.h linux-2.6.39.4/include/linux/fs.h
52129 --- linux-2.6.39.4/include/linux/fs.h 2011-05-19 00:06:34.000000000 -0400
52130 +++ linux-2.6.39.4/include/linux/fs.h 2011-08-05 20:34:06.000000000 -0400
52131 @@ -108,6 +108,11 @@ struct inodes_stat_t {
52132 /* File was opened by fanotify and shouldn't generate fanotify events */
52133 #define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
52134
52135 +/* Hack for grsec so as not to require read permission simply to execute
52136 + * a binary
52137 + */
52138 +#define FMODE_GREXEC ((__force fmode_t)0x2000000)
52139 +
52140 /*
52141 * The below are the various read and write types that we support. Some of
52142 * them include behavioral modifiers that send information down to the
52143 @@ -1535,7 +1540,7 @@ struct block_device_operations;
52144 * the big kernel lock held in all filesystems.
52145 */
52146 struct file_operations {
52147 - struct module *owner;
52148 + struct module * const owner;
52149 loff_t (*llseek) (struct file *, loff_t, int);
52150 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
52151 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
52152 @@ -1563,6 +1568,7 @@ struct file_operations {
52153 long (*fallocate)(struct file *file, int mode, loff_t offset,
52154 loff_t len);
52155 };
52156 +typedef struct file_operations __no_const file_operations_no_const;
52157
52158 #define IPERM_FLAG_RCU 0x0001
52159
52160 diff -urNp linux-2.6.39.4/include/linux/fs_struct.h linux-2.6.39.4/include/linux/fs_struct.h
52161 --- linux-2.6.39.4/include/linux/fs_struct.h 2011-05-19 00:06:34.000000000 -0400
52162 +++ linux-2.6.39.4/include/linux/fs_struct.h 2011-08-05 19:44:37.000000000 -0400
52163 @@ -6,7 +6,7 @@
52164 #include <linux/seqlock.h>
52165
52166 struct fs_struct {
52167 - int users;
52168 + atomic_t users;
52169 spinlock_t lock;
52170 seqcount_t seq;
52171 int umask;
52172 diff -urNp linux-2.6.39.4/include/linux/ftrace_event.h linux-2.6.39.4/include/linux/ftrace_event.h
52173 --- linux-2.6.39.4/include/linux/ftrace_event.h 2011-05-19 00:06:34.000000000 -0400
52174 +++ linux-2.6.39.4/include/linux/ftrace_event.h 2011-08-05 20:34:06.000000000 -0400
52175 @@ -84,7 +84,7 @@ struct trace_event_functions {
52176 trace_print_func raw;
52177 trace_print_func hex;
52178 trace_print_func binary;
52179 -};
52180 +} __no_const;
52181
52182 struct trace_event {
52183 struct hlist_node node;
52184 @@ -235,7 +235,7 @@ extern int trace_define_field(struct ftr
52185 extern int trace_add_event_call(struct ftrace_event_call *call);
52186 extern void trace_remove_event_call(struct ftrace_event_call *call);
52187
52188 -#define is_signed_type(type) (((type)(-1)) < 0)
52189 +#define is_signed_type(type) (((type)(-1)) < (type)1)
52190
52191 int trace_set_clr_event(const char *system, const char *event, int set);
52192
52193 diff -urNp linux-2.6.39.4/include/linux/genhd.h linux-2.6.39.4/include/linux/genhd.h
52194 --- linux-2.6.39.4/include/linux/genhd.h 2011-06-03 00:04:14.000000000 -0400
52195 +++ linux-2.6.39.4/include/linux/genhd.h 2011-08-05 19:44:37.000000000 -0400
52196 @@ -184,7 +184,7 @@ struct gendisk {
52197 struct kobject *slave_dir;
52198
52199 struct timer_rand_state *random;
52200 - atomic_t sync_io; /* RAID */
52201 + atomic_unchecked_t sync_io; /* RAID */
52202 struct disk_events *ev;
52203 #ifdef CONFIG_BLK_DEV_INTEGRITY
52204 struct blk_integrity *integrity;
52205 diff -urNp linux-2.6.39.4/include/linux/gracl.h linux-2.6.39.4/include/linux/gracl.h
52206 --- linux-2.6.39.4/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
52207 +++ linux-2.6.39.4/include/linux/gracl.h 2011-08-05 19:44:37.000000000 -0400
52208 @@ -0,0 +1,317 @@
52209 +#ifndef GR_ACL_H
52210 +#define GR_ACL_H
52211 +
52212 +#include <linux/grdefs.h>
52213 +#include <linux/resource.h>
52214 +#include <linux/capability.h>
52215 +#include <linux/dcache.h>
52216 +#include <asm/resource.h>
52217 +
52218 +/* Major status information */
52219 +
52220 +#define GR_VERSION "grsecurity 2.2.2"
52221 +#define GRSECURITY_VERSION 0x2202
52222 +
52223 +enum {
52224 + GR_SHUTDOWN = 0,
52225 + GR_ENABLE = 1,
52226 + GR_SPROLE = 2,
52227 + GR_RELOAD = 3,
52228 + GR_SEGVMOD = 4,
52229 + GR_STATUS = 5,
52230 + GR_UNSPROLE = 6,
52231 + GR_PASSSET = 7,
52232 + GR_SPROLEPAM = 8,
52233 +};
52234 +
52235 +/* Password setup definitions
52236 + * kernel/grhash.c */
52237 +enum {
52238 + GR_PW_LEN = 128,
52239 + GR_SALT_LEN = 16,
52240 + GR_SHA_LEN = 32,
52241 +};
52242 +
52243 +enum {
52244 + GR_SPROLE_LEN = 64,
52245 +};
52246 +
52247 +enum {
52248 + GR_NO_GLOB = 0,
52249 + GR_REG_GLOB,
52250 + GR_CREATE_GLOB
52251 +};
52252 +
52253 +#define GR_NLIMITS 32
52254 +
52255 +/* Begin Data Structures */
52256 +
52257 +struct sprole_pw {
52258 + unsigned char *rolename;
52259 + unsigned char salt[GR_SALT_LEN];
52260 + unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
52261 +};
52262 +
52263 +struct name_entry {
52264 + __u32 key;
52265 + ino_t inode;
52266 + dev_t device;
52267 + char *name;
52268 + __u16 len;
52269 + __u8 deleted;
52270 + struct name_entry *prev;
52271 + struct name_entry *next;
52272 +};
52273 +
52274 +struct inodev_entry {
52275 + struct name_entry *nentry;
52276 + struct inodev_entry *prev;
52277 + struct inodev_entry *next;
52278 +};
52279 +
52280 +struct acl_role_db {
52281 + struct acl_role_label **r_hash;
52282 + __u32 r_size;
52283 +};
52284 +
52285 +struct inodev_db {
52286 + struct inodev_entry **i_hash;
52287 + __u32 i_size;
52288 +};
52289 +
52290 +struct name_db {
52291 + struct name_entry **n_hash;
52292 + __u32 n_size;
52293 +};
52294 +
52295 +struct crash_uid {
52296 + uid_t uid;
52297 + unsigned long expires;
52298 +};
52299 +
52300 +struct gr_hash_struct {
52301 + void **table;
52302 + void **nametable;
52303 + void *first;
52304 + __u32 table_size;
52305 + __u32 used_size;
52306 + int type;
52307 +};
52308 +
52309 +/* Userspace Grsecurity ACL data structures */
52310 +
52311 +struct acl_subject_label {
52312 + char *filename;
52313 + ino_t inode;
52314 + dev_t device;
52315 + __u32 mode;
52316 + kernel_cap_t cap_mask;
52317 + kernel_cap_t cap_lower;
52318 + kernel_cap_t cap_invert_audit;
52319 +
52320 + struct rlimit res[GR_NLIMITS];
52321 + __u32 resmask;
52322 +
52323 + __u8 user_trans_type;
52324 + __u8 group_trans_type;
52325 + uid_t *user_transitions;
52326 + gid_t *group_transitions;
52327 + __u16 user_trans_num;
52328 + __u16 group_trans_num;
52329 +
52330 + __u32 sock_families[2];
52331 + __u32 ip_proto[8];
52332 + __u32 ip_type;
52333 + struct acl_ip_label **ips;
52334 + __u32 ip_num;
52335 + __u32 inaddr_any_override;
52336 +
52337 + __u32 crashes;
52338 + unsigned long expires;
52339 +
52340 + struct acl_subject_label *parent_subject;
52341 + struct gr_hash_struct *hash;
52342 + struct acl_subject_label *prev;
52343 + struct acl_subject_label *next;
52344 +
52345 + struct acl_object_label **obj_hash;
52346 + __u32 obj_hash_size;
52347 + __u16 pax_flags;
52348 +};
52349 +
52350 +struct role_allowed_ip {
52351 + __u32 addr;
52352 + __u32 netmask;
52353 +
52354 + struct role_allowed_ip *prev;
52355 + struct role_allowed_ip *next;
52356 +};
52357 +
52358 +struct role_transition {
52359 + char *rolename;
52360 +
52361 + struct role_transition *prev;
52362 + struct role_transition *next;
52363 +};
52364 +
52365 +struct acl_role_label {
52366 + char *rolename;
52367 + uid_t uidgid;
52368 + __u16 roletype;
52369 +
52370 + __u16 auth_attempts;
52371 + unsigned long expires;
52372 +
52373 + struct acl_subject_label *root_label;
52374 + struct gr_hash_struct *hash;
52375 +
52376 + struct acl_role_label *prev;
52377 + struct acl_role_label *next;
52378 +
52379 + struct role_transition *transitions;
52380 + struct role_allowed_ip *allowed_ips;
52381 + uid_t *domain_children;
52382 + __u16 domain_child_num;
52383 +
52384 + struct acl_subject_label **subj_hash;
52385 + __u32 subj_hash_size;
52386 +};
52387 +
52388 +struct user_acl_role_db {
52389 + struct acl_role_label **r_table;
52390 + __u32 num_pointers; /* Number of allocations to track */
52391 + __u32 num_roles; /* Number of roles */
52392 + __u32 num_domain_children; /* Number of domain children */
52393 + __u32 num_subjects; /* Number of subjects */
52394 + __u32 num_objects; /* Number of objects */
52395 +};
52396 +
52397 +struct acl_object_label {
52398 + char *filename;
52399 + ino_t inode;
52400 + dev_t device;
52401 + __u32 mode;
52402 +
52403 + struct acl_subject_label *nested;
52404 + struct acl_object_label *globbed;
52405 +
52406 + /* next two structures not used */
52407 +
52408 + struct acl_object_label *prev;
52409 + struct acl_object_label *next;
52410 +};
52411 +
52412 +struct acl_ip_label {
52413 + char *iface;
52414 + __u32 addr;
52415 + __u32 netmask;
52416 + __u16 low, high;
52417 + __u8 mode;
52418 + __u32 type;
52419 + __u32 proto[8];
52420 +
52421 + /* next two structures not used */
52422 +
52423 + struct acl_ip_label *prev;
52424 + struct acl_ip_label *next;
52425 +};
52426 +
52427 +struct gr_arg {
52428 + struct user_acl_role_db role_db;
52429 + unsigned char pw[GR_PW_LEN];
52430 + unsigned char salt[GR_SALT_LEN];
52431 + unsigned char sum[GR_SHA_LEN];
52432 + unsigned char sp_role[GR_SPROLE_LEN];
52433 + struct sprole_pw *sprole_pws;
52434 + dev_t segv_device;
52435 + ino_t segv_inode;
52436 + uid_t segv_uid;
52437 + __u16 num_sprole_pws;
52438 + __u16 mode;
52439 +};
52440 +
52441 +struct gr_arg_wrapper {
52442 + struct gr_arg *arg;
52443 + __u32 version;
52444 + __u32 size;
52445 +};
52446 +
52447 +struct subject_map {
52448 + struct acl_subject_label *user;
52449 + struct acl_subject_label *kernel;
52450 + struct subject_map *prev;
52451 + struct subject_map *next;
52452 +};
52453 +
52454 +struct acl_subj_map_db {
52455 + struct subject_map **s_hash;
52456 + __u32 s_size;
52457 +};
52458 +
52459 +/* End Data Structures Section */
52460 +
52461 +/* Hash functions generated by empirical testing by Brad Spengler
52462 + Makes good use of the low bits of the inode. Generally 0-1 times
52463 + in loop for successful match. 0-3 for unsuccessful match.
52464 + Shift/add algorithm with modulus of table size and an XOR*/
52465 +
52466 +static __inline__ unsigned int
52467 +rhash(const uid_t uid, const __u16 type, const unsigned int sz)
52468 +{
52469 + return ((((uid + type) << (16 + type)) ^ uid) % sz);
52470 +}
52471 +
52472 + static __inline__ unsigned int
52473 +shash(const struct acl_subject_label *userp, const unsigned int sz)
52474 +{
52475 + return ((const unsigned long)userp % sz);
52476 +}
52477 +
52478 +static __inline__ unsigned int
52479 +fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
52480 +{
52481 + return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
52482 +}
52483 +
52484 +static __inline__ unsigned int
52485 +nhash(const char *name, const __u16 len, const unsigned int sz)
52486 +{
52487 + return full_name_hash((const unsigned char *)name, len) % sz;
52488 +}
52489 +
52490 +#define FOR_EACH_ROLE_START(role) \
52491 + role = role_list; \
52492 + while (role) {
52493 +
52494 +#define FOR_EACH_ROLE_END(role) \
52495 + role = role->prev; \
52496 + }
52497 +
52498 +#define FOR_EACH_SUBJECT_START(role,subj,iter) \
52499 + subj = NULL; \
52500 + iter = 0; \
52501 + while (iter < role->subj_hash_size) { \
52502 + if (subj == NULL) \
52503 + subj = role->subj_hash[iter]; \
52504 + if (subj == NULL) { \
52505 + iter++; \
52506 + continue; \
52507 + }
52508 +
52509 +#define FOR_EACH_SUBJECT_END(subj,iter) \
52510 + subj = subj->next; \
52511 + if (subj == NULL) \
52512 + iter++; \
52513 + }
52514 +
52515 +
52516 +#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
52517 + subj = role->hash->first; \
52518 + while (subj != NULL) {
52519 +
52520 +#define FOR_EACH_NESTED_SUBJECT_END(subj) \
52521 + subj = subj->next; \
52522 + }
52523 +
52524 +#endif
52525 +
52526 diff -urNp linux-2.6.39.4/include/linux/gralloc.h linux-2.6.39.4/include/linux/gralloc.h
52527 --- linux-2.6.39.4/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
52528 +++ linux-2.6.39.4/include/linux/gralloc.h 2011-08-05 19:44:37.000000000 -0400
52529 @@ -0,0 +1,9 @@
52530 +#ifndef __GRALLOC_H
52531 +#define __GRALLOC_H
52532 +
52533 +void acl_free_all(void);
52534 +int acl_alloc_stack_init(unsigned long size);
52535 +void *acl_alloc(unsigned long len);
52536 +void *acl_alloc_num(unsigned long num, unsigned long len);
52537 +
52538 +#endif
52539 diff -urNp linux-2.6.39.4/include/linux/grdefs.h linux-2.6.39.4/include/linux/grdefs.h
52540 --- linux-2.6.39.4/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
52541 +++ linux-2.6.39.4/include/linux/grdefs.h 2011-08-05 19:44:37.000000000 -0400
52542 @@ -0,0 +1,140 @@
52543 +#ifndef GRDEFS_H
52544 +#define GRDEFS_H
52545 +
52546 +/* Begin grsecurity status declarations */
52547 +
52548 +enum {
52549 + GR_READY = 0x01,
52550 + GR_STATUS_INIT = 0x00 // disabled state
52551 +};
52552 +
52553 +/* Begin ACL declarations */
52554 +
52555 +/* Role flags */
52556 +
52557 +enum {
52558 + GR_ROLE_USER = 0x0001,
52559 + GR_ROLE_GROUP = 0x0002,
52560 + GR_ROLE_DEFAULT = 0x0004,
52561 + GR_ROLE_SPECIAL = 0x0008,
52562 + GR_ROLE_AUTH = 0x0010,
52563 + GR_ROLE_NOPW = 0x0020,
52564 + GR_ROLE_GOD = 0x0040,
52565 + GR_ROLE_LEARN = 0x0080,
52566 + GR_ROLE_TPE = 0x0100,
52567 + GR_ROLE_DOMAIN = 0x0200,
52568 + GR_ROLE_PAM = 0x0400,
52569 + GR_ROLE_PERSIST = 0x0800
52570 +};
52571 +
52572 +/* ACL Subject and Object mode flags */
52573 +enum {
52574 + GR_DELETED = 0x80000000
52575 +};
52576 +
52577 +/* ACL Object-only mode flags */
52578 +enum {
52579 + GR_READ = 0x00000001,
52580 + GR_APPEND = 0x00000002,
52581 + GR_WRITE = 0x00000004,
52582 + GR_EXEC = 0x00000008,
52583 + GR_FIND = 0x00000010,
52584 + GR_INHERIT = 0x00000020,
52585 + GR_SETID = 0x00000040,
52586 + GR_CREATE = 0x00000080,
52587 + GR_DELETE = 0x00000100,
52588 + GR_LINK = 0x00000200,
52589 + GR_AUDIT_READ = 0x00000400,
52590 + GR_AUDIT_APPEND = 0x00000800,
52591 + GR_AUDIT_WRITE = 0x00001000,
52592 + GR_AUDIT_EXEC = 0x00002000,
52593 + GR_AUDIT_FIND = 0x00004000,
52594 + GR_AUDIT_INHERIT= 0x00008000,
52595 + GR_AUDIT_SETID = 0x00010000,
52596 + GR_AUDIT_CREATE = 0x00020000,
52597 + GR_AUDIT_DELETE = 0x00040000,
52598 + GR_AUDIT_LINK = 0x00080000,
52599 + GR_PTRACERD = 0x00100000,
52600 + GR_NOPTRACE = 0x00200000,
52601 + GR_SUPPRESS = 0x00400000,
52602 + GR_NOLEARN = 0x00800000,
52603 + GR_INIT_TRANSFER= 0x01000000
52604 +};
52605 +
52606 +#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
52607 + GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
52608 + GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
52609 +
52610 +/* ACL subject-only mode flags */
52611 +enum {
52612 + GR_KILL = 0x00000001,
52613 + GR_VIEW = 0x00000002,
52614 + GR_PROTECTED = 0x00000004,
52615 + GR_LEARN = 0x00000008,
52616 + GR_OVERRIDE = 0x00000010,
52617 + /* just a placeholder, this mode is only used in userspace */
52618 + GR_DUMMY = 0x00000020,
52619 + GR_PROTSHM = 0x00000040,
52620 + GR_KILLPROC = 0x00000080,
52621 + GR_KILLIPPROC = 0x00000100,
52622 + /* just a placeholder, this mode is only used in userspace */
52623 + GR_NOTROJAN = 0x00000200,
52624 + GR_PROTPROCFD = 0x00000400,
52625 + GR_PROCACCT = 0x00000800,
52626 + GR_RELAXPTRACE = 0x00001000,
52627 + GR_NESTED = 0x00002000,
52628 + GR_INHERITLEARN = 0x00004000,
52629 + GR_PROCFIND = 0x00008000,
52630 + GR_POVERRIDE = 0x00010000,
52631 + GR_KERNELAUTH = 0x00020000,
52632 + GR_ATSECURE = 0x00040000,
52633 + GR_SHMEXEC = 0x00080000
52634 +};
52635 +
52636 +enum {
52637 + GR_PAX_ENABLE_SEGMEXEC = 0x0001,
52638 + GR_PAX_ENABLE_PAGEEXEC = 0x0002,
52639 + GR_PAX_ENABLE_MPROTECT = 0x0004,
52640 + GR_PAX_ENABLE_RANDMMAP = 0x0008,
52641 + GR_PAX_ENABLE_EMUTRAMP = 0x0010,
52642 + GR_PAX_DISABLE_SEGMEXEC = 0x0100,
52643 + GR_PAX_DISABLE_PAGEEXEC = 0x0200,
52644 + GR_PAX_DISABLE_MPROTECT = 0x0400,
52645 + GR_PAX_DISABLE_RANDMMAP = 0x0800,
52646 + GR_PAX_DISABLE_EMUTRAMP = 0x1000,
52647 +};
52648 +
52649 +enum {
52650 + GR_ID_USER = 0x01,
52651 + GR_ID_GROUP = 0x02,
52652 +};
52653 +
52654 +enum {
52655 + GR_ID_ALLOW = 0x01,
52656 + GR_ID_DENY = 0x02,
52657 +};
52658 +
52659 +#define GR_CRASH_RES 31
52660 +#define GR_UIDTABLE_MAX 500
52661 +
52662 +/* begin resource learning section */
52663 +enum {
52664 + GR_RLIM_CPU_BUMP = 60,
52665 + GR_RLIM_FSIZE_BUMP = 50000,
52666 + GR_RLIM_DATA_BUMP = 10000,
52667 + GR_RLIM_STACK_BUMP = 1000,
52668 + GR_RLIM_CORE_BUMP = 10000,
52669 + GR_RLIM_RSS_BUMP = 500000,
52670 + GR_RLIM_NPROC_BUMP = 1,
52671 + GR_RLIM_NOFILE_BUMP = 5,
52672 + GR_RLIM_MEMLOCK_BUMP = 50000,
52673 + GR_RLIM_AS_BUMP = 500000,
52674 + GR_RLIM_LOCKS_BUMP = 2,
52675 + GR_RLIM_SIGPENDING_BUMP = 5,
52676 + GR_RLIM_MSGQUEUE_BUMP = 10000,
52677 + GR_RLIM_NICE_BUMP = 1,
52678 + GR_RLIM_RTPRIO_BUMP = 1,
52679 + GR_RLIM_RTTIME_BUMP = 1000000
52680 +};
52681 +
52682 +#endif
52683 diff -urNp linux-2.6.39.4/include/linux/grinternal.h linux-2.6.39.4/include/linux/grinternal.h
52684 --- linux-2.6.39.4/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
52685 +++ linux-2.6.39.4/include/linux/grinternal.h 2011-08-05 19:44:37.000000000 -0400
52686 @@ -0,0 +1,219 @@
52687 +#ifndef __GRINTERNAL_H
52688 +#define __GRINTERNAL_H
52689 +
52690 +#ifdef CONFIG_GRKERNSEC
52691 +
52692 +#include <linux/fs.h>
52693 +#include <linux/mnt_namespace.h>
52694 +#include <linux/nsproxy.h>
52695 +#include <linux/gracl.h>
52696 +#include <linux/grdefs.h>
52697 +#include <linux/grmsg.h>
52698 +
52699 +void gr_add_learn_entry(const char *fmt, ...)
52700 + __attribute__ ((format (printf, 1, 2)));
52701 +__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
52702 + const struct vfsmount *mnt);
52703 +__u32 gr_check_create(const struct dentry *new_dentry,
52704 + const struct dentry *parent,
52705 + const struct vfsmount *mnt, const __u32 mode);
52706 +int gr_check_protected_task(const struct task_struct *task);
52707 +__u32 to_gr_audit(const __u32 reqmode);
52708 +int gr_set_acls(const int type);
52709 +int gr_apply_subject_to_task(struct task_struct *task);
52710 +int gr_acl_is_enabled(void);
52711 +char gr_roletype_to_char(void);
52712 +
52713 +void gr_handle_alertkill(struct task_struct *task);
52714 +char *gr_to_filename(const struct dentry *dentry,
52715 + const struct vfsmount *mnt);
52716 +char *gr_to_filename1(const struct dentry *dentry,
52717 + const struct vfsmount *mnt);
52718 +char *gr_to_filename2(const struct dentry *dentry,
52719 + const struct vfsmount *mnt);
52720 +char *gr_to_filename3(const struct dentry *dentry,
52721 + const struct vfsmount *mnt);
52722 +
52723 +extern int grsec_enable_harden_ptrace;
52724 +extern int grsec_enable_link;
52725 +extern int grsec_enable_fifo;
52726 +extern int grsec_enable_execve;
52727 +extern int grsec_enable_shm;
52728 +extern int grsec_enable_execlog;
52729 +extern int grsec_enable_signal;
52730 +extern int grsec_enable_audit_ptrace;
52731 +extern int grsec_enable_forkfail;
52732 +extern int grsec_enable_time;
52733 +extern int grsec_enable_rofs;
52734 +extern int grsec_enable_chroot_shmat;
52735 +extern int grsec_enable_chroot_mount;
52736 +extern int grsec_enable_chroot_double;
52737 +extern int grsec_enable_chroot_pivot;
52738 +extern int grsec_enable_chroot_chdir;
52739 +extern int grsec_enable_chroot_chmod;
52740 +extern int grsec_enable_chroot_mknod;
52741 +extern int grsec_enable_chroot_fchdir;
52742 +extern int grsec_enable_chroot_nice;
52743 +extern int grsec_enable_chroot_execlog;
52744 +extern int grsec_enable_chroot_caps;
52745 +extern int grsec_enable_chroot_sysctl;
52746 +extern int grsec_enable_chroot_unix;
52747 +extern int grsec_enable_tpe;
52748 +extern int grsec_tpe_gid;
52749 +extern int grsec_enable_tpe_all;
52750 +extern int grsec_enable_tpe_invert;
52751 +extern int grsec_enable_socket_all;
52752 +extern int grsec_socket_all_gid;
52753 +extern int grsec_enable_socket_client;
52754 +extern int grsec_socket_client_gid;
52755 +extern int grsec_enable_socket_server;
52756 +extern int grsec_socket_server_gid;
52757 +extern int grsec_audit_gid;
52758 +extern int grsec_enable_group;
52759 +extern int grsec_enable_audit_textrel;
52760 +extern int grsec_enable_log_rwxmaps;
52761 +extern int grsec_enable_mount;
52762 +extern int grsec_enable_chdir;
52763 +extern int grsec_resource_logging;
52764 +extern int grsec_enable_blackhole;
52765 +extern int grsec_lastack_retries;
52766 +extern int grsec_enable_brute;
52767 +extern int grsec_lock;
52768 +
52769 +extern spinlock_t grsec_alert_lock;
52770 +extern unsigned long grsec_alert_wtime;
52771 +extern unsigned long grsec_alert_fyet;
52772 +
52773 +extern spinlock_t grsec_audit_lock;
52774 +
52775 +extern rwlock_t grsec_exec_file_lock;
52776 +
52777 +#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
52778 + gr_to_filename2((tsk)->exec_file->f_path.dentry, \
52779 + (tsk)->exec_file->f_vfsmnt) : "/")
52780 +
52781 +#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
52782 + gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
52783 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52784 +
52785 +#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
52786 + gr_to_filename((tsk)->exec_file->f_path.dentry, \
52787 + (tsk)->exec_file->f_vfsmnt) : "/")
52788 +
52789 +#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
52790 + gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
52791 + (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
52792 +
52793 +#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
52794 +
52795 +#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
52796 +
52797 +#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
52798 + (task)->pid, (cred)->uid, \
52799 + (cred)->euid, (cred)->gid, (cred)->egid, \
52800 + gr_parent_task_fullpath(task), \
52801 + (task)->real_parent->comm, (task)->real_parent->pid, \
52802 + (pcred)->uid, (pcred)->euid, \
52803 + (pcred)->gid, (pcred)->egid
52804 +
52805 +#define GR_CHROOT_CAPS {{ \
52806 + CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
52807 + CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
52808 + CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
52809 + CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
52810 + CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
52811 + CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
52812 +
52813 +#define security_learn(normal_msg,args...) \
52814 +({ \
52815 + read_lock(&grsec_exec_file_lock); \
52816 + gr_add_learn_entry(normal_msg "\n", ## args); \
52817 + read_unlock(&grsec_exec_file_lock); \
52818 +})
52819 +
52820 +enum {
52821 + GR_DO_AUDIT,
52822 + GR_DONT_AUDIT,
52823 + /* used for non-audit messages that we shouldn't kill the task on */
52824 + GR_DONT_AUDIT_GOOD
52825 +};
52826 +
52827 +enum {
52828 + GR_TTYSNIFF,
52829 + GR_RBAC,
52830 + GR_RBAC_STR,
52831 + GR_STR_RBAC,
52832 + GR_RBAC_MODE2,
52833 + GR_RBAC_MODE3,
52834 + GR_FILENAME,
52835 + GR_SYSCTL_HIDDEN,
52836 + GR_NOARGS,
52837 + GR_ONE_INT,
52838 + GR_ONE_INT_TWO_STR,
52839 + GR_ONE_STR,
52840 + GR_STR_INT,
52841 + GR_TWO_STR_INT,
52842 + GR_TWO_INT,
52843 + GR_TWO_U64,
52844 + GR_THREE_INT,
52845 + GR_FIVE_INT_TWO_STR,
52846 + GR_TWO_STR,
52847 + GR_THREE_STR,
52848 + GR_FOUR_STR,
52849 + GR_STR_FILENAME,
52850 + GR_FILENAME_STR,
52851 + GR_FILENAME_TWO_INT,
52852 + GR_FILENAME_TWO_INT_STR,
52853 + GR_TEXTREL,
52854 + GR_PTRACE,
52855 + GR_RESOURCE,
52856 + GR_CAP,
52857 + GR_SIG,
52858 + GR_SIG2,
52859 + GR_CRASH1,
52860 + GR_CRASH2,
52861 + GR_PSACCT,
52862 + GR_RWXMAP
52863 +};
52864 +
52865 +#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
52866 +#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
52867 +#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
52868 +#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
52869 +#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
52870 +#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
52871 +#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
52872 +#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
52873 +#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
52874 +#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
52875 +#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
52876 +#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
52877 +#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
52878 +#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
52879 +#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
52880 +#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
52881 +#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
52882 +#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
52883 +#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
52884 +#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
52885 +#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
52886 +#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
52887 +#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
52888 +#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
52889 +#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
52890 +#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
52891 +#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
52892 +#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
52893 +#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
52894 +#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
52895 +#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
52896 +#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
52897 +#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
52898 +#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
52899 +#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
52900 +
52901 +void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
52902 +
52903 +#endif
52904 +
52905 +#endif
52906 diff -urNp linux-2.6.39.4/include/linux/grmsg.h linux-2.6.39.4/include/linux/grmsg.h
52907 --- linux-2.6.39.4/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
52908 +++ linux-2.6.39.4/include/linux/grmsg.h 2011-08-05 19:44:37.000000000 -0400
52909 @@ -0,0 +1,108 @@
52910 +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
52911 +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
52912 +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
52913 +#define GR_STOPMOD_MSG "denied modification of module state by "
52914 +#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
52915 +#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
52916 +#define GR_IOPERM_MSG "denied use of ioperm() by "
52917 +#define GR_IOPL_MSG "denied use of iopl() by "
52918 +#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
52919 +#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
52920 +#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
52921 +#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
52922 +#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
52923 +#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
52924 +#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
52925 +#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
52926 +#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
52927 +#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
52928 +#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
52929 +#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
52930 +#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
52931 +#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
52932 +#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
52933 +#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
52934 +#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
52935 +#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
52936 +#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
52937 +#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
52938 +#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
52939 +#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
52940 +#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
52941 +#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
52942 +#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
52943 +#define GR_NPROC_MSG "denied overstep of process limit by "
52944 +#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
52945 +#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
52946 +#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
52947 +#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
52948 +#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
52949 +#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
52950 +#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
52951 +#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
52952 +#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
52953 +#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
52954 +#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
52955 +#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
52956 +#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
52957 +#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
52958 +#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
52959 +#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
52960 +#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
52961 +#define GR_INITF_ACL_MSG "init_variables() failed %s by "
52962 +#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
52963 +#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
52964 +#define GR_SHUTS_ACL_MSG "shutdown auth success for "
52965 +#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
52966 +#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
52967 +#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
52968 +#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
52969 +#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
52970 +#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
52971 +#define GR_ENABLEF_ACL_MSG "unable to load %s for "
52972 +#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
52973 +#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
52974 +#define GR_RELOADF_ACL_MSG "failed reload of %s for "
52975 +#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
52976 +#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
52977 +#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
52978 +#define GR_SPROLEF_ACL_MSG "special role %s failure for "
52979 +#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
52980 +#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
52981 +#define GR_INVMODE_ACL_MSG "invalid mode %d by "
52982 +#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
52983 +#define GR_FAILFORK_MSG "failed fork with errno %s by "
52984 +#define GR_NICE_CHROOT_MSG "denied priority change by "
52985 +#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
52986 +#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
52987 +#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
52988 +#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
52989 +#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
52990 +#define GR_TIME_MSG "time set by "
52991 +#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
52992 +#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
52993 +#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
52994 +#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
52995 +#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
52996 +#define GR_BIND_MSG "denied bind() by "
52997 +#define GR_CONNECT_MSG "denied connect() by "
52998 +#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
52999 +#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
53000 +#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
53001 +#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
53002 +#define GR_CAP_ACL_MSG "use of %s denied for "
53003 +#define GR_CAP_ACL_MSG2 "use of %s permitted for "
53004 +#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
53005 +#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
53006 +#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
53007 +#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
53008 +#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
53009 +#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
53010 +#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
53011 +#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
53012 +#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
53013 +#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
53014 +#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
53015 +#define GR_VM86_MSG "denied use of vm86 by "
53016 +#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
53017 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
53018 diff -urNp linux-2.6.39.4/include/linux/grsecurity.h linux-2.6.39.4/include/linux/grsecurity.h
53019 --- linux-2.6.39.4/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
53020 +++ linux-2.6.39.4/include/linux/grsecurity.h 2011-08-05 19:54:17.000000000 -0400
53021 @@ -0,0 +1,218 @@
53022 +#ifndef GR_SECURITY_H
53023 +#define GR_SECURITY_H
53024 +#include <linux/fs.h>
53025 +#include <linux/fs_struct.h>
53026 +#include <linux/binfmts.h>
53027 +#include <linux/gracl.h>
53028 +#include <linux/compat.h>
53029 +
53030 +/* notify of brain-dead configs */
53031 +#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53032 +#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
53033 +#endif
53034 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
53035 +#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
53036 +#endif
53037 +#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
53038 +#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
53039 +#endif
53040 +#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
53041 +#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
53042 +#endif
53043 +#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
53044 +#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
53045 +#endif
53046 +#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
53047 +#error "CONFIG_PAX enabled, but no PaX options are enabled."
53048 +#endif
53049 +
53050 +void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
53051 +void gr_handle_brute_check(void);
53052 +void gr_handle_kernel_exploit(void);
53053 +int gr_process_user_ban(void);
53054 +
53055 +char gr_roletype_to_char(void);
53056 +
53057 +int gr_acl_enable_at_secure(void);
53058 +
53059 +int gr_check_user_change(int real, int effective, int fs);
53060 +int gr_check_group_change(int real, int effective, int fs);
53061 +
53062 +void gr_del_task_from_ip_table(struct task_struct *p);
53063 +
53064 +int gr_pid_is_chrooted(struct task_struct *p);
53065 +int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
53066 +int gr_handle_chroot_nice(void);
53067 +int gr_handle_chroot_sysctl(const int op);
53068 +int gr_handle_chroot_setpriority(struct task_struct *p,
53069 + const int niceval);
53070 +int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
53071 +int gr_handle_chroot_chroot(const struct dentry *dentry,
53072 + const struct vfsmount *mnt);
53073 +int gr_handle_chroot_caps(struct path *path);
53074 +void gr_handle_chroot_chdir(struct path *path);
53075 +int gr_handle_chroot_chmod(const struct dentry *dentry,
53076 + const struct vfsmount *mnt, const int mode);
53077 +int gr_handle_chroot_mknod(const struct dentry *dentry,
53078 + const struct vfsmount *mnt, const int mode);
53079 +int gr_handle_chroot_mount(const struct dentry *dentry,
53080 + const struct vfsmount *mnt,
53081 + const char *dev_name);
53082 +int gr_handle_chroot_pivot(void);
53083 +int gr_handle_chroot_unix(const pid_t pid);
53084 +
53085 +int gr_handle_rawio(const struct inode *inode);
53086 +int gr_handle_nproc(void);
53087 +
53088 +void gr_handle_ioperm(void);
53089 +void gr_handle_iopl(void);
53090 +
53091 +int gr_tpe_allow(const struct file *file);
53092 +
53093 +void gr_set_chroot_entries(struct task_struct *task, struct path *path);
53094 +void gr_clear_chroot_entries(struct task_struct *task);
53095 +
53096 +void gr_log_forkfail(const int retval);
53097 +void gr_log_timechange(void);
53098 +void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
53099 +void gr_log_chdir(const struct dentry *dentry,
53100 + const struct vfsmount *mnt);
53101 +void gr_log_chroot_exec(const struct dentry *dentry,
53102 + const struct vfsmount *mnt);
53103 +void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
53104 +#ifdef CONFIG_COMPAT
53105 +void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
53106 +#endif
53107 +void gr_log_remount(const char *devname, const int retval);
53108 +void gr_log_unmount(const char *devname, const int retval);
53109 +void gr_log_mount(const char *from, const char *to, const int retval);
53110 +void gr_log_textrel(struct vm_area_struct *vma);
53111 +void gr_log_rwxmmap(struct file *file);
53112 +void gr_log_rwxmprotect(struct file *file);
53113 +
53114 +int gr_handle_follow_link(const struct inode *parent,
53115 + const struct inode *inode,
53116 + const struct dentry *dentry,
53117 + const struct vfsmount *mnt);
53118 +int gr_handle_fifo(const struct dentry *dentry,
53119 + const struct vfsmount *mnt,
53120 + const struct dentry *dir, const int flag,
53121 + const int acc_mode);
53122 +int gr_handle_hardlink(const struct dentry *dentry,
53123 + const struct vfsmount *mnt,
53124 + struct inode *inode,
53125 + const int mode, const char *to);
53126 +
53127 +int gr_is_capable(const int cap);
53128 +int gr_is_capable_nolog(const int cap);
53129 +void gr_learn_resource(const struct task_struct *task, const int limit,
53130 + const unsigned long wanted, const int gt);
53131 +void gr_copy_label(struct task_struct *tsk);
53132 +void gr_handle_crash(struct task_struct *task, const int sig);
53133 +int gr_handle_signal(const struct task_struct *p, const int sig);
53134 +int gr_check_crash_uid(const uid_t uid);
53135 +int gr_check_protected_task(const struct task_struct *task);
53136 +int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
53137 +int gr_acl_handle_mmap(const struct file *file,
53138 + const unsigned long prot);
53139 +int gr_acl_handle_mprotect(const struct file *file,
53140 + const unsigned long prot);
53141 +int gr_check_hidden_task(const struct task_struct *tsk);
53142 +__u32 gr_acl_handle_truncate(const struct dentry *dentry,
53143 + const struct vfsmount *mnt);
53144 +__u32 gr_acl_handle_utime(const struct dentry *dentry,
53145 + const struct vfsmount *mnt);
53146 +__u32 gr_acl_handle_access(const struct dentry *dentry,
53147 + const struct vfsmount *mnt, const int fmode);
53148 +__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
53149 + const struct vfsmount *mnt, mode_t mode);
53150 +__u32 gr_acl_handle_chmod(const struct dentry *dentry,
53151 + const struct vfsmount *mnt, mode_t mode);
53152 +__u32 gr_acl_handle_chown(const struct dentry *dentry,
53153 + const struct vfsmount *mnt);
53154 +__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
53155 + const struct vfsmount *mnt);
53156 +int gr_handle_ptrace(struct task_struct *task, const long request);
53157 +int gr_handle_proc_ptrace(struct task_struct *task);
53158 +__u32 gr_acl_handle_execve(const struct dentry *dentry,
53159 + const struct vfsmount *mnt);
53160 +int gr_check_crash_exec(const struct file *filp);
53161 +int gr_acl_is_enabled(void);
53162 +void gr_set_kernel_label(struct task_struct *task);
53163 +void gr_set_role_label(struct task_struct *task, const uid_t uid,
53164 + const gid_t gid);
53165 +int gr_set_proc_label(const struct dentry *dentry,
53166 + const struct vfsmount *mnt,
53167 + const int unsafe_share);
53168 +__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
53169 + const struct vfsmount *mnt);
53170 +__u32 gr_acl_handle_open(const struct dentry *dentry,
53171 + const struct vfsmount *mnt, const int fmode);
53172 +__u32 gr_acl_handle_creat(const struct dentry *dentry,
53173 + const struct dentry *p_dentry,
53174 + const struct vfsmount *p_mnt, const int fmode,
53175 + const int imode);
53176 +void gr_handle_create(const struct dentry *dentry,
53177 + const struct vfsmount *mnt);
53178 +__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
53179 + const struct dentry *parent_dentry,
53180 + const struct vfsmount *parent_mnt,
53181 + const int mode);
53182 +__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
53183 + const struct dentry *parent_dentry,
53184 + const struct vfsmount *parent_mnt);
53185 +__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
53186 + const struct vfsmount *mnt);
53187 +void gr_handle_delete(const ino_t ino, const dev_t dev);
53188 +__u32 gr_acl_handle_unlink(const struct dentry *dentry,
53189 + const struct vfsmount *mnt);
53190 +__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
53191 + const struct dentry *parent_dentry,
53192 + const struct vfsmount *parent_mnt,
53193 + const char *from);
53194 +__u32 gr_acl_handle_link(const struct dentry *new_dentry,
53195 + const struct dentry *parent_dentry,
53196 + const struct vfsmount *parent_mnt,
53197 + const struct dentry *old_dentry,
53198 + const struct vfsmount *old_mnt, const char *to);
53199 +int gr_acl_handle_rename(struct dentry *new_dentry,
53200 + struct dentry *parent_dentry,
53201 + const struct vfsmount *parent_mnt,
53202 + struct dentry *old_dentry,
53203 + struct inode *old_parent_inode,
53204 + struct vfsmount *old_mnt, const char *newname);
53205 +void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53206 + struct dentry *old_dentry,
53207 + struct dentry *new_dentry,
53208 + struct vfsmount *mnt, const __u8 replace);
53209 +__u32 gr_check_link(const struct dentry *new_dentry,
53210 + const struct dentry *parent_dentry,
53211 + const struct vfsmount *parent_mnt,
53212 + const struct dentry *old_dentry,
53213 + const struct vfsmount *old_mnt);
53214 +int gr_acl_handle_filldir(const struct file *file, const char *name,
53215 + const unsigned int namelen, const ino_t ino);
53216 +
53217 +__u32 gr_acl_handle_unix(const struct dentry *dentry,
53218 + const struct vfsmount *mnt);
53219 +void gr_acl_handle_exit(void);
53220 +void gr_acl_handle_psacct(struct task_struct *task, const long code);
53221 +int gr_acl_handle_procpidmem(const struct task_struct *task);
53222 +int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
53223 +int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
53224 +void gr_audit_ptrace(struct task_struct *task);
53225 +dev_t gr_get_dev_from_dentry(struct dentry *dentry);
53226 +
53227 +#ifdef CONFIG_GRKERNSEC
53228 +void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
53229 +void gr_handle_vm86(void);
53230 +void gr_handle_mem_readwrite(u64 from, u64 to);
53231 +
53232 +extern int grsec_enable_dmesg;
53233 +extern int grsec_disable_privio;
53234 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53235 +extern int grsec_enable_chroot_findtask;
53236 +#endif
53237 +#endif
53238 +
53239 +#endif
53240 diff -urNp linux-2.6.39.4/include/linux/grsock.h linux-2.6.39.4/include/linux/grsock.h
53241 --- linux-2.6.39.4/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500
53242 +++ linux-2.6.39.4/include/linux/grsock.h 2011-08-05 19:44:37.000000000 -0400
53243 @@ -0,0 +1,19 @@
53244 +#ifndef __GRSOCK_H
53245 +#define __GRSOCK_H
53246 +
53247 +extern void gr_attach_curr_ip(const struct sock *sk);
53248 +extern int gr_handle_sock_all(const int family, const int type,
53249 + const int protocol);
53250 +extern int gr_handle_sock_server(const struct sockaddr *sck);
53251 +extern int gr_handle_sock_server_other(const struct sock *sck);
53252 +extern int gr_handle_sock_client(const struct sockaddr *sck);
53253 +extern int gr_search_connect(struct socket * sock,
53254 + struct sockaddr_in * addr);
53255 +extern int gr_search_bind(struct socket * sock,
53256 + struct sockaddr_in * addr);
53257 +extern int gr_search_listen(struct socket * sock);
53258 +extern int gr_search_accept(struct socket * sock);
53259 +extern int gr_search_socket(const int domain, const int type,
53260 + const int protocol);
53261 +
53262 +#endif
53263 diff -urNp linux-2.6.39.4/include/linux/highmem.h linux-2.6.39.4/include/linux/highmem.h
53264 --- linux-2.6.39.4/include/linux/highmem.h 2011-05-19 00:06:34.000000000 -0400
53265 +++ linux-2.6.39.4/include/linux/highmem.h 2011-08-05 19:44:37.000000000 -0400
53266 @@ -185,6 +185,18 @@ static inline void clear_highpage(struct
53267 kunmap_atomic(kaddr, KM_USER0);
53268 }
53269
53270 +static inline void sanitize_highpage(struct page *page)
53271 +{
53272 + void *kaddr;
53273 + unsigned long flags;
53274 +
53275 + local_irq_save(flags);
53276 + kaddr = kmap_atomic(page, KM_CLEARPAGE);
53277 + clear_page(kaddr);
53278 + kunmap_atomic(kaddr, KM_CLEARPAGE);
53279 + local_irq_restore(flags);
53280 +}
53281 +
53282 static inline void zero_user_segments(struct page *page,
53283 unsigned start1, unsigned end1,
53284 unsigned start2, unsigned end2)
53285 diff -urNp linux-2.6.39.4/include/linux/i2c.h linux-2.6.39.4/include/linux/i2c.h
53286 --- linux-2.6.39.4/include/linux/i2c.h 2011-05-19 00:06:34.000000000 -0400
53287 +++ linux-2.6.39.4/include/linux/i2c.h 2011-08-05 20:34:06.000000000 -0400
53288 @@ -346,6 +346,7 @@ struct i2c_algorithm {
53289 /* To determine what the adapter supports */
53290 u32 (*functionality) (struct i2c_adapter *);
53291 };
53292 +typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
53293
53294 /*
53295 * i2c_adapter is the structure used to identify a physical i2c bus along
53296 diff -urNp linux-2.6.39.4/include/linux/i2o.h linux-2.6.39.4/include/linux/i2o.h
53297 --- linux-2.6.39.4/include/linux/i2o.h 2011-05-19 00:06:34.000000000 -0400
53298 +++ linux-2.6.39.4/include/linux/i2o.h 2011-08-05 19:44:37.000000000 -0400
53299 @@ -564,7 +564,7 @@ struct i2o_controller {
53300 struct i2o_device *exec; /* Executive */
53301 #if BITS_PER_LONG == 64
53302 spinlock_t context_list_lock; /* lock for context_list */
53303 - atomic_t context_list_counter; /* needed for unique contexts */
53304 + atomic_unchecked_t context_list_counter; /* needed for unique contexts */
53305 struct list_head context_list; /* list of context id's
53306 and pointers */
53307 #endif
53308 diff -urNp linux-2.6.39.4/include/linux/init.h linux-2.6.39.4/include/linux/init.h
53309 --- linux-2.6.39.4/include/linux/init.h 2011-05-19 00:06:34.000000000 -0400
53310 +++ linux-2.6.39.4/include/linux/init.h 2011-08-05 19:44:37.000000000 -0400
53311 @@ -293,13 +293,13 @@ void __init parse_early_options(char *cm
53312
53313 /* Each module must use one module_init(). */
53314 #define module_init(initfn) \
53315 - static inline initcall_t __inittest(void) \
53316 + static inline __used initcall_t __inittest(void) \
53317 { return initfn; } \
53318 int init_module(void) __attribute__((alias(#initfn)));
53319
53320 /* This is only required if you want to be unloadable. */
53321 #define module_exit(exitfn) \
53322 - static inline exitcall_t __exittest(void) \
53323 + static inline __used exitcall_t __exittest(void) \
53324 { return exitfn; } \
53325 void cleanup_module(void) __attribute__((alias(#exitfn)));
53326
53327 diff -urNp linux-2.6.39.4/include/linux/init_task.h linux-2.6.39.4/include/linux/init_task.h
53328 --- linux-2.6.39.4/include/linux/init_task.h 2011-05-19 00:06:34.000000000 -0400
53329 +++ linux-2.6.39.4/include/linux/init_task.h 2011-08-05 19:44:37.000000000 -0400
53330 @@ -83,6 +83,12 @@ extern struct group_info init_groups;
53331 #define INIT_IDS
53332 #endif
53333
53334 +#ifdef CONFIG_X86
53335 +#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
53336 +#else
53337 +#define INIT_TASK_THREAD_INFO
53338 +#endif
53339 +
53340 /*
53341 * Because of the reduced scope of CAP_SETPCAP when filesystem
53342 * capabilities are in effect, it is safe to allow CAP_SETPCAP to
53343 @@ -163,6 +169,7 @@ extern struct cred init_cred;
53344 RCU_INIT_POINTER(.cred, &init_cred), \
53345 .comm = "swapper", \
53346 .thread = INIT_THREAD, \
53347 + INIT_TASK_THREAD_INFO \
53348 .fs = &init_fs, \
53349 .files = &init_files, \
53350 .signal = &init_signals, \
53351 diff -urNp linux-2.6.39.4/include/linux/intel-iommu.h linux-2.6.39.4/include/linux/intel-iommu.h
53352 --- linux-2.6.39.4/include/linux/intel-iommu.h 2011-05-19 00:06:34.000000000 -0400
53353 +++ linux-2.6.39.4/include/linux/intel-iommu.h 2011-08-05 20:34:06.000000000 -0400
53354 @@ -296,7 +296,7 @@ struct iommu_flush {
53355 u8 fm, u64 type);
53356 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
53357 unsigned int size_order, u64 type);
53358 -};
53359 +} __no_const;
53360
53361 enum {
53362 SR_DMAR_FECTL_REG,
53363 diff -urNp linux-2.6.39.4/include/linux/interrupt.h linux-2.6.39.4/include/linux/interrupt.h
53364 --- linux-2.6.39.4/include/linux/interrupt.h 2011-05-19 00:06:34.000000000 -0400
53365 +++ linux-2.6.39.4/include/linux/interrupt.h 2011-08-05 19:44:37.000000000 -0400
53366 @@ -422,7 +422,7 @@ enum
53367 /* map softirq index to softirq name. update 'softirq_to_name' in
53368 * kernel/softirq.c when adding a new softirq.
53369 */
53370 -extern char *softirq_to_name[NR_SOFTIRQS];
53371 +extern const char * const softirq_to_name[NR_SOFTIRQS];
53372
53373 /* softirq mask and active fields moved to irq_cpustat_t in
53374 * asm/hardirq.h to get better cache usage. KAO
53375 @@ -430,12 +430,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
53376
53377 struct softirq_action
53378 {
53379 - void (*action)(struct softirq_action *);
53380 + void (*action)(void);
53381 };
53382
53383 asmlinkage void do_softirq(void);
53384 asmlinkage void __do_softirq(void);
53385 -extern void open_softirq(int nr, void (*action)(struct softirq_action *));
53386 +extern void open_softirq(int nr, void (*action)(void));
53387 extern void softirq_init(void);
53388 static inline void __raise_softirq_irqoff(unsigned int nr)
53389 {
53390 diff -urNp linux-2.6.39.4/include/linux/kallsyms.h linux-2.6.39.4/include/linux/kallsyms.h
53391 --- linux-2.6.39.4/include/linux/kallsyms.h 2011-05-19 00:06:34.000000000 -0400
53392 +++ linux-2.6.39.4/include/linux/kallsyms.h 2011-08-05 19:44:37.000000000 -0400
53393 @@ -15,7 +15,8 @@
53394
53395 struct module;
53396
53397 -#ifdef CONFIG_KALLSYMS
53398 +#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
53399 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53400 /* Lookup the address for a symbol. Returns 0 if not found. */
53401 unsigned long kallsyms_lookup_name(const char *name);
53402
53403 @@ -99,6 +100,16 @@ static inline int lookup_symbol_attrs(un
53404 /* Stupid that this does nothing, but I didn't create this mess. */
53405 #define __print_symbol(fmt, addr)
53406 #endif /*CONFIG_KALLSYMS*/
53407 +#else /* when included by kallsyms.c, vsnprintf.c, or
53408 + arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
53409 +extern void __print_symbol(const char *fmt, unsigned long address);
53410 +extern int sprint_backtrace(char *buffer, unsigned long address);
53411 +extern int sprint_symbol(char *buffer, unsigned long address);
53412 +const char *kallsyms_lookup(unsigned long addr,
53413 + unsigned long *symbolsize,
53414 + unsigned long *offset,
53415 + char **modname, char *namebuf);
53416 +#endif
53417
53418 /* This macro allows us to keep printk typechecking */
53419 static void __check_printsym_format(const char *fmt, ...)
53420 diff -urNp linux-2.6.39.4/include/linux/kgdb.h linux-2.6.39.4/include/linux/kgdb.h
53421 --- linux-2.6.39.4/include/linux/kgdb.h 2011-05-19 00:06:34.000000000 -0400
53422 +++ linux-2.6.39.4/include/linux/kgdb.h 2011-08-05 20:34:06.000000000 -0400
53423 @@ -53,7 +53,7 @@ extern int kgdb_connected;
53424 extern int kgdb_io_module_registered;
53425
53426 extern atomic_t kgdb_setting_breakpoint;
53427 -extern atomic_t kgdb_cpu_doing_single_step;
53428 +extern atomic_unchecked_t kgdb_cpu_doing_single_step;
53429
53430 extern struct task_struct *kgdb_usethread;
53431 extern struct task_struct *kgdb_contthread;
53432 @@ -241,8 +241,8 @@ extern void kgdb_arch_late(void);
53433 * hardware debug registers.
53434 */
53435 struct kgdb_arch {
53436 - unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
53437 - unsigned long flags;
53438 + const unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
53439 + const unsigned long flags;
53440
53441 int (*set_breakpoint)(unsigned long, char *);
53442 int (*remove_breakpoint)(unsigned long, char *);
53443 @@ -268,14 +268,14 @@ struct kgdb_arch {
53444 * not a console
53445 */
53446 struct kgdb_io {
53447 - const char *name;
53448 + const char * const name;
53449 int (*read_char) (void);
53450 void (*write_char) (u8);
53451 void (*flush) (void);
53452 int (*init) (void);
53453 void (*pre_exception) (void);
53454 void (*post_exception) (void);
53455 - int is_console;
53456 + const int is_console;
53457 };
53458
53459 extern struct kgdb_arch arch_kgdb_ops;
53460 diff -urNp linux-2.6.39.4/include/linux/kmod.h linux-2.6.39.4/include/linux/kmod.h
53461 --- linux-2.6.39.4/include/linux/kmod.h 2011-05-19 00:06:34.000000000 -0400
53462 +++ linux-2.6.39.4/include/linux/kmod.h 2011-08-05 19:44:37.000000000 -0400
53463 @@ -33,6 +33,8 @@ extern char modprobe_path[]; /* for sysc
53464 * usually useless though. */
53465 extern int __request_module(bool wait, const char *name, ...) \
53466 __attribute__((format(printf, 2, 3)));
53467 +extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
53468 + __attribute__((format(printf, 3, 4)));
53469 #define request_module(mod...) __request_module(true, mod)
53470 #define request_module_nowait(mod...) __request_module(false, mod)
53471 #define try_then_request_module(x, mod...) \
53472 diff -urNp linux-2.6.39.4/include/linux/kvm_host.h linux-2.6.39.4/include/linux/kvm_host.h
53473 --- linux-2.6.39.4/include/linux/kvm_host.h 2011-05-19 00:06:34.000000000 -0400
53474 +++ linux-2.6.39.4/include/linux/kvm_host.h 2011-08-05 19:44:37.000000000 -0400
53475 @@ -302,7 +302,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
53476 void vcpu_load(struct kvm_vcpu *vcpu);
53477 void vcpu_put(struct kvm_vcpu *vcpu);
53478
53479 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53480 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
53481 struct module *module);
53482 void kvm_exit(void);
53483
53484 @@ -442,7 +442,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
53485 struct kvm_guest_debug *dbg);
53486 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
53487
53488 -int kvm_arch_init(void *opaque);
53489 +int kvm_arch_init(const void *opaque);
53490 void kvm_arch_exit(void);
53491
53492 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
53493 diff -urNp linux-2.6.39.4/include/linux/libata.h linux-2.6.39.4/include/linux/libata.h
53494 --- linux-2.6.39.4/include/linux/libata.h 2011-05-19 00:06:34.000000000 -0400
53495 +++ linux-2.6.39.4/include/linux/libata.h 2011-08-05 20:34:06.000000000 -0400
53496 @@ -898,7 +898,7 @@ struct ata_port_operations {
53497 * ->inherits must be the last field and all the preceding
53498 * fields must be pointers.
53499 */
53500 - const struct ata_port_operations *inherits;
53501 + const struct ata_port_operations * const inherits;
53502 };
53503
53504 struct ata_port_info {
53505 diff -urNp linux-2.6.39.4/include/linux/mca.h linux-2.6.39.4/include/linux/mca.h
53506 --- linux-2.6.39.4/include/linux/mca.h 2011-05-19 00:06:34.000000000 -0400
53507 +++ linux-2.6.39.4/include/linux/mca.h 2011-08-05 20:34:06.000000000 -0400
53508 @@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
53509 int region);
53510 void * (*mca_transform_memory)(struct mca_device *,
53511 void *memory);
53512 -};
53513 +} __no_const;
53514
53515 struct mca_bus {
53516 u64 default_dma_mask;
53517 diff -urNp linux-2.6.39.4/include/linux/memory.h linux-2.6.39.4/include/linux/memory.h
53518 --- linux-2.6.39.4/include/linux/memory.h 2011-05-19 00:06:34.000000000 -0400
53519 +++ linux-2.6.39.4/include/linux/memory.h 2011-08-05 20:34:06.000000000 -0400
53520 @@ -142,7 +142,7 @@ struct memory_accessor {
53521 size_t count);
53522 ssize_t (*write)(struct memory_accessor *, const char *buf,
53523 off_t offset, size_t count);
53524 -};
53525 +} __no_const;
53526
53527 /*
53528 * Kernel text modification mutex, used for code patching. Users of this lock
53529 diff -urNp linux-2.6.39.4/include/linux/mfd/abx500.h linux-2.6.39.4/include/linux/mfd/abx500.h
53530 --- linux-2.6.39.4/include/linux/mfd/abx500.h 2011-05-19 00:06:34.000000000 -0400
53531 +++ linux-2.6.39.4/include/linux/mfd/abx500.h 2011-08-05 20:34:06.000000000 -0400
53532 @@ -226,6 +226,7 @@ struct abx500_ops {
53533 int (*event_registers_startup_state_get) (struct device *, u8 *);
53534 int (*startup_irq_enabled) (struct device *, unsigned int);
53535 };
53536 +typedef struct abx500_ops __no_const abx500_ops_no_const;
53537
53538 int abx500_register_ops(struct device *core_dev, struct abx500_ops *ops);
53539 void abx500_remove_ops(struct device *dev);
53540 diff -urNp linux-2.6.39.4/include/linux/mm.h linux-2.6.39.4/include/linux/mm.h
53541 --- linux-2.6.39.4/include/linux/mm.h 2011-05-19 00:06:34.000000000 -0400
53542 +++ linux-2.6.39.4/include/linux/mm.h 2011-08-05 19:44:37.000000000 -0400
53543 @@ -113,7 +113,14 @@ extern unsigned int kobjsize(const void
53544
53545 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
53546 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
53547 +
53548 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
53549 +#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
53550 +#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
53551 +#else
53552 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
53553 +#endif
53554 +
53555 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
53556 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
53557
53558 @@ -1010,34 +1017,6 @@ int set_page_dirty(struct page *page);
53559 int set_page_dirty_lock(struct page *page);
53560 int clear_page_dirty_for_io(struct page *page);
53561
53562 -/* Is the vma a continuation of the stack vma above it? */
53563 -static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
53564 -{
53565 - return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
53566 -}
53567 -
53568 -static inline int stack_guard_page_start(struct vm_area_struct *vma,
53569 - unsigned long addr)
53570 -{
53571 - return (vma->vm_flags & VM_GROWSDOWN) &&
53572 - (vma->vm_start == addr) &&
53573 - !vma_growsdown(vma->vm_prev, addr);
53574 -}
53575 -
53576 -/* Is the vma a continuation of the stack vma below it? */
53577 -static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
53578 -{
53579 - return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
53580 -}
53581 -
53582 -static inline int stack_guard_page_end(struct vm_area_struct *vma,
53583 - unsigned long addr)
53584 -{
53585 - return (vma->vm_flags & VM_GROWSUP) &&
53586 - (vma->vm_end == addr) &&
53587 - !vma_growsup(vma->vm_next, addr);
53588 -}
53589 -
53590 extern unsigned long move_page_tables(struct vm_area_struct *vma,
53591 unsigned long old_addr, struct vm_area_struct *new_vma,
53592 unsigned long new_addr, unsigned long len);
53593 @@ -1189,6 +1168,15 @@ struct shrinker {
53594 extern void register_shrinker(struct shrinker *);
53595 extern void unregister_shrinker(struct shrinker *);
53596
53597 +#ifdef CONFIG_MMU
53598 +pgprot_t vm_get_page_prot(unsigned long vm_flags);
53599 +#else
53600 +static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
53601 +{
53602 + return __pgprot(0);
53603 +}
53604 +#endif
53605 +
53606 int vma_wants_writenotify(struct vm_area_struct *vma);
53607
53608 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
53609 @@ -1476,6 +1464,7 @@ out:
53610 }
53611
53612 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
53613 +extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
53614
53615 extern unsigned long do_brk(unsigned long, unsigned long);
53616
53617 @@ -1532,6 +1521,10 @@ extern struct vm_area_struct * find_vma(
53618 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
53619 struct vm_area_struct **pprev);
53620
53621 +extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
53622 +extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
53623 +extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
53624 +
53625 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
53626 NULL if none. Assume start_addr < end_addr. */
53627 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
53628 @@ -1548,15 +1541,6 @@ static inline unsigned long vma_pages(st
53629 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
53630 }
53631
53632 -#ifdef CONFIG_MMU
53633 -pgprot_t vm_get_page_prot(unsigned long vm_flags);
53634 -#else
53635 -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
53636 -{
53637 - return __pgprot(0);
53638 -}
53639 -#endif
53640 -
53641 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
53642 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
53643 unsigned long pfn, unsigned long size, pgprot_t);
53644 @@ -1668,7 +1652,7 @@ extern int unpoison_memory(unsigned long
53645 extern int sysctl_memory_failure_early_kill;
53646 extern int sysctl_memory_failure_recovery;
53647 extern void shake_page(struct page *p, int access);
53648 -extern atomic_long_t mce_bad_pages;
53649 +extern atomic_long_unchecked_t mce_bad_pages;
53650 extern int soft_offline_page(struct page *page, int flags);
53651
53652 extern void dump_page(struct page *page);
53653 @@ -1682,5 +1666,11 @@ extern void copy_user_huge_page(struct p
53654 unsigned int pages_per_huge_page);
53655 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
53656
53657 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53658 +extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
53659 +#else
53660 +static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
53661 +#endif
53662 +
53663 #endif /* __KERNEL__ */
53664 #endif /* _LINUX_MM_H */
53665 diff -urNp linux-2.6.39.4/include/linux/mm_types.h linux-2.6.39.4/include/linux/mm_types.h
53666 --- linux-2.6.39.4/include/linux/mm_types.h 2011-05-19 00:06:34.000000000 -0400
53667 +++ linux-2.6.39.4/include/linux/mm_types.h 2011-08-05 19:44:37.000000000 -0400
53668 @@ -183,6 +183,8 @@ struct vm_area_struct {
53669 #ifdef CONFIG_NUMA
53670 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
53671 #endif
53672 +
53673 + struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
53674 };
53675
53676 struct core_thread {
53677 @@ -317,6 +319,24 @@ struct mm_struct {
53678 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
53679 pgtable_t pmd_huge_pte; /* protected by page_table_lock */
53680 #endif
53681 +
53682 +#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
53683 + unsigned long pax_flags;
53684 +#endif
53685 +
53686 +#ifdef CONFIG_PAX_DLRESOLVE
53687 + unsigned long call_dl_resolve;
53688 +#endif
53689 +
53690 +#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
53691 + unsigned long call_syscall;
53692 +#endif
53693 +
53694 +#ifdef CONFIG_PAX_ASLR
53695 + unsigned long delta_mmap; /* randomized offset */
53696 + unsigned long delta_stack; /* randomized offset */
53697 +#endif
53698 +
53699 };
53700
53701 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
53702 diff -urNp linux-2.6.39.4/include/linux/mmu_notifier.h linux-2.6.39.4/include/linux/mmu_notifier.h
53703 --- linux-2.6.39.4/include/linux/mmu_notifier.h 2011-05-19 00:06:34.000000000 -0400
53704 +++ linux-2.6.39.4/include/linux/mmu_notifier.h 2011-08-05 19:44:37.000000000 -0400
53705 @@ -255,12 +255,12 @@ static inline void mmu_notifier_mm_destr
53706 */
53707 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
53708 ({ \
53709 - pte_t __pte; \
53710 + pte_t ___pte; \
53711 struct vm_area_struct *___vma = __vma; \
53712 unsigned long ___address = __address; \
53713 - __pte = ptep_clear_flush(___vma, ___address, __ptep); \
53714 + ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
53715 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
53716 - __pte; \
53717 + ___pte; \
53718 })
53719
53720 #define pmdp_clear_flush_notify(__vma, __address, __pmdp) \
53721 diff -urNp linux-2.6.39.4/include/linux/mmzone.h linux-2.6.39.4/include/linux/mmzone.h
53722 --- linux-2.6.39.4/include/linux/mmzone.h 2011-05-19 00:06:34.000000000 -0400
53723 +++ linux-2.6.39.4/include/linux/mmzone.h 2011-08-05 19:44:37.000000000 -0400
53724 @@ -355,7 +355,7 @@ struct zone {
53725 unsigned long flags; /* zone flags, see below */
53726
53727 /* Zone statistics */
53728 - atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53729 + atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
53730
53731 /*
53732 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
53733 diff -urNp linux-2.6.39.4/include/linux/mod_devicetable.h linux-2.6.39.4/include/linux/mod_devicetable.h
53734 --- linux-2.6.39.4/include/linux/mod_devicetable.h 2011-05-19 00:06:34.000000000 -0400
53735 +++ linux-2.6.39.4/include/linux/mod_devicetable.h 2011-08-05 19:44:37.000000000 -0400
53736 @@ -12,7 +12,7 @@
53737 typedef unsigned long kernel_ulong_t;
53738 #endif
53739
53740 -#define PCI_ANY_ID (~0)
53741 +#define PCI_ANY_ID ((__u16)~0)
53742
53743 struct pci_device_id {
53744 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
53745 @@ -131,7 +131,7 @@ struct usb_device_id {
53746 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
53747 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
53748
53749 -#define HID_ANY_ID (~0)
53750 +#define HID_ANY_ID (~0U)
53751
53752 struct hid_device_id {
53753 __u16 bus;
53754 diff -urNp linux-2.6.39.4/include/linux/module.h linux-2.6.39.4/include/linux/module.h
53755 --- linux-2.6.39.4/include/linux/module.h 2011-05-19 00:06:34.000000000 -0400
53756 +++ linux-2.6.39.4/include/linux/module.h 2011-08-05 20:34:06.000000000 -0400
53757 @@ -16,6 +16,7 @@
53758 #include <linux/kobject.h>
53759 #include <linux/moduleparam.h>
53760 #include <linux/tracepoint.h>
53761 +#include <linux/fs.h>
53762
53763 #include <linux/percpu.h>
53764 #include <asm/module.h>
53765 @@ -324,19 +325,16 @@ struct module
53766 int (*init)(void);
53767
53768 /* If this is non-NULL, vfree after init() returns */
53769 - void *module_init;
53770 + void *module_init_rx, *module_init_rw;
53771
53772 /* Here is the actual code + data, vfree'd on unload. */
53773 - void *module_core;
53774 + void *module_core_rx, *module_core_rw;
53775
53776 /* Here are the sizes of the init and core sections */
53777 - unsigned int init_size, core_size;
53778 + unsigned int init_size_rw, core_size_rw;
53779
53780 /* The size of the executable code in each section. */
53781 - unsigned int init_text_size, core_text_size;
53782 -
53783 - /* Size of RO sections of the module (text+rodata) */
53784 - unsigned int init_ro_size, core_ro_size;
53785 + unsigned int init_size_rx, core_size_rx;
53786
53787 /* Arch-specific module values */
53788 struct mod_arch_specific arch;
53789 @@ -391,6 +389,10 @@ struct module
53790 #ifdef CONFIG_EVENT_TRACING
53791 struct ftrace_event_call **trace_events;
53792 unsigned int num_trace_events;
53793 + struct file_operations trace_id;
53794 + struct file_operations trace_enable;
53795 + struct file_operations trace_format;
53796 + struct file_operations trace_filter;
53797 #endif
53798 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
53799 unsigned long *ftrace_callsites;
53800 @@ -441,16 +443,46 @@ bool is_module_address(unsigned long add
53801 bool is_module_percpu_address(unsigned long addr);
53802 bool is_module_text_address(unsigned long addr);
53803
53804 +static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
53805 +{
53806 +
53807 +#ifdef CONFIG_PAX_KERNEXEC
53808 + if (ktla_ktva(addr) >= (unsigned long)start &&
53809 + ktla_ktva(addr) < (unsigned long)start + size)
53810 + return 1;
53811 +#endif
53812 +
53813 + return ((void *)addr >= start && (void *)addr < start + size);
53814 +}
53815 +
53816 +static inline int within_module_core_rx(unsigned long addr, struct module *mod)
53817 +{
53818 + return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
53819 +}
53820 +
53821 +static inline int within_module_core_rw(unsigned long addr, struct module *mod)
53822 +{
53823 + return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
53824 +}
53825 +
53826 +static inline int within_module_init_rx(unsigned long addr, struct module *mod)
53827 +{
53828 + return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
53829 +}
53830 +
53831 +static inline int within_module_init_rw(unsigned long addr, struct module *mod)
53832 +{
53833 + return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
53834 +}
53835 +
53836 static inline int within_module_core(unsigned long addr, struct module *mod)
53837 {
53838 - return (unsigned long)mod->module_core <= addr &&
53839 - addr < (unsigned long)mod->module_core + mod->core_size;
53840 + return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
53841 }
53842
53843 static inline int within_module_init(unsigned long addr, struct module *mod)
53844 {
53845 - return (unsigned long)mod->module_init <= addr &&
53846 - addr < (unsigned long)mod->module_init + mod->init_size;
53847 + return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
53848 }
53849
53850 /* Search for module by name: must hold module_mutex. */
53851 diff -urNp linux-2.6.39.4/include/linux/moduleloader.h linux-2.6.39.4/include/linux/moduleloader.h
53852 --- linux-2.6.39.4/include/linux/moduleloader.h 2011-05-19 00:06:34.000000000 -0400
53853 +++ linux-2.6.39.4/include/linux/moduleloader.h 2011-08-05 19:44:37.000000000 -0400
53854 @@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
53855 sections. Returns NULL on failure. */
53856 void *module_alloc(unsigned long size);
53857
53858 +#ifdef CONFIG_PAX_KERNEXEC
53859 +void *module_alloc_exec(unsigned long size);
53860 +#else
53861 +#define module_alloc_exec(x) module_alloc(x)
53862 +#endif
53863 +
53864 /* Free memory returned from module_alloc. */
53865 void module_free(struct module *mod, void *module_region);
53866
53867 +#ifdef CONFIG_PAX_KERNEXEC
53868 +void module_free_exec(struct module *mod, void *module_region);
53869 +#else
53870 +#define module_free_exec(x, y) module_free((x), (y))
53871 +#endif
53872 +
53873 /* Apply the given relocation to the (simplified) ELF. Return -error
53874 or 0. */
53875 int apply_relocate(Elf_Shdr *sechdrs,
53876 diff -urNp linux-2.6.39.4/include/linux/moduleparam.h linux-2.6.39.4/include/linux/moduleparam.h
53877 --- linux-2.6.39.4/include/linux/moduleparam.h 2011-05-19 00:06:34.000000000 -0400
53878 +++ linux-2.6.39.4/include/linux/moduleparam.h 2011-08-05 20:34:06.000000000 -0400
53879 @@ -255,7 +255,7 @@ static inline void __kernel_param_unlock
53880 * @len is usually just sizeof(string).
53881 */
53882 #define module_param_string(name, string, len, perm) \
53883 - static const struct kparam_string __param_string_##name \
53884 + static const struct kparam_string __param_string_##name __used \
53885 = { len, string }; \
53886 __module_param_call(MODULE_PARAM_PREFIX, name, \
53887 &param_ops_string, \
53888 @@ -370,7 +370,7 @@ extern int param_get_invbool(char *buffe
53889 * module_param_named() for why this might be necessary.
53890 */
53891 #define module_param_array_named(name, array, type, nump, perm) \
53892 - static const struct kparam_array __param_arr_##name \
53893 + static const struct kparam_array __param_arr_##name __used \
53894 = { ARRAY_SIZE(array), nump, &param_ops_##type, \
53895 sizeof(array[0]), array }; \
53896 __module_param_call(MODULE_PARAM_PREFIX, name, \
53897 diff -urNp linux-2.6.39.4/include/linux/mutex.h linux-2.6.39.4/include/linux/mutex.h
53898 --- linux-2.6.39.4/include/linux/mutex.h 2011-05-19 00:06:34.000000000 -0400
53899 +++ linux-2.6.39.4/include/linux/mutex.h 2011-08-05 19:44:37.000000000 -0400
53900 @@ -51,7 +51,7 @@ struct mutex {
53901 spinlock_t wait_lock;
53902 struct list_head wait_list;
53903 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
53904 - struct thread_info *owner;
53905 + struct task_struct *owner;
53906 #endif
53907 #ifdef CONFIG_DEBUG_MUTEXES
53908 const char *name;
53909 diff -urNp linux-2.6.39.4/include/linux/namei.h linux-2.6.39.4/include/linux/namei.h
53910 --- linux-2.6.39.4/include/linux/namei.h 2011-05-19 00:06:34.000000000 -0400
53911 +++ linux-2.6.39.4/include/linux/namei.h 2011-08-05 19:44:37.000000000 -0400
53912 @@ -24,7 +24,7 @@ struct nameidata {
53913 unsigned seq;
53914 int last_type;
53915 unsigned depth;
53916 - char *saved_names[MAX_NESTED_LINKS + 1];
53917 + const char *saved_names[MAX_NESTED_LINKS + 1];
53918
53919 /* Intent data */
53920 union {
53921 @@ -91,12 +91,12 @@ extern int follow_up(struct path *);
53922 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
53923 extern void unlock_rename(struct dentry *, struct dentry *);
53924
53925 -static inline void nd_set_link(struct nameidata *nd, char *path)
53926 +static inline void nd_set_link(struct nameidata *nd, const char *path)
53927 {
53928 nd->saved_names[nd->depth] = path;
53929 }
53930
53931 -static inline char *nd_get_link(struct nameidata *nd)
53932 +static inline const char *nd_get_link(const struct nameidata *nd)
53933 {
53934 return nd->saved_names[nd->depth];
53935 }
53936 diff -urNp linux-2.6.39.4/include/linux/netdevice.h linux-2.6.39.4/include/linux/netdevice.h
53937 --- linux-2.6.39.4/include/linux/netdevice.h 2011-08-05 21:11:51.000000000 -0400
53938 +++ linux-2.6.39.4/include/linux/netdevice.h 2011-08-05 21:12:20.000000000 -0400
53939 @@ -979,6 +979,7 @@ struct net_device_ops {
53940 int (*ndo_set_features)(struct net_device *dev,
53941 u32 features);
53942 };
53943 +typedef struct net_device_ops __no_const net_device_ops_no_const;
53944
53945 /*
53946 * The DEVICE structure.
53947 diff -urNp linux-2.6.39.4/include/linux/netfilter/xt_gradm.h linux-2.6.39.4/include/linux/netfilter/xt_gradm.h
53948 --- linux-2.6.39.4/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
53949 +++ linux-2.6.39.4/include/linux/netfilter/xt_gradm.h 2011-08-05 19:44:37.000000000 -0400
53950 @@ -0,0 +1,9 @@
53951 +#ifndef _LINUX_NETFILTER_XT_GRADM_H
53952 +#define _LINUX_NETFILTER_XT_GRADM_H 1
53953 +
53954 +struct xt_gradm_mtinfo {
53955 + __u16 flags;
53956 + __u16 invflags;
53957 +};
53958 +
53959 +#endif
53960 diff -urNp linux-2.6.39.4/include/linux/oprofile.h linux-2.6.39.4/include/linux/oprofile.h
53961 --- linux-2.6.39.4/include/linux/oprofile.h 2011-05-19 00:06:34.000000000 -0400
53962 +++ linux-2.6.39.4/include/linux/oprofile.h 2011-08-05 19:44:37.000000000 -0400
53963 @@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super
53964 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
53965 char const * name, ulong * val);
53966
53967 -/** Create a file for read-only access to an atomic_t. */
53968 +/** Create a file for read-only access to an atomic_unchecked_t. */
53969 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
53970 - char const * name, atomic_t * val);
53971 + char const * name, atomic_unchecked_t * val);
53972
53973 /** create a directory */
53974 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
53975 diff -urNp linux-2.6.39.4/include/linux/padata.h linux-2.6.39.4/include/linux/padata.h
53976 --- linux-2.6.39.4/include/linux/padata.h 2011-05-19 00:06:34.000000000 -0400
53977 +++ linux-2.6.39.4/include/linux/padata.h 2011-08-05 19:44:37.000000000 -0400
53978 @@ -129,7 +129,7 @@ struct parallel_data {
53979 struct padata_instance *pinst;
53980 struct padata_parallel_queue __percpu *pqueue;
53981 struct padata_serial_queue __percpu *squeue;
53982 - atomic_t seq_nr;
53983 + atomic_unchecked_t seq_nr;
53984 atomic_t reorder_objects;
53985 atomic_t refcnt;
53986 unsigned int max_seq_nr;
53987 diff -urNp linux-2.6.39.4/include/linux/perf_event.h linux-2.6.39.4/include/linux/perf_event.h
53988 --- linux-2.6.39.4/include/linux/perf_event.h 2011-05-19 00:06:34.000000000 -0400
53989 +++ linux-2.6.39.4/include/linux/perf_event.h 2011-08-05 20:34:06.000000000 -0400
53990 @@ -759,8 +759,8 @@ struct perf_event {
53991
53992 enum perf_event_active_state state;
53993 unsigned int attach_state;
53994 - local64_t count;
53995 - atomic64_t child_count;
53996 + local64_t count; /* PaX: fix it one day */
53997 + atomic64_unchecked_t child_count;
53998
53999 /*
54000 * These are the total time in nanoseconds that the event
54001 @@ -811,8 +811,8 @@ struct perf_event {
54002 * These accumulate total time (in nanoseconds) that children
54003 * events have been enabled and running, respectively.
54004 */
54005 - atomic64_t child_total_time_enabled;
54006 - atomic64_t child_total_time_running;
54007 + atomic64_unchecked_t child_total_time_enabled;
54008 + atomic64_unchecked_t child_total_time_running;
54009
54010 /*
54011 * Protect attach/detach and child_list:
54012 diff -urNp linux-2.6.39.4/include/linux/pipe_fs_i.h linux-2.6.39.4/include/linux/pipe_fs_i.h
54013 --- linux-2.6.39.4/include/linux/pipe_fs_i.h 2011-05-19 00:06:34.000000000 -0400
54014 +++ linux-2.6.39.4/include/linux/pipe_fs_i.h 2011-08-05 19:44:37.000000000 -0400
54015 @@ -46,9 +46,9 @@ struct pipe_buffer {
54016 struct pipe_inode_info {
54017 wait_queue_head_t wait;
54018 unsigned int nrbufs, curbuf, buffers;
54019 - unsigned int readers;
54020 - unsigned int writers;
54021 - unsigned int waiting_writers;
54022 + atomic_t readers;
54023 + atomic_t writers;
54024 + atomic_t waiting_writers;
54025 unsigned int r_counter;
54026 unsigned int w_counter;
54027 struct page *tmp_page;
54028 diff -urNp linux-2.6.39.4/include/linux/pm_runtime.h linux-2.6.39.4/include/linux/pm_runtime.h
54029 --- linux-2.6.39.4/include/linux/pm_runtime.h 2011-05-19 00:06:34.000000000 -0400
54030 +++ linux-2.6.39.4/include/linux/pm_runtime.h 2011-08-05 19:44:37.000000000 -0400
54031 @@ -94,7 +94,7 @@ static inline bool pm_runtime_callbacks_
54032
54033 static inline void pm_runtime_mark_last_busy(struct device *dev)
54034 {
54035 - ACCESS_ONCE(dev->power.last_busy) = jiffies;
54036 + ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
54037 }
54038
54039 #else /* !CONFIG_PM_RUNTIME */
54040 diff -urNp linux-2.6.39.4/include/linux/poison.h linux-2.6.39.4/include/linux/poison.h
54041 --- linux-2.6.39.4/include/linux/poison.h 2011-05-19 00:06:34.000000000 -0400
54042 +++ linux-2.6.39.4/include/linux/poison.h 2011-08-05 19:44:37.000000000 -0400
54043 @@ -19,8 +19,8 @@
54044 * under normal circumstances, used to verify that nobody uses
54045 * non-initialized list entries.
54046 */
54047 -#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
54048 -#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
54049 +#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
54050 +#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
54051
54052 /********** include/linux/timer.h **********/
54053 /*
54054 diff -urNp linux-2.6.39.4/include/linux/preempt.h linux-2.6.39.4/include/linux/preempt.h
54055 --- linux-2.6.39.4/include/linux/preempt.h 2011-05-19 00:06:34.000000000 -0400
54056 +++ linux-2.6.39.4/include/linux/preempt.h 2011-08-05 20:34:06.000000000 -0400
54057 @@ -115,7 +115,7 @@ struct preempt_ops {
54058 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
54059 void (*sched_out)(struct preempt_notifier *notifier,
54060 struct task_struct *next);
54061 -};
54062 +} __no_const;
54063
54064 /**
54065 * preempt_notifier - key for installing preemption notifiers
54066 diff -urNp linux-2.6.39.4/include/linux/proc_fs.h linux-2.6.39.4/include/linux/proc_fs.h
54067 --- linux-2.6.39.4/include/linux/proc_fs.h 2011-05-19 00:06:34.000000000 -0400
54068 +++ linux-2.6.39.4/include/linux/proc_fs.h 2011-08-05 20:34:06.000000000 -0400
54069 @@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
54070 return proc_create_data(name, mode, parent, proc_fops, NULL);
54071 }
54072
54073 +static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
54074 + struct proc_dir_entry *parent, const struct file_operations *proc_fops)
54075 +{
54076 +#ifdef CONFIG_GRKERNSEC_PROC_USER
54077 + return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
54078 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
54079 + return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
54080 +#else
54081 + return proc_create_data(name, mode, parent, proc_fops, NULL);
54082 +#endif
54083 +}
54084 +
54085 +
54086 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
54087 mode_t mode, struct proc_dir_entry *base,
54088 read_proc_t *read_proc, void * data)
54089 @@ -258,7 +271,7 @@ union proc_op {
54090 int (*proc_show)(struct seq_file *m,
54091 struct pid_namespace *ns, struct pid *pid,
54092 struct task_struct *task);
54093 -};
54094 +} __no_const;
54095
54096 struct ctl_table_header;
54097 struct ctl_table;
54098 diff -urNp linux-2.6.39.4/include/linux/ptrace.h linux-2.6.39.4/include/linux/ptrace.h
54099 --- linux-2.6.39.4/include/linux/ptrace.h 2011-05-19 00:06:34.000000000 -0400
54100 +++ linux-2.6.39.4/include/linux/ptrace.h 2011-08-05 19:44:37.000000000 -0400
54101 @@ -115,10 +115,10 @@ extern void __ptrace_unlink(struct task_
54102 extern void exit_ptrace(struct task_struct *tracer);
54103 #define PTRACE_MODE_READ 1
54104 #define PTRACE_MODE_ATTACH 2
54105 -/* Returns 0 on success, -errno on denial. */
54106 -extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
54107 /* Returns true on success, false on denial. */
54108 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
54109 +/* Returns true on success, false on denial. */
54110 +extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
54111
54112 static inline int ptrace_reparented(struct task_struct *child)
54113 {
54114 diff -urNp linux-2.6.39.4/include/linux/random.h linux-2.6.39.4/include/linux/random.h
54115 --- linux-2.6.39.4/include/linux/random.h 2011-05-19 00:06:34.000000000 -0400
54116 +++ linux-2.6.39.4/include/linux/random.h 2011-08-05 19:44:37.000000000 -0400
54117 @@ -80,12 +80,17 @@ void srandom32(u32 seed);
54118
54119 u32 prandom32(struct rnd_state *);
54120
54121 +static inline unsigned long pax_get_random_long(void)
54122 +{
54123 + return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
54124 +}
54125 +
54126 /*
54127 * Handle minimum values for seeds
54128 */
54129 static inline u32 __seed(u32 x, u32 m)
54130 {
54131 - return (x < m) ? x + m : x;
54132 + return (x <= m) ? x + m + 1 : x;
54133 }
54134
54135 /**
54136 diff -urNp linux-2.6.39.4/include/linux/reboot.h linux-2.6.39.4/include/linux/reboot.h
54137 --- linux-2.6.39.4/include/linux/reboot.h 2011-05-19 00:06:34.000000000 -0400
54138 +++ linux-2.6.39.4/include/linux/reboot.h 2011-08-05 19:44:37.000000000 -0400
54139 @@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
54140 * Architecture-specific implementations of sys_reboot commands.
54141 */
54142
54143 -extern void machine_restart(char *cmd);
54144 -extern void machine_halt(void);
54145 -extern void machine_power_off(void);
54146 +extern void machine_restart(char *cmd) __noreturn;
54147 +extern void machine_halt(void) __noreturn;
54148 +extern void machine_power_off(void) __noreturn;
54149
54150 extern void machine_shutdown(void);
54151 struct pt_regs;
54152 @@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
54153 */
54154
54155 extern void kernel_restart_prepare(char *cmd);
54156 -extern void kernel_restart(char *cmd);
54157 -extern void kernel_halt(void);
54158 -extern void kernel_power_off(void);
54159 +extern void kernel_restart(char *cmd) __noreturn;
54160 +extern void kernel_halt(void) __noreturn;
54161 +extern void kernel_power_off(void) __noreturn;
54162
54163 extern int C_A_D; /* for sysctl */
54164 void ctrl_alt_del(void);
54165 @@ -76,7 +76,7 @@ extern int orderly_poweroff(bool force);
54166 * Emergency restart, callable from an interrupt handler.
54167 */
54168
54169 -extern void emergency_restart(void);
54170 +extern void emergency_restart(void) __noreturn;
54171 #include <asm/emergency-restart.h>
54172
54173 #endif
54174 diff -urNp linux-2.6.39.4/include/linux/reiserfs_fs.h linux-2.6.39.4/include/linux/reiserfs_fs.h
54175 --- linux-2.6.39.4/include/linux/reiserfs_fs.h 2011-05-19 00:06:34.000000000 -0400
54176 +++ linux-2.6.39.4/include/linux/reiserfs_fs.h 2011-08-05 20:34:06.000000000 -0400
54177 @@ -1406,7 +1406,7 @@ static inline loff_t max_reiserfs_offset
54178 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
54179
54180 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
54181 -#define get_generation(s) atomic_read (&fs_generation(s))
54182 +#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
54183 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
54184 #define __fs_changed(gen,s) (gen != get_generation (s))
54185 #define fs_changed(gen,s) \
54186 diff -urNp linux-2.6.39.4/include/linux/reiserfs_fs_sb.h linux-2.6.39.4/include/linux/reiserfs_fs_sb.h
54187 --- linux-2.6.39.4/include/linux/reiserfs_fs_sb.h 2011-05-19 00:06:34.000000000 -0400
54188 +++ linux-2.6.39.4/include/linux/reiserfs_fs_sb.h 2011-08-05 19:44:37.000000000 -0400
54189 @@ -386,7 +386,7 @@ struct reiserfs_sb_info {
54190 /* Comment? -Hans */
54191 wait_queue_head_t s_wait;
54192 /* To be obsoleted soon by per buffer seals.. -Hans */
54193 - atomic_t s_generation_counter; // increased by one every time the
54194 + atomic_unchecked_t s_generation_counter; // increased by one every time the
54195 // tree gets re-balanced
54196 unsigned long s_properties; /* File system properties. Currently holds
54197 on-disk FS format */
54198 diff -urNp linux-2.6.39.4/include/linux/relay.h linux-2.6.39.4/include/linux/relay.h
54199 --- linux-2.6.39.4/include/linux/relay.h 2011-05-19 00:06:34.000000000 -0400
54200 +++ linux-2.6.39.4/include/linux/relay.h 2011-08-05 20:34:06.000000000 -0400
54201 @@ -159,7 +159,7 @@ struct rchan_callbacks
54202 * The callback should return 0 if successful, negative if not.
54203 */
54204 int (*remove_buf_file)(struct dentry *dentry);
54205 -};
54206 +} __no_const;
54207
54208 /*
54209 * CONFIG_RELAY kernel API, kernel/relay.c
54210 diff -urNp linux-2.6.39.4/include/linux/rfkill.h linux-2.6.39.4/include/linux/rfkill.h
54211 --- linux-2.6.39.4/include/linux/rfkill.h 2011-05-19 00:06:34.000000000 -0400
54212 +++ linux-2.6.39.4/include/linux/rfkill.h 2011-08-05 20:34:06.000000000 -0400
54213 @@ -147,6 +147,7 @@ struct rfkill_ops {
54214 void (*query)(struct rfkill *rfkill, void *data);
54215 int (*set_block)(void *data, bool blocked);
54216 };
54217 +typedef struct rfkill_ops __no_const rfkill_ops_no_const;
54218
54219 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
54220 /**
54221 diff -urNp linux-2.6.39.4/include/linux/rmap.h linux-2.6.39.4/include/linux/rmap.h
54222 --- linux-2.6.39.4/include/linux/rmap.h 2011-05-19 00:06:34.000000000 -0400
54223 +++ linux-2.6.39.4/include/linux/rmap.h 2011-08-05 19:44:37.000000000 -0400
54224 @@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc
54225 void anon_vma_init(void); /* create anon_vma_cachep */
54226 int anon_vma_prepare(struct vm_area_struct *);
54227 void unlink_anon_vmas(struct vm_area_struct *);
54228 -int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
54229 -int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
54230 +int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
54231 +int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
54232 void __anon_vma_link(struct vm_area_struct *);
54233
54234 static inline void anon_vma_merge(struct vm_area_struct *vma,
54235 diff -urNp linux-2.6.39.4/include/linux/sched.h linux-2.6.39.4/include/linux/sched.h
54236 --- linux-2.6.39.4/include/linux/sched.h 2011-05-19 00:06:34.000000000 -0400
54237 +++ linux-2.6.39.4/include/linux/sched.h 2011-08-05 20:34:06.000000000 -0400
54238 @@ -100,6 +100,7 @@ struct bio_list;
54239 struct fs_struct;
54240 struct perf_event_context;
54241 struct blk_plug;
54242 +struct linux_binprm;
54243
54244 /*
54245 * List of flags we want to share for kernel threads,
54246 @@ -360,7 +361,7 @@ extern signed long schedule_timeout_inte
54247 extern signed long schedule_timeout_killable(signed long timeout);
54248 extern signed long schedule_timeout_uninterruptible(signed long timeout);
54249 asmlinkage void schedule(void);
54250 -extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
54251 +extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
54252
54253 struct nsproxy;
54254 struct user_namespace;
54255 @@ -381,10 +382,13 @@ struct user_namespace;
54256 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
54257
54258 extern int sysctl_max_map_count;
54259 +extern unsigned long sysctl_heap_stack_gap;
54260
54261 #include <linux/aio.h>
54262
54263 #ifdef CONFIG_MMU
54264 +extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
54265 +extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
54266 extern void arch_pick_mmap_layout(struct mm_struct *mm);
54267 extern unsigned long
54268 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
54269 @@ -629,6 +633,17 @@ struct signal_struct {
54270 #ifdef CONFIG_TASKSTATS
54271 struct taskstats *stats;
54272 #endif
54273 +
54274 +#ifdef CONFIG_GRKERNSEC
54275 + u32 curr_ip;
54276 + u32 saved_ip;
54277 + u32 gr_saddr;
54278 + u32 gr_daddr;
54279 + u16 gr_sport;
54280 + u16 gr_dport;
54281 + u8 used_accept:1;
54282 +#endif
54283 +
54284 #ifdef CONFIG_AUDIT
54285 unsigned audit_tty;
54286 struct tty_audit_buf *tty_audit_buf;
54287 @@ -701,6 +716,11 @@ struct user_struct {
54288 struct key *session_keyring; /* UID's default session keyring */
54289 #endif
54290
54291 +#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
54292 + unsigned int banned;
54293 + unsigned long ban_expires;
54294 +#endif
54295 +
54296 /* Hash table maintenance information */
54297 struct hlist_node uidhash_node;
54298 uid_t uid;
54299 @@ -1310,8 +1330,8 @@ struct task_struct {
54300 struct list_head thread_group;
54301
54302 struct completion *vfork_done; /* for vfork() */
54303 - int __user *set_child_tid; /* CLONE_CHILD_SETTID */
54304 - int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
54305 + pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
54306 + pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
54307
54308 cputime_t utime, stime, utimescaled, stimescaled;
54309 cputime_t gtime;
54310 @@ -1327,13 +1347,6 @@ struct task_struct {
54311 struct task_cputime cputime_expires;
54312 struct list_head cpu_timers[3];
54313
54314 -/* process credentials */
54315 - const struct cred __rcu *real_cred; /* objective and real subjective task
54316 - * credentials (COW) */
54317 - const struct cred __rcu *cred; /* effective (overridable) subjective task
54318 - * credentials (COW) */
54319 - struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
54320 -
54321 char comm[TASK_COMM_LEN]; /* executable name excluding path
54322 - access with [gs]et_task_comm (which lock
54323 it with task_lock())
54324 @@ -1350,8 +1363,16 @@ struct task_struct {
54325 #endif
54326 /* CPU-specific state of this task */
54327 struct thread_struct thread;
54328 +/* thread_info moved to task_struct */
54329 +#ifdef CONFIG_X86
54330 + struct thread_info tinfo;
54331 +#endif
54332 /* filesystem information */
54333 struct fs_struct *fs;
54334 +
54335 + const struct cred __rcu *cred; /* effective (overridable) subjective task
54336 + * credentials (COW) */
54337 +
54338 /* open file information */
54339 struct files_struct *files;
54340 /* namespaces */
54341 @@ -1398,6 +1419,11 @@ struct task_struct {
54342 struct rt_mutex_waiter *pi_blocked_on;
54343 #endif
54344
54345 +/* process credentials */
54346 + const struct cred __rcu *real_cred; /* objective and real subjective task
54347 + * credentials (COW) */
54348 + struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
54349 +
54350 #ifdef CONFIG_DEBUG_MUTEXES
54351 /* mutex deadlock detection */
54352 struct mutex_waiter *blocked_on;
54353 @@ -1508,6 +1534,21 @@ struct task_struct {
54354 unsigned long default_timer_slack_ns;
54355
54356 struct list_head *scm_work_list;
54357 +
54358 +#ifdef CONFIG_GRKERNSEC
54359 + /* grsecurity */
54360 + struct dentry *gr_chroot_dentry;
54361 + struct acl_subject_label *acl;
54362 + struct acl_role_label *role;
54363 + struct file *exec_file;
54364 + u16 acl_role_id;
54365 + /* is this the task that authenticated to the special role */
54366 + u8 acl_sp_role;
54367 + u8 is_writable;
54368 + u8 brute;
54369 + u8 gr_is_chrooted;
54370 +#endif
54371 +
54372 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
54373 /* Index of current stored address in ret_stack */
54374 int curr_ret_stack;
54375 @@ -1542,6 +1583,57 @@ struct task_struct {
54376 #endif
54377 };
54378
54379 +#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
54380 +#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
54381 +#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
54382 +#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
54383 +/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
54384 +#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
54385 +
54386 +#ifdef CONFIG_PAX_SOFTMODE
54387 +extern int pax_softmode;
54388 +#endif
54389 +
54390 +extern int pax_check_flags(unsigned long *);
54391 +
54392 +/* if tsk != current then task_lock must be held on it */
54393 +#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
54394 +static inline unsigned long pax_get_flags(struct task_struct *tsk)
54395 +{
54396 + if (likely(tsk->mm))
54397 + return tsk->mm->pax_flags;
54398 + else
54399 + return 0UL;
54400 +}
54401 +
54402 +/* if tsk != current then task_lock must be held on it */
54403 +static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
54404 +{
54405 + if (likely(tsk->mm)) {
54406 + tsk->mm->pax_flags = flags;
54407 + return 0;
54408 + }
54409 + return -EINVAL;
54410 +}
54411 +#endif
54412 +
54413 +#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
54414 +extern void pax_set_initial_flags(struct linux_binprm *bprm);
54415 +#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
54416 +extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
54417 +#endif
54418 +
54419 +extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
54420 +extern void pax_report_insns(void *pc, void *sp);
54421 +extern void pax_report_refcount_overflow(struct pt_regs *regs);
54422 +extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
54423 +
54424 +#ifdef CONFIG_PAX_MEMORY_STACKLEAK
54425 +extern void pax_track_stack(void);
54426 +#else
54427 +static inline void pax_track_stack(void) {}
54428 +#endif
54429 +
54430 /* Future-safe accessor for struct task_struct's cpus_allowed. */
54431 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
54432
54433 @@ -2009,7 +2101,9 @@ void yield(void);
54434 extern struct exec_domain default_exec_domain;
54435
54436 union thread_union {
54437 +#ifndef CONFIG_X86
54438 struct thread_info thread_info;
54439 +#endif
54440 unsigned long stack[THREAD_SIZE/sizeof(long)];
54441 };
54442
54443 @@ -2042,6 +2136,7 @@ extern struct pid_namespace init_pid_ns;
54444 */
54445
54446 extern struct task_struct *find_task_by_vpid(pid_t nr);
54447 +extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
54448 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
54449 struct pid_namespace *ns);
54450
54451 @@ -2179,7 +2274,7 @@ extern void __cleanup_sighand(struct sig
54452 extern void exit_itimers(struct signal_struct *);
54453 extern void flush_itimer_signals(void);
54454
54455 -extern NORET_TYPE void do_group_exit(int);
54456 +extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
54457
54458 extern void daemonize(const char *, ...);
54459 extern int allow_signal(int);
54460 @@ -2320,13 +2415,17 @@ static inline unsigned long *end_of_stac
54461
54462 #endif
54463
54464 -static inline int object_is_on_stack(void *obj)
54465 +static inline int object_starts_on_stack(void *obj)
54466 {
54467 - void *stack = task_stack_page(current);
54468 + const void *stack = task_stack_page(current);
54469
54470 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
54471 }
54472
54473 +#ifdef CONFIG_PAX_USERCOPY
54474 +extern int object_is_on_stack(const void *obj, unsigned long len);
54475 +#endif
54476 +
54477 extern void thread_info_cache_init(void);
54478
54479 #ifdef CONFIG_DEBUG_STACK_USAGE
54480 diff -urNp linux-2.6.39.4/include/linux/screen_info.h linux-2.6.39.4/include/linux/screen_info.h
54481 --- linux-2.6.39.4/include/linux/screen_info.h 2011-05-19 00:06:34.000000000 -0400
54482 +++ linux-2.6.39.4/include/linux/screen_info.h 2011-08-05 19:44:37.000000000 -0400
54483 @@ -43,7 +43,8 @@ struct screen_info {
54484 __u16 pages; /* 0x32 */
54485 __u16 vesa_attributes; /* 0x34 */
54486 __u32 capabilities; /* 0x36 */
54487 - __u8 _reserved[6]; /* 0x3a */
54488 + __u16 vesapm_size; /* 0x3a */
54489 + __u8 _reserved[4]; /* 0x3c */
54490 } __attribute__((packed));
54491
54492 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
54493 diff -urNp linux-2.6.39.4/include/linux/security.h linux-2.6.39.4/include/linux/security.h
54494 --- linux-2.6.39.4/include/linux/security.h 2011-05-19 00:06:34.000000000 -0400
54495 +++ linux-2.6.39.4/include/linux/security.h 2011-08-05 19:44:37.000000000 -0400
54496 @@ -36,6 +36,7 @@
54497 #include <linux/key.h>
54498 #include <linux/xfrm.h>
54499 #include <linux/slab.h>
54500 +#include <linux/grsecurity.h>
54501 #include <net/flow.h>
54502
54503 /* Maximum number of letters for an LSM name string */
54504 diff -urNp linux-2.6.39.4/include/linux/seq_file.h linux-2.6.39.4/include/linux/seq_file.h
54505 --- linux-2.6.39.4/include/linux/seq_file.h 2011-05-19 00:06:34.000000000 -0400
54506 +++ linux-2.6.39.4/include/linux/seq_file.h 2011-08-05 20:34:06.000000000 -0400
54507 @@ -32,6 +32,7 @@ struct seq_operations {
54508 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
54509 int (*show) (struct seq_file *m, void *v);
54510 };
54511 +typedef struct seq_operations __no_const seq_operations_no_const;
54512
54513 #define SEQ_SKIP 1
54514
54515 diff -urNp linux-2.6.39.4/include/linux/shm.h linux-2.6.39.4/include/linux/shm.h
54516 --- linux-2.6.39.4/include/linux/shm.h 2011-05-19 00:06:34.000000000 -0400
54517 +++ linux-2.6.39.4/include/linux/shm.h 2011-08-05 19:44:37.000000000 -0400
54518 @@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
54519 pid_t shm_cprid;
54520 pid_t shm_lprid;
54521 struct user_struct *mlock_user;
54522 +#ifdef CONFIG_GRKERNSEC
54523 + time_t shm_createtime;
54524 + pid_t shm_lapid;
54525 +#endif
54526 };
54527
54528 /* shm_mode upper byte flags */
54529 diff -urNp linux-2.6.39.4/include/linux/skbuff.h linux-2.6.39.4/include/linux/skbuff.h
54530 --- linux-2.6.39.4/include/linux/skbuff.h 2011-05-19 00:06:34.000000000 -0400
54531 +++ linux-2.6.39.4/include/linux/skbuff.h 2011-08-05 19:44:37.000000000 -0400
54532 @@ -592,7 +592,7 @@ static inline struct skb_shared_hwtstamp
54533 */
54534 static inline int skb_queue_empty(const struct sk_buff_head *list)
54535 {
54536 - return list->next == (struct sk_buff *)list;
54537 + return list->next == (const struct sk_buff *)list;
54538 }
54539
54540 /**
54541 @@ -605,7 +605,7 @@ static inline int skb_queue_empty(const
54542 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
54543 const struct sk_buff *skb)
54544 {
54545 - return skb->next == (struct sk_buff *)list;
54546 + return skb->next == (const struct sk_buff *)list;
54547 }
54548
54549 /**
54550 @@ -618,7 +618,7 @@ static inline bool skb_queue_is_last(con
54551 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
54552 const struct sk_buff *skb)
54553 {
54554 - return skb->prev == (struct sk_buff *)list;
54555 + return skb->prev == (const struct sk_buff *)list;
54556 }
54557
54558 /**
54559 @@ -1435,7 +1435,7 @@ static inline int pskb_network_may_pull(
54560 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
54561 */
54562 #ifndef NET_SKB_PAD
54563 -#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
54564 +#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
54565 #endif
54566
54567 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
54568 diff -urNp linux-2.6.39.4/include/linux/slab_def.h linux-2.6.39.4/include/linux/slab_def.h
54569 --- linux-2.6.39.4/include/linux/slab_def.h 2011-05-19 00:06:34.000000000 -0400
54570 +++ linux-2.6.39.4/include/linux/slab_def.h 2011-08-05 19:44:37.000000000 -0400
54571 @@ -96,10 +96,10 @@ struct kmem_cache {
54572 unsigned long node_allocs;
54573 unsigned long node_frees;
54574 unsigned long node_overflow;
54575 - atomic_t allochit;
54576 - atomic_t allocmiss;
54577 - atomic_t freehit;
54578 - atomic_t freemiss;
54579 + atomic_unchecked_t allochit;
54580 + atomic_unchecked_t allocmiss;
54581 + atomic_unchecked_t freehit;
54582 + atomic_unchecked_t freemiss;
54583
54584 /*
54585 * If debugging is enabled, then the allocator can add additional
54586 diff -urNp linux-2.6.39.4/include/linux/slab.h linux-2.6.39.4/include/linux/slab.h
54587 --- linux-2.6.39.4/include/linux/slab.h 2011-05-19 00:06:34.000000000 -0400
54588 +++ linux-2.6.39.4/include/linux/slab.h 2011-08-05 19:44:37.000000000 -0400
54589 @@ -11,12 +11,20 @@
54590
54591 #include <linux/gfp.h>
54592 #include <linux/types.h>
54593 +#include <linux/err.h>
54594
54595 /*
54596 * Flags to pass to kmem_cache_create().
54597 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
54598 */
54599 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
54600 +
54601 +#ifdef CONFIG_PAX_USERCOPY
54602 +#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
54603 +#else
54604 +#define SLAB_USERCOPY 0x00000000UL
54605 +#endif
54606 +
54607 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
54608 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
54609 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
54610 @@ -87,10 +95,13 @@
54611 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
54612 * Both make kfree a no-op.
54613 */
54614 -#define ZERO_SIZE_PTR ((void *)16)
54615 +#define ZERO_SIZE_PTR \
54616 +({ \
54617 + BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
54618 + (void *)(-MAX_ERRNO-1L); \
54619 +})
54620
54621 -#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
54622 - (unsigned long)ZERO_SIZE_PTR)
54623 +#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
54624
54625 /*
54626 * struct kmem_cache related prototypes
54627 @@ -141,6 +152,7 @@ void * __must_check krealloc(const void
54628 void kfree(const void *);
54629 void kzfree(const void *);
54630 size_t ksize(const void *);
54631 +void check_object_size(const void *ptr, unsigned long n, bool to);
54632
54633 /*
54634 * Allocator specific definitions. These are mainly used to establish optimized
54635 @@ -333,4 +345,59 @@ static inline void *kzalloc_node(size_t
54636
54637 void __init kmem_cache_init_late(void);
54638
54639 +#define kmalloc(x, y) \
54640 +({ \
54641 + void *___retval; \
54642 + intoverflow_t ___x = (intoverflow_t)x; \
54643 + if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n")) \
54644 + ___retval = NULL; \
54645 + else \
54646 + ___retval = kmalloc((size_t)___x, (y)); \
54647 + ___retval; \
54648 +})
54649 +
54650 +#define kmalloc_node(x, y, z) \
54651 +({ \
54652 + void *___retval; \
54653 + intoverflow_t ___x = (intoverflow_t)x; \
54654 + if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
54655 + ___retval = NULL; \
54656 + else \
54657 + ___retval = kmalloc_node((size_t)___x, (y), (z));\
54658 + ___retval; \
54659 +})
54660 +
54661 +#define kzalloc(x, y) \
54662 +({ \
54663 + void *___retval; \
54664 + intoverflow_t ___x = (intoverflow_t)x; \
54665 + if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n")) \
54666 + ___retval = NULL; \
54667 + else \
54668 + ___retval = kzalloc((size_t)___x, (y)); \
54669 + ___retval; \
54670 +})
54671 +
54672 +#define __krealloc(x, y, z) \
54673 +({ \
54674 + void *___retval; \
54675 + intoverflow_t ___y = (intoverflow_t)y; \
54676 + if (WARN(___y > ULONG_MAX, "__krealloc size overflow\n"))\
54677 + ___retval = NULL; \
54678 + else \
54679 + ___retval = __krealloc((x), (size_t)___y, (z)); \
54680 + ___retval; \
54681 +})
54682 +
54683 +#define krealloc(x, y, z) \
54684 +({ \
54685 + void *___retval; \
54686 + intoverflow_t ___y = (intoverflow_t)y; \
54687 + if (WARN(___y > ULONG_MAX, "krealloc size overflow\n")) \
54688 + ___retval = NULL; \
54689 + else \
54690 + ___retval = krealloc((x), (size_t)___y, (z)); \
54691 + ___retval; \
54692 +})
54693 +
54694 #endif /* _LINUX_SLAB_H */
54695 diff -urNp linux-2.6.39.4/include/linux/slub_def.h linux-2.6.39.4/include/linux/slub_def.h
54696 --- linux-2.6.39.4/include/linux/slub_def.h 2011-05-19 00:06:34.000000000 -0400
54697 +++ linux-2.6.39.4/include/linux/slub_def.h 2011-08-05 20:34:06.000000000 -0400
54698 @@ -84,7 +84,7 @@ struct kmem_cache {
54699 struct kmem_cache_order_objects max;
54700 struct kmem_cache_order_objects min;
54701 gfp_t allocflags; /* gfp flags to use on each alloc */
54702 - int refcount; /* Refcount for slab cache destroy */
54703 + atomic_t refcount; /* Refcount for slab cache destroy */
54704 void (*ctor)(void *);
54705 int inuse; /* Offset to metadata */
54706 int align; /* Alignment */
54707 @@ -218,7 +218,7 @@ static __always_inline struct kmem_cache
54708 }
54709
54710 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
54711 -void *__kmalloc(size_t size, gfp_t flags);
54712 +void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
54713
54714 static __always_inline void *
54715 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
54716 diff -urNp linux-2.6.39.4/include/linux/sonet.h linux-2.6.39.4/include/linux/sonet.h
54717 --- linux-2.6.39.4/include/linux/sonet.h 2011-05-19 00:06:34.000000000 -0400
54718 +++ linux-2.6.39.4/include/linux/sonet.h 2011-08-05 19:44:37.000000000 -0400
54719 @@ -61,7 +61,7 @@ struct sonet_stats {
54720 #include <asm/atomic.h>
54721
54722 struct k_sonet_stats {
54723 -#define __HANDLE_ITEM(i) atomic_t i
54724 +#define __HANDLE_ITEM(i) atomic_unchecked_t i
54725 __SONET_ITEMS
54726 #undef __HANDLE_ITEM
54727 };
54728 diff -urNp linux-2.6.39.4/include/linux/sunrpc/clnt.h linux-2.6.39.4/include/linux/sunrpc/clnt.h
54729 --- linux-2.6.39.4/include/linux/sunrpc/clnt.h 2011-05-19 00:06:34.000000000 -0400
54730 +++ linux-2.6.39.4/include/linux/sunrpc/clnt.h 2011-08-05 19:44:37.000000000 -0400
54731 @@ -169,9 +169,9 @@ static inline unsigned short rpc_get_por
54732 {
54733 switch (sap->sa_family) {
54734 case AF_INET:
54735 - return ntohs(((struct sockaddr_in *)sap)->sin_port);
54736 + return ntohs(((const struct sockaddr_in *)sap)->sin_port);
54737 case AF_INET6:
54738 - return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
54739 + return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
54740 }
54741 return 0;
54742 }
54743 @@ -204,7 +204,7 @@ static inline bool __rpc_cmp_addr4(const
54744 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
54745 const struct sockaddr *src)
54746 {
54747 - const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
54748 + const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
54749 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
54750
54751 dsin->sin_family = ssin->sin_family;
54752 @@ -301,7 +301,7 @@ static inline u32 rpc_get_scope_id(const
54753 if (sa->sa_family != AF_INET6)
54754 return 0;
54755
54756 - return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
54757 + return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
54758 }
54759
54760 #endif /* __KERNEL__ */
54761 diff -urNp linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h
54762 --- linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h 2011-05-19 00:06:34.000000000 -0400
54763 +++ linux-2.6.39.4/include/linux/sunrpc/svc_rdma.h 2011-08-05 19:44:37.000000000 -0400
54764 @@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
54765 extern unsigned int svcrdma_max_requests;
54766 extern unsigned int svcrdma_max_req_size;
54767
54768 -extern atomic_t rdma_stat_recv;
54769 -extern atomic_t rdma_stat_read;
54770 -extern atomic_t rdma_stat_write;
54771 -extern atomic_t rdma_stat_sq_starve;
54772 -extern atomic_t rdma_stat_rq_starve;
54773 -extern atomic_t rdma_stat_rq_poll;
54774 -extern atomic_t rdma_stat_rq_prod;
54775 -extern atomic_t rdma_stat_sq_poll;
54776 -extern atomic_t rdma_stat_sq_prod;
54777 +extern atomic_unchecked_t rdma_stat_recv;
54778 +extern atomic_unchecked_t rdma_stat_read;
54779 +extern atomic_unchecked_t rdma_stat_write;
54780 +extern atomic_unchecked_t rdma_stat_sq_starve;
54781 +extern atomic_unchecked_t rdma_stat_rq_starve;
54782 +extern atomic_unchecked_t rdma_stat_rq_poll;
54783 +extern atomic_unchecked_t rdma_stat_rq_prod;
54784 +extern atomic_unchecked_t rdma_stat_sq_poll;
54785 +extern atomic_unchecked_t rdma_stat_sq_prod;
54786
54787 #define RPCRDMA_VERSION 1
54788
54789 diff -urNp linux-2.6.39.4/include/linux/sysctl.h linux-2.6.39.4/include/linux/sysctl.h
54790 --- linux-2.6.39.4/include/linux/sysctl.h 2011-05-19 00:06:34.000000000 -0400
54791 +++ linux-2.6.39.4/include/linux/sysctl.h 2011-08-05 19:44:37.000000000 -0400
54792 @@ -155,7 +155,11 @@ enum
54793 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
54794 };
54795
54796 -
54797 +#ifdef CONFIG_PAX_SOFTMODE
54798 +enum {
54799 + PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
54800 +};
54801 +#endif
54802
54803 /* CTL_VM names: */
54804 enum
54805 @@ -967,6 +971,8 @@ typedef int proc_handler (struct ctl_tab
54806
54807 extern int proc_dostring(struct ctl_table *, int,
54808 void __user *, size_t *, loff_t *);
54809 +extern int proc_dostring_modpriv(struct ctl_table *, int,
54810 + void __user *, size_t *, loff_t *);
54811 extern int proc_dointvec(struct ctl_table *, int,
54812 void __user *, size_t *, loff_t *);
54813 extern int proc_dointvec_minmax(struct ctl_table *, int,
54814 diff -urNp linux-2.6.39.4/include/linux/tty_ldisc.h linux-2.6.39.4/include/linux/tty_ldisc.h
54815 --- linux-2.6.39.4/include/linux/tty_ldisc.h 2011-05-19 00:06:34.000000000 -0400
54816 +++ linux-2.6.39.4/include/linux/tty_ldisc.h 2011-08-05 19:44:37.000000000 -0400
54817 @@ -148,7 +148,7 @@ struct tty_ldisc_ops {
54818
54819 struct module *owner;
54820
54821 - int refcount;
54822 + atomic_t refcount;
54823 };
54824
54825 struct tty_ldisc {
54826 diff -urNp linux-2.6.39.4/include/linux/types.h linux-2.6.39.4/include/linux/types.h
54827 --- linux-2.6.39.4/include/linux/types.h 2011-05-19 00:06:34.000000000 -0400
54828 +++ linux-2.6.39.4/include/linux/types.h 2011-08-05 19:44:37.000000000 -0400
54829 @@ -213,10 +213,26 @@ typedef struct {
54830 int counter;
54831 } atomic_t;
54832
54833 +#ifdef CONFIG_PAX_REFCOUNT
54834 +typedef struct {
54835 + int counter;
54836 +} atomic_unchecked_t;
54837 +#else
54838 +typedef atomic_t atomic_unchecked_t;
54839 +#endif
54840 +
54841 #ifdef CONFIG_64BIT
54842 typedef struct {
54843 long counter;
54844 } atomic64_t;
54845 +
54846 +#ifdef CONFIG_PAX_REFCOUNT
54847 +typedef struct {
54848 + long counter;
54849 +} atomic64_unchecked_t;
54850 +#else
54851 +typedef atomic64_t atomic64_unchecked_t;
54852 +#endif
54853 #endif
54854
54855 struct list_head {
54856 diff -urNp linux-2.6.39.4/include/linux/uaccess.h linux-2.6.39.4/include/linux/uaccess.h
54857 --- linux-2.6.39.4/include/linux/uaccess.h 2011-05-19 00:06:34.000000000 -0400
54858 +++ linux-2.6.39.4/include/linux/uaccess.h 2011-08-05 19:44:37.000000000 -0400
54859 @@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
54860 long ret; \
54861 mm_segment_t old_fs = get_fs(); \
54862 \
54863 - set_fs(KERNEL_DS); \
54864 pagefault_disable(); \
54865 + set_fs(KERNEL_DS); \
54866 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
54867 - pagefault_enable(); \
54868 set_fs(old_fs); \
54869 + pagefault_enable(); \
54870 ret; \
54871 })
54872
54873 @@ -93,8 +93,8 @@ static inline unsigned long __copy_from_
54874 * Safely read from address @src to the buffer at @dst. If a kernel fault
54875 * happens, handle that and return -EFAULT.
54876 */
54877 -extern long probe_kernel_read(void *dst, void *src, size_t size);
54878 -extern long __probe_kernel_read(void *dst, void *src, size_t size);
54879 +extern long probe_kernel_read(void *dst, const void *src, size_t size);
54880 +extern long __probe_kernel_read(void *dst, const void *src, size_t size);
54881
54882 /*
54883 * probe_kernel_write(): safely attempt to write to a location
54884 @@ -105,7 +105,7 @@ extern long __probe_kernel_read(void *ds
54885 * Safely write to address @dst from the buffer at @src. If a kernel fault
54886 * happens, handle that and return -EFAULT.
54887 */
54888 -extern long notrace probe_kernel_write(void *dst, void *src, size_t size);
54889 -extern long notrace __probe_kernel_write(void *dst, void *src, size_t size);
54890 +extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
54891 +extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
54892
54893 #endif /* __LINUX_UACCESS_H__ */
54894 diff -urNp linux-2.6.39.4/include/linux/unaligned/access_ok.h linux-2.6.39.4/include/linux/unaligned/access_ok.h
54895 --- linux-2.6.39.4/include/linux/unaligned/access_ok.h 2011-05-19 00:06:34.000000000 -0400
54896 +++ linux-2.6.39.4/include/linux/unaligned/access_ok.h 2011-08-05 19:44:37.000000000 -0400
54897 @@ -6,32 +6,32 @@
54898
54899 static inline u16 get_unaligned_le16(const void *p)
54900 {
54901 - return le16_to_cpup((__le16 *)p);
54902 + return le16_to_cpup((const __le16 *)p);
54903 }
54904
54905 static inline u32 get_unaligned_le32(const void *p)
54906 {
54907 - return le32_to_cpup((__le32 *)p);
54908 + return le32_to_cpup((const __le32 *)p);
54909 }
54910
54911 static inline u64 get_unaligned_le64(const void *p)
54912 {
54913 - return le64_to_cpup((__le64 *)p);
54914 + return le64_to_cpup((const __le64 *)p);
54915 }
54916
54917 static inline u16 get_unaligned_be16(const void *p)
54918 {
54919 - return be16_to_cpup((__be16 *)p);
54920 + return be16_to_cpup((const __be16 *)p);
54921 }
54922
54923 static inline u32 get_unaligned_be32(const void *p)
54924 {
54925 - return be32_to_cpup((__be32 *)p);
54926 + return be32_to_cpup((const __be32 *)p);
54927 }
54928
54929 static inline u64 get_unaligned_be64(const void *p)
54930 {
54931 - return be64_to_cpup((__be64 *)p);
54932 + return be64_to_cpup((const __be64 *)p);
54933 }
54934
54935 static inline void put_unaligned_le16(u16 val, void *p)
54936 diff -urNp linux-2.6.39.4/include/linux/vmalloc.h linux-2.6.39.4/include/linux/vmalloc.h
54937 --- linux-2.6.39.4/include/linux/vmalloc.h 2011-05-19 00:06:34.000000000 -0400
54938 +++ linux-2.6.39.4/include/linux/vmalloc.h 2011-08-05 19:44:37.000000000 -0400
54939 @@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
54940 #define VM_MAP 0x00000004 /* vmap()ed pages */
54941 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
54942 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
54943 +
54944 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
54945 +#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
54946 +#endif
54947 +
54948 /* bits [20..32] reserved for arch specific ioremap internals */
54949
54950 /*
54951 @@ -155,4 +160,103 @@ pcpu_free_vm_areas(struct vm_struct **vm
54952 # endif
54953 #endif
54954
54955 +#define vmalloc(x) \
54956 +({ \
54957 + void *___retval; \
54958 + intoverflow_t ___x = (intoverflow_t)x; \
54959 + if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
54960 + ___retval = NULL; \
54961 + else \
54962 + ___retval = vmalloc((unsigned long)___x); \
54963 + ___retval; \
54964 +})
54965 +
54966 +#define vzalloc(x) \
54967 +({ \
54968 + void *___retval; \
54969 + intoverflow_t ___x = (intoverflow_t)x; \
54970 + if (WARN(___x > ULONG_MAX, "vzalloc size overflow\n")) \
54971 + ___retval = NULL; \
54972 + else \
54973 + ___retval = vzalloc((unsigned long)___x); \
54974 + ___retval; \
54975 +})
54976 +
54977 +#define __vmalloc(x, y, z) \
54978 +({ \
54979 + void *___retval; \
54980 + intoverflow_t ___x = (intoverflow_t)x; \
54981 + if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
54982 + ___retval = NULL; \
54983 + else \
54984 + ___retval = __vmalloc((unsigned long)___x, (y), (z));\
54985 + ___retval; \
54986 +})
54987 +
54988 +#define vmalloc_user(x) \
54989 +({ \
54990 + void *___retval; \
54991 + intoverflow_t ___x = (intoverflow_t)x; \
54992 + if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
54993 + ___retval = NULL; \
54994 + else \
54995 + ___retval = vmalloc_user((unsigned long)___x); \
54996 + ___retval; \
54997 +})
54998 +
54999 +#define vmalloc_exec(x) \
55000 +({ \
55001 + void *___retval; \
55002 + intoverflow_t ___x = (intoverflow_t)x; \
55003 + if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
55004 + ___retval = NULL; \
55005 + else \
55006 + ___retval = vmalloc_exec((unsigned long)___x); \
55007 + ___retval; \
55008 +})
55009 +
55010 +#define vmalloc_node(x, y) \
55011 +({ \
55012 + void *___retval; \
55013 + intoverflow_t ___x = (intoverflow_t)x; \
55014 + if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
55015 + ___retval = NULL; \
55016 + else \
55017 + ___retval = vmalloc_node((unsigned long)___x, (y));\
55018 + ___retval; \
55019 +})
55020 +
55021 +#define vzalloc_node(x, y) \
55022 +({ \
55023 + void *___retval; \
55024 + intoverflow_t ___x = (intoverflow_t)x; \
55025 + if (WARN(___x > ULONG_MAX, "vzalloc_node size overflow\n"))\
55026 + ___retval = NULL; \
55027 + else \
55028 + ___retval = vzalloc_node((unsigned long)___x, (y));\
55029 + ___retval; \
55030 +})
55031 +
55032 +#define vmalloc_32(x) \
55033 +({ \
55034 + void *___retval; \
55035 + intoverflow_t ___x = (intoverflow_t)x; \
55036 + if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
55037 + ___retval = NULL; \
55038 + else \
55039 + ___retval = vmalloc_32((unsigned long)___x); \
55040 + ___retval; \
55041 +})
55042 +
55043 +#define vmalloc_32_user(x) \
55044 +({ \
55045 +void *___retval; \
55046 + intoverflow_t ___x = (intoverflow_t)x; \
55047 + if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
55048 + ___retval = NULL; \
55049 + else \
55050 + ___retval = vmalloc_32_user((unsigned long)___x);\
55051 + ___retval; \
55052 +})
55053 +
55054 #endif /* _LINUX_VMALLOC_H */
55055 diff -urNp linux-2.6.39.4/include/linux/vmstat.h linux-2.6.39.4/include/linux/vmstat.h
55056 --- linux-2.6.39.4/include/linux/vmstat.h 2011-05-19 00:06:34.000000000 -0400
55057 +++ linux-2.6.39.4/include/linux/vmstat.h 2011-08-05 19:44:37.000000000 -0400
55058 @@ -147,18 +147,18 @@ static inline void vm_events_fold_cpu(in
55059 /*
55060 * Zone based page accounting with per cpu differentials.
55061 */
55062 -extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
55063 +extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
55064
55065 static inline void zone_page_state_add(long x, struct zone *zone,
55066 enum zone_stat_item item)
55067 {
55068 - atomic_long_add(x, &zone->vm_stat[item]);
55069 - atomic_long_add(x, &vm_stat[item]);
55070 + atomic_long_add_unchecked(x, &zone->vm_stat[item]);
55071 + atomic_long_add_unchecked(x, &vm_stat[item]);
55072 }
55073
55074 static inline unsigned long global_page_state(enum zone_stat_item item)
55075 {
55076 - long x = atomic_long_read(&vm_stat[item]);
55077 + long x = atomic_long_read_unchecked(&vm_stat[item]);
55078 #ifdef CONFIG_SMP
55079 if (x < 0)
55080 x = 0;
55081 @@ -169,7 +169,7 @@ static inline unsigned long global_page_
55082 static inline unsigned long zone_page_state(struct zone *zone,
55083 enum zone_stat_item item)
55084 {
55085 - long x = atomic_long_read(&zone->vm_stat[item]);
55086 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
55087 #ifdef CONFIG_SMP
55088 if (x < 0)
55089 x = 0;
55090 @@ -186,7 +186,7 @@ static inline unsigned long zone_page_st
55091 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
55092 enum zone_stat_item item)
55093 {
55094 - long x = atomic_long_read(&zone->vm_stat[item]);
55095 + long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
55096
55097 #ifdef CONFIG_SMP
55098 int cpu;
55099 @@ -280,8 +280,8 @@ static inline void __mod_zone_page_state
55100
55101 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
55102 {
55103 - atomic_long_inc(&zone->vm_stat[item]);
55104 - atomic_long_inc(&vm_stat[item]);
55105 + atomic_long_inc_unchecked(&zone->vm_stat[item]);
55106 + atomic_long_inc_unchecked(&vm_stat[item]);
55107 }
55108
55109 static inline void __inc_zone_page_state(struct page *page,
55110 @@ -292,8 +292,8 @@ static inline void __inc_zone_page_state
55111
55112 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
55113 {
55114 - atomic_long_dec(&zone->vm_stat[item]);
55115 - atomic_long_dec(&vm_stat[item]);
55116 + atomic_long_dec_unchecked(&zone->vm_stat[item]);
55117 + atomic_long_dec_unchecked(&vm_stat[item]);
55118 }
55119
55120 static inline void __dec_zone_page_state(struct page *page,
55121 diff -urNp linux-2.6.39.4/include/media/saa7146_vv.h linux-2.6.39.4/include/media/saa7146_vv.h
55122 --- linux-2.6.39.4/include/media/saa7146_vv.h 2011-05-19 00:06:34.000000000 -0400
55123 +++ linux-2.6.39.4/include/media/saa7146_vv.h 2011-08-05 20:34:06.000000000 -0400
55124 @@ -163,7 +163,7 @@ struct saa7146_ext_vv
55125 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
55126
55127 /* the extension can override this */
55128 - struct v4l2_ioctl_ops ops;
55129 + v4l2_ioctl_ops_no_const ops;
55130 /* pointer to the saa7146 core ops */
55131 const struct v4l2_ioctl_ops *core_ops;
55132
55133 diff -urNp linux-2.6.39.4/include/media/v4l2-dev.h linux-2.6.39.4/include/media/v4l2-dev.h
55134 --- linux-2.6.39.4/include/media/v4l2-dev.h 2011-05-19 00:06:34.000000000 -0400
55135 +++ linux-2.6.39.4/include/media/v4l2-dev.h 2011-08-05 20:34:06.000000000 -0400
55136 @@ -56,7 +56,7 @@ int v4l2_prio_check(struct v4l2_prio_sta
55137
55138
55139 struct v4l2_file_operations {
55140 - struct module *owner;
55141 + struct module * const owner;
55142 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
55143 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
55144 unsigned int (*poll) (struct file *, struct poll_table_struct *);
55145 diff -urNp linux-2.6.39.4/include/media/v4l2-device.h linux-2.6.39.4/include/media/v4l2-device.h
55146 --- linux-2.6.39.4/include/media/v4l2-device.h 2011-05-19 00:06:34.000000000 -0400
55147 +++ linux-2.6.39.4/include/media/v4l2-device.h 2011-08-05 19:44:37.000000000 -0400
55148 @@ -95,7 +95,7 @@ int __must_check v4l2_device_register(st
55149 this function returns 0. If the name ends with a digit (e.g. cx18),
55150 then the name will be set to cx18-0 since cx180 looks really odd. */
55151 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
55152 - atomic_t *instance);
55153 + atomic_unchecked_t *instance);
55154
55155 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
55156 Since the parent disappears this ensures that v4l2_dev doesn't have an
55157 diff -urNp linux-2.6.39.4/include/media/v4l2-ioctl.h linux-2.6.39.4/include/media/v4l2-ioctl.h
55158 --- linux-2.6.39.4/include/media/v4l2-ioctl.h 2011-05-19 00:06:34.000000000 -0400
55159 +++ linux-2.6.39.4/include/media/v4l2-ioctl.h 2011-08-05 20:34:06.000000000 -0400
55160 @@ -272,6 +272,7 @@ struct v4l2_ioctl_ops {
55161 long (*vidioc_default) (struct file *file, void *fh,
55162 bool valid_prio, int cmd, void *arg);
55163 };
55164 +typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
55165
55166
55167 /* v4l debugging and diagnostics */
55168 diff -urNp linux-2.6.39.4/include/net/caif/cfctrl.h linux-2.6.39.4/include/net/caif/cfctrl.h
55169 --- linux-2.6.39.4/include/net/caif/cfctrl.h 2011-05-19 00:06:34.000000000 -0400
55170 +++ linux-2.6.39.4/include/net/caif/cfctrl.h 2011-08-05 20:34:06.000000000 -0400
55171 @@ -52,7 +52,7 @@ struct cfctrl_rsp {
55172 void (*radioset_rsp)(void);
55173 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
55174 struct cflayer *client_layer);
55175 -};
55176 +} __no_const;
55177
55178 /* Link Setup Parameters for CAIF-Links. */
55179 struct cfctrl_link_param {
55180 @@ -101,8 +101,8 @@ struct cfctrl_request_info {
55181 struct cfctrl {
55182 struct cfsrvl serv;
55183 struct cfctrl_rsp res;
55184 - atomic_t req_seq_no;
55185 - atomic_t rsp_seq_no;
55186 + atomic_unchecked_t req_seq_no;
55187 + atomic_unchecked_t rsp_seq_no;
55188 struct list_head list;
55189 /* Protects from simultaneous access to first_req list */
55190 spinlock_t info_list_lock;
55191 diff -urNp linux-2.6.39.4/include/net/flow.h linux-2.6.39.4/include/net/flow.h
55192 --- linux-2.6.39.4/include/net/flow.h 2011-05-19 00:06:34.000000000 -0400
55193 +++ linux-2.6.39.4/include/net/flow.h 2011-08-05 19:44:37.000000000 -0400
55194 @@ -167,6 +167,6 @@ extern struct flow_cache_object *flow_ca
55195 u8 dir, flow_resolve_t resolver, void *ctx);
55196
55197 extern void flow_cache_flush(void);
55198 -extern atomic_t flow_cache_genid;
55199 +extern atomic_unchecked_t flow_cache_genid;
55200
55201 #endif
55202 diff -urNp linux-2.6.39.4/include/net/inetpeer.h linux-2.6.39.4/include/net/inetpeer.h
55203 --- linux-2.6.39.4/include/net/inetpeer.h 2011-05-19 00:06:34.000000000 -0400
55204 +++ linux-2.6.39.4/include/net/inetpeer.h 2011-08-05 19:44:37.000000000 -0400
55205 @@ -43,8 +43,8 @@ struct inet_peer {
55206 */
55207 union {
55208 struct {
55209 - atomic_t rid; /* Frag reception counter */
55210 - atomic_t ip_id_count; /* IP ID for the next packet */
55211 + atomic_unchecked_t rid; /* Frag reception counter */
55212 + atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
55213 __u32 tcp_ts;
55214 __u32 tcp_ts_stamp;
55215 u32 metrics[RTAX_MAX];
55216 @@ -108,7 +108,7 @@ static inline __u16 inet_getid(struct in
55217 {
55218 more++;
55219 inet_peer_refcheck(p);
55220 - return atomic_add_return(more, &p->ip_id_count) - more;
55221 + return atomic_add_return_unchecked(more, &p->ip_id_count) - more;
55222 }
55223
55224 #endif /* _NET_INETPEER_H */
55225 diff -urNp linux-2.6.39.4/include/net/ip_fib.h linux-2.6.39.4/include/net/ip_fib.h
55226 --- linux-2.6.39.4/include/net/ip_fib.h 2011-05-19 00:06:34.000000000 -0400
55227 +++ linux-2.6.39.4/include/net/ip_fib.h 2011-08-05 19:44:37.000000000 -0400
55228 @@ -146,7 +146,7 @@ extern __be32 fib_info_update_nh_saddr(s
55229
55230 #define FIB_RES_SADDR(net, res) \
55231 ((FIB_RES_NH(res).nh_saddr_genid == \
55232 - atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
55233 + atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
55234 FIB_RES_NH(res).nh_saddr : \
55235 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
55236 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
55237 diff -urNp linux-2.6.39.4/include/net/ip_vs.h linux-2.6.39.4/include/net/ip_vs.h
55238 --- linux-2.6.39.4/include/net/ip_vs.h 2011-07-09 09:18:51.000000000 -0400
55239 +++ linux-2.6.39.4/include/net/ip_vs.h 2011-08-05 19:44:37.000000000 -0400
55240 @@ -512,7 +512,7 @@ struct ip_vs_conn {
55241 struct ip_vs_conn *control; /* Master control connection */
55242 atomic_t n_control; /* Number of controlled ones */
55243 struct ip_vs_dest *dest; /* real server */
55244 - atomic_t in_pkts; /* incoming packet counter */
55245 + atomic_unchecked_t in_pkts; /* incoming packet counter */
55246
55247 /* packet transmitter for different forwarding methods. If it
55248 mangles the packet, it must return NF_DROP or better NF_STOLEN,
55249 @@ -650,7 +650,7 @@ struct ip_vs_dest {
55250 __be16 port; /* port number of the server */
55251 union nf_inet_addr addr; /* IP address of the server */
55252 volatile unsigned flags; /* dest status flags */
55253 - atomic_t conn_flags; /* flags to copy to conn */
55254 + atomic_unchecked_t conn_flags; /* flags to copy to conn */
55255 atomic_t weight; /* server weight */
55256
55257 atomic_t refcnt; /* reference counter */
55258 diff -urNp linux-2.6.39.4/include/net/irda/ircomm_core.h linux-2.6.39.4/include/net/irda/ircomm_core.h
55259 --- linux-2.6.39.4/include/net/irda/ircomm_core.h 2011-05-19 00:06:34.000000000 -0400
55260 +++ linux-2.6.39.4/include/net/irda/ircomm_core.h 2011-08-05 20:34:06.000000000 -0400
55261 @@ -51,7 +51,7 @@ typedef struct {
55262 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
55263 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
55264 struct ircomm_info *);
55265 -} call_t;
55266 +} __no_const call_t;
55267
55268 struct ircomm_cb {
55269 irda_queue_t queue;
55270 diff -urNp linux-2.6.39.4/include/net/irda/ircomm_tty.h linux-2.6.39.4/include/net/irda/ircomm_tty.h
55271 --- linux-2.6.39.4/include/net/irda/ircomm_tty.h 2011-05-19 00:06:34.000000000 -0400
55272 +++ linux-2.6.39.4/include/net/irda/ircomm_tty.h 2011-08-05 19:44:37.000000000 -0400
55273 @@ -35,6 +35,7 @@
55274 #include <linux/termios.h>
55275 #include <linux/timer.h>
55276 #include <linux/tty.h> /* struct tty_struct */
55277 +#include <asm/local.h>
55278
55279 #include <net/irda/irias_object.h>
55280 #include <net/irda/ircomm_core.h>
55281 @@ -105,8 +106,8 @@ struct ircomm_tty_cb {
55282 unsigned short close_delay;
55283 unsigned short closing_wait; /* time to wait before closing */
55284
55285 - int open_count;
55286 - int blocked_open; /* # of blocked opens */
55287 + local_t open_count;
55288 + local_t blocked_open; /* # of blocked opens */
55289
55290 /* Protect concurent access to :
55291 * o self->open_count
55292 diff -urNp linux-2.6.39.4/include/net/iucv/af_iucv.h linux-2.6.39.4/include/net/iucv/af_iucv.h
55293 --- linux-2.6.39.4/include/net/iucv/af_iucv.h 2011-05-19 00:06:34.000000000 -0400
55294 +++ linux-2.6.39.4/include/net/iucv/af_iucv.h 2011-08-05 19:44:37.000000000 -0400
55295 @@ -87,7 +87,7 @@ struct iucv_sock {
55296 struct iucv_sock_list {
55297 struct hlist_head head;
55298 rwlock_t lock;
55299 - atomic_t autobind_name;
55300 + atomic_unchecked_t autobind_name;
55301 };
55302
55303 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
55304 diff -urNp linux-2.6.39.4/include/net/lapb.h linux-2.6.39.4/include/net/lapb.h
55305 --- linux-2.6.39.4/include/net/lapb.h 2011-05-19 00:06:34.000000000 -0400
55306 +++ linux-2.6.39.4/include/net/lapb.h 2011-08-05 20:34:06.000000000 -0400
55307 @@ -95,7 +95,7 @@ struct lapb_cb {
55308 struct sk_buff_head write_queue;
55309 struct sk_buff_head ack_queue;
55310 unsigned char window;
55311 - struct lapb_register_struct callbacks;
55312 + struct lapb_register_struct *callbacks;
55313
55314 /* FRMR control information */
55315 struct lapb_frame frmr_data;
55316 diff -urNp linux-2.6.39.4/include/net/neighbour.h linux-2.6.39.4/include/net/neighbour.h
55317 --- linux-2.6.39.4/include/net/neighbour.h 2011-05-19 00:06:34.000000000 -0400
55318 +++ linux-2.6.39.4/include/net/neighbour.h 2011-08-05 20:34:06.000000000 -0400
55319 @@ -117,7 +117,7 @@ struct neighbour {
55320 };
55321
55322 struct neigh_ops {
55323 - int family;
55324 + const int family;
55325 void (*solicit)(struct neighbour *, struct sk_buff*);
55326 void (*error_report)(struct neighbour *, struct sk_buff*);
55327 int (*output)(struct sk_buff*);
55328 diff -urNp linux-2.6.39.4/include/net/netlink.h linux-2.6.39.4/include/net/netlink.h
55329 --- linux-2.6.39.4/include/net/netlink.h 2011-05-19 00:06:34.000000000 -0400
55330 +++ linux-2.6.39.4/include/net/netlink.h 2011-08-05 19:44:37.000000000 -0400
55331 @@ -562,7 +562,7 @@ static inline void *nlmsg_get_pos(struct
55332 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
55333 {
55334 if (mark)
55335 - skb_trim(skb, (unsigned char *) mark - skb->data);
55336 + skb_trim(skb, (const unsigned char *) mark - skb->data);
55337 }
55338
55339 /**
55340 diff -urNp linux-2.6.39.4/include/net/netns/ipv4.h linux-2.6.39.4/include/net/netns/ipv4.h
55341 --- linux-2.6.39.4/include/net/netns/ipv4.h 2011-05-19 00:06:34.000000000 -0400
55342 +++ linux-2.6.39.4/include/net/netns/ipv4.h 2011-08-05 19:44:37.000000000 -0400
55343 @@ -54,8 +54,8 @@ struct netns_ipv4 {
55344 int sysctl_rt_cache_rebuild_count;
55345 int current_rt_cache_rebuild_count;
55346
55347 - atomic_t rt_genid;
55348 - atomic_t dev_addr_genid;
55349 + atomic_unchecked_t rt_genid;
55350 + atomic_unchecked_t dev_addr_genid;
55351
55352 #ifdef CONFIG_IP_MROUTE
55353 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
55354 diff -urNp linux-2.6.39.4/include/net/sctp/sctp.h linux-2.6.39.4/include/net/sctp/sctp.h
55355 --- linux-2.6.39.4/include/net/sctp/sctp.h 2011-05-19 00:06:34.000000000 -0400
55356 +++ linux-2.6.39.4/include/net/sctp/sctp.h 2011-08-05 19:44:37.000000000 -0400
55357 @@ -316,9 +316,9 @@ do { \
55358
55359 #else /* SCTP_DEBUG */
55360
55361 -#define SCTP_DEBUG_PRINTK(whatever...)
55362 -#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
55363 -#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
55364 +#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
55365 +#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
55366 +#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
55367 #define SCTP_ENABLE_DEBUG
55368 #define SCTP_DISABLE_DEBUG
55369 #define SCTP_ASSERT(expr, str, func)
55370 diff -urNp linux-2.6.39.4/include/net/sock.h linux-2.6.39.4/include/net/sock.h
55371 --- linux-2.6.39.4/include/net/sock.h 2011-05-19 00:06:34.000000000 -0400
55372 +++ linux-2.6.39.4/include/net/sock.h 2011-08-05 19:44:37.000000000 -0400
55373 @@ -277,7 +277,7 @@ struct sock {
55374 #ifdef CONFIG_RPS
55375 __u32 sk_rxhash;
55376 #endif
55377 - atomic_t sk_drops;
55378 + atomic_unchecked_t sk_drops;
55379 int sk_rcvbuf;
55380
55381 struct sk_filter __rcu *sk_filter;
55382 diff -urNp linux-2.6.39.4/include/net/tcp.h linux-2.6.39.4/include/net/tcp.h
55383 --- linux-2.6.39.4/include/net/tcp.h 2011-05-19 00:06:34.000000000 -0400
55384 +++ linux-2.6.39.4/include/net/tcp.h 2011-08-05 20:34:06.000000000 -0400
55385 @@ -1374,8 +1374,8 @@ enum tcp_seq_states {
55386 struct tcp_seq_afinfo {
55387 char *name;
55388 sa_family_t family;
55389 - struct file_operations seq_fops;
55390 - struct seq_operations seq_ops;
55391 + file_operations_no_const seq_fops;
55392 + seq_operations_no_const seq_ops;
55393 };
55394
55395 struct tcp_iter_state {
55396 diff -urNp linux-2.6.39.4/include/net/udp.h linux-2.6.39.4/include/net/udp.h
55397 --- linux-2.6.39.4/include/net/udp.h 2011-05-19 00:06:34.000000000 -0400
55398 +++ linux-2.6.39.4/include/net/udp.h 2011-08-05 20:34:06.000000000 -0400
55399 @@ -234,8 +234,8 @@ struct udp_seq_afinfo {
55400 char *name;
55401 sa_family_t family;
55402 struct udp_table *udp_table;
55403 - struct file_operations seq_fops;
55404 - struct seq_operations seq_ops;
55405 + file_operations_no_const seq_fops;
55406 + seq_operations_no_const seq_ops;
55407 };
55408
55409 struct udp_iter_state {
55410 diff -urNp linux-2.6.39.4/include/net/xfrm.h linux-2.6.39.4/include/net/xfrm.h
55411 --- linux-2.6.39.4/include/net/xfrm.h 2011-05-19 00:06:34.000000000 -0400
55412 +++ linux-2.6.39.4/include/net/xfrm.h 2011-08-05 19:44:37.000000000 -0400
55413 @@ -505,7 +505,7 @@ struct xfrm_policy {
55414 struct timer_list timer;
55415
55416 struct flow_cache_object flo;
55417 - atomic_t genid;
55418 + atomic_unchecked_t genid;
55419 u32 priority;
55420 u32 index;
55421 struct xfrm_mark mark;
55422 diff -urNp linux-2.6.39.4/include/rdma/iw_cm.h linux-2.6.39.4/include/rdma/iw_cm.h
55423 --- linux-2.6.39.4/include/rdma/iw_cm.h 2011-05-19 00:06:34.000000000 -0400
55424 +++ linux-2.6.39.4/include/rdma/iw_cm.h 2011-08-05 20:34:06.000000000 -0400
55425 @@ -129,7 +129,7 @@ struct iw_cm_verbs {
55426 int backlog);
55427
55428 int (*destroy_listen)(struct iw_cm_id *cm_id);
55429 -};
55430 +} __no_const;
55431
55432 /**
55433 * iw_create_cm_id - Create an IW CM identifier.
55434 diff -urNp linux-2.6.39.4/include/scsi/libfc.h linux-2.6.39.4/include/scsi/libfc.h
55435 --- linux-2.6.39.4/include/scsi/libfc.h 2011-05-19 00:06:34.000000000 -0400
55436 +++ linux-2.6.39.4/include/scsi/libfc.h 2011-08-05 20:34:06.000000000 -0400
55437 @@ -750,6 +750,7 @@ struct libfc_function_template {
55438 */
55439 void (*disc_stop_final) (struct fc_lport *);
55440 };
55441 +typedef struct libfc_function_template __no_const libfc_function_template_no_const;
55442
55443 /**
55444 * struct fc_disc - Discovery context
55445 @@ -853,7 +854,7 @@ struct fc_lport {
55446 struct fc_vport *vport;
55447
55448 /* Operational Information */
55449 - struct libfc_function_template tt;
55450 + libfc_function_template_no_const tt;
55451 u8 link_up;
55452 u8 qfull;
55453 enum fc_lport_state state;
55454 diff -urNp linux-2.6.39.4/include/scsi/scsi_device.h linux-2.6.39.4/include/scsi/scsi_device.h
55455 --- linux-2.6.39.4/include/scsi/scsi_device.h 2011-05-19 00:06:34.000000000 -0400
55456 +++ linux-2.6.39.4/include/scsi/scsi_device.h 2011-08-05 19:44:37.000000000 -0400
55457 @@ -161,9 +161,9 @@ struct scsi_device {
55458 unsigned int max_device_blocked; /* what device_blocked counts down from */
55459 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
55460
55461 - atomic_t iorequest_cnt;
55462 - atomic_t iodone_cnt;
55463 - atomic_t ioerr_cnt;
55464 + atomic_unchecked_t iorequest_cnt;
55465 + atomic_unchecked_t iodone_cnt;
55466 + atomic_unchecked_t ioerr_cnt;
55467
55468 struct device sdev_gendev,
55469 sdev_dev;
55470 diff -urNp linux-2.6.39.4/include/scsi/scsi_transport_fc.h linux-2.6.39.4/include/scsi/scsi_transport_fc.h
55471 --- linux-2.6.39.4/include/scsi/scsi_transport_fc.h 2011-05-19 00:06:34.000000000 -0400
55472 +++ linux-2.6.39.4/include/scsi/scsi_transport_fc.h 2011-08-05 20:34:06.000000000 -0400
55473 @@ -666,9 +666,9 @@ struct fc_function_template {
55474 int (*bsg_timeout)(struct fc_bsg_job *);
55475
55476 /* allocation lengths for host-specific data */
55477 - u32 dd_fcrport_size;
55478 - u32 dd_fcvport_size;
55479 - u32 dd_bsg_size;
55480 + const u32 dd_fcrport_size;
55481 + const u32 dd_fcvport_size;
55482 + const u32 dd_bsg_size;
55483
55484 /*
55485 * The driver sets these to tell the transport class it
55486 @@ -678,39 +678,39 @@ struct fc_function_template {
55487 */
55488
55489 /* remote port fixed attributes */
55490 - unsigned long show_rport_maxframe_size:1;
55491 - unsigned long show_rport_supported_classes:1;
55492 - unsigned long show_rport_dev_loss_tmo:1;
55493 + const unsigned long show_rport_maxframe_size:1;
55494 + const unsigned long show_rport_supported_classes:1;
55495 + const unsigned long show_rport_dev_loss_tmo:1;
55496
55497 /*
55498 * target dynamic attributes
55499 * These should all be "1" if the driver uses the remote port
55500 * add/delete functions (so attributes reflect rport values).
55501 */
55502 - unsigned long show_starget_node_name:1;
55503 - unsigned long show_starget_port_name:1;
55504 - unsigned long show_starget_port_id:1;
55505 + const unsigned long show_starget_node_name:1;
55506 + const unsigned long show_starget_port_name:1;
55507 + const unsigned long show_starget_port_id:1;
55508
55509 /* host fixed attributes */
55510 - unsigned long show_host_node_name:1;
55511 - unsigned long show_host_port_name:1;
55512 - unsigned long show_host_permanent_port_name:1;
55513 - unsigned long show_host_supported_classes:1;
55514 - unsigned long show_host_supported_fc4s:1;
55515 - unsigned long show_host_supported_speeds:1;
55516 - unsigned long show_host_maxframe_size:1;
55517 - unsigned long show_host_serial_number:1;
55518 + const unsigned long show_host_node_name:1;
55519 + const unsigned long show_host_port_name:1;
55520 + const unsigned long show_host_permanent_port_name:1;
55521 + const unsigned long show_host_supported_classes:1;
55522 + const unsigned long show_host_supported_fc4s:1;
55523 + const unsigned long show_host_supported_speeds:1;
55524 + const unsigned long show_host_maxframe_size:1;
55525 + const unsigned long show_host_serial_number:1;
55526 /* host dynamic attributes */
55527 - unsigned long show_host_port_id:1;
55528 - unsigned long show_host_port_type:1;
55529 - unsigned long show_host_port_state:1;
55530 - unsigned long show_host_active_fc4s:1;
55531 - unsigned long show_host_speed:1;
55532 - unsigned long show_host_fabric_name:1;
55533 - unsigned long show_host_symbolic_name:1;
55534 - unsigned long show_host_system_hostname:1;
55535 + const unsigned long show_host_port_id:1;
55536 + const unsigned long show_host_port_type:1;
55537 + const unsigned long show_host_port_state:1;
55538 + const unsigned long show_host_active_fc4s:1;
55539 + const unsigned long show_host_speed:1;
55540 + const unsigned long show_host_fabric_name:1;
55541 + const unsigned long show_host_symbolic_name:1;
55542 + const unsigned long show_host_system_hostname:1;
55543
55544 - unsigned long disable_target_scan:1;
55545 + const unsigned long disable_target_scan:1;
55546 };
55547
55548
55549 diff -urNp linux-2.6.39.4/include/sound/ak4xxx-adda.h linux-2.6.39.4/include/sound/ak4xxx-adda.h
55550 --- linux-2.6.39.4/include/sound/ak4xxx-adda.h 2011-05-19 00:06:34.000000000 -0400
55551 +++ linux-2.6.39.4/include/sound/ak4xxx-adda.h 2011-08-05 20:34:06.000000000 -0400
55552 @@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
55553 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
55554 unsigned char val);
55555 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
55556 -};
55557 +} __no_const;
55558
55559 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
55560
55561 diff -urNp linux-2.6.39.4/include/sound/hwdep.h linux-2.6.39.4/include/sound/hwdep.h
55562 --- linux-2.6.39.4/include/sound/hwdep.h 2011-05-19 00:06:34.000000000 -0400
55563 +++ linux-2.6.39.4/include/sound/hwdep.h 2011-08-05 20:34:06.000000000 -0400
55564 @@ -49,7 +49,7 @@ struct snd_hwdep_ops {
55565 struct snd_hwdep_dsp_status *status);
55566 int (*dsp_load)(struct snd_hwdep *hw,
55567 struct snd_hwdep_dsp_image *image);
55568 -};
55569 +} __no_const;
55570
55571 struct snd_hwdep {
55572 struct snd_card *card;
55573 diff -urNp linux-2.6.39.4/include/sound/info.h linux-2.6.39.4/include/sound/info.h
55574 --- linux-2.6.39.4/include/sound/info.h 2011-05-19 00:06:34.000000000 -0400
55575 +++ linux-2.6.39.4/include/sound/info.h 2011-08-05 20:34:06.000000000 -0400
55576 @@ -44,7 +44,7 @@ struct snd_info_entry_text {
55577 struct snd_info_buffer *buffer);
55578 void (*write)(struct snd_info_entry *entry,
55579 struct snd_info_buffer *buffer);
55580 -};
55581 +} __no_const;
55582
55583 struct snd_info_entry_ops {
55584 int (*open)(struct snd_info_entry *entry,
55585 diff -urNp linux-2.6.39.4/include/sound/pcm.h linux-2.6.39.4/include/sound/pcm.h
55586 --- linux-2.6.39.4/include/sound/pcm.h 2011-05-19 00:06:34.000000000 -0400
55587 +++ linux-2.6.39.4/include/sound/pcm.h 2011-08-05 20:34:06.000000000 -0400
55588 @@ -81,6 +81,7 @@ struct snd_pcm_ops {
55589 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
55590 int (*ack)(struct snd_pcm_substream *substream);
55591 };
55592 +typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
55593
55594 /*
55595 *
55596 diff -urNp linux-2.6.39.4/include/sound/sb16_csp.h linux-2.6.39.4/include/sound/sb16_csp.h
55597 --- linux-2.6.39.4/include/sound/sb16_csp.h 2011-05-19 00:06:34.000000000 -0400
55598 +++ linux-2.6.39.4/include/sound/sb16_csp.h 2011-08-05 20:34:06.000000000 -0400
55599 @@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
55600 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
55601 int (*csp_stop) (struct snd_sb_csp * p);
55602 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
55603 -};
55604 +} __no_const;
55605
55606 /*
55607 * CSP private data
55608 diff -urNp linux-2.6.39.4/include/sound/soc.h linux-2.6.39.4/include/sound/soc.h
55609 --- linux-2.6.39.4/include/sound/soc.h 2011-05-19 00:06:34.000000000 -0400
55610 +++ linux-2.6.39.4/include/sound/soc.h 2011-08-05 20:34:06.000000000 -0400
55611 @@ -624,7 +624,7 @@ struct snd_soc_platform_driver {
55612 struct snd_soc_dai *);
55613
55614 /* platform stream ops */
55615 - struct snd_pcm_ops *ops;
55616 + struct snd_pcm_ops * const ops;
55617 };
55618
55619 struct snd_soc_platform {
55620 diff -urNp linux-2.6.39.4/include/sound/ymfpci.h linux-2.6.39.4/include/sound/ymfpci.h
55621 --- linux-2.6.39.4/include/sound/ymfpci.h 2011-05-19 00:06:34.000000000 -0400
55622 +++ linux-2.6.39.4/include/sound/ymfpci.h 2011-08-05 19:44:37.000000000 -0400
55623 @@ -358,7 +358,7 @@ struct snd_ymfpci {
55624 spinlock_t reg_lock;
55625 spinlock_t voice_lock;
55626 wait_queue_head_t interrupt_sleep;
55627 - atomic_t interrupt_sleep_count;
55628 + atomic_unchecked_t interrupt_sleep_count;
55629 struct snd_info_entry *proc_entry;
55630 const struct firmware *dsp_microcode;
55631 const struct firmware *controller_microcode;
55632 diff -urNp linux-2.6.39.4/include/target/target_core_base.h linux-2.6.39.4/include/target/target_core_base.h
55633 --- linux-2.6.39.4/include/target/target_core_base.h 2011-06-03 00:04:14.000000000 -0400
55634 +++ linux-2.6.39.4/include/target/target_core_base.h 2011-08-05 20:34:06.000000000 -0400
55635 @@ -364,7 +364,7 @@ struct t10_reservation_ops {
55636 int (*t10_seq_non_holder)(struct se_cmd *, unsigned char *, u32);
55637 int (*t10_pr_register)(struct se_cmd *);
55638 int (*t10_pr_clear)(struct se_cmd *);
55639 -};
55640 +} __no_const;
55641
55642 struct t10_reservation_template {
55643 /* Reservation effects all target ports */
55644 @@ -432,8 +432,8 @@ struct se_transport_task {
55645 atomic_t t_task_cdbs_left;
55646 atomic_t t_task_cdbs_ex_left;
55647 atomic_t t_task_cdbs_timeout_left;
55648 - atomic_t t_task_cdbs_sent;
55649 - atomic_t t_transport_aborted;
55650 + atomic_unchecked_t t_task_cdbs_sent;
55651 + atomic_unchecked_t t_transport_aborted;
55652 atomic_t t_transport_active;
55653 atomic_t t_transport_complete;
55654 atomic_t t_transport_queue_active;
55655 @@ -774,7 +774,7 @@ struct se_device {
55656 atomic_t active_cmds;
55657 atomic_t simple_cmds;
55658 atomic_t depth_left;
55659 - atomic_t dev_ordered_id;
55660 + atomic_unchecked_t dev_ordered_id;
55661 atomic_t dev_tur_active;
55662 atomic_t execute_tasks;
55663 atomic_t dev_status_thr_count;
55664 diff -urNp linux-2.6.39.4/include/trace/events/irq.h linux-2.6.39.4/include/trace/events/irq.h
55665 --- linux-2.6.39.4/include/trace/events/irq.h 2011-05-19 00:06:34.000000000 -0400
55666 +++ linux-2.6.39.4/include/trace/events/irq.h 2011-08-05 19:44:37.000000000 -0400
55667 @@ -36,7 +36,7 @@ struct softirq_action;
55668 */
55669 TRACE_EVENT(irq_handler_entry,
55670
55671 - TP_PROTO(int irq, struct irqaction *action),
55672 + TP_PROTO(int irq, const struct irqaction *action),
55673
55674 TP_ARGS(irq, action),
55675
55676 @@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
55677 */
55678 TRACE_EVENT(irq_handler_exit,
55679
55680 - TP_PROTO(int irq, struct irqaction *action, int ret),
55681 + TP_PROTO(int irq, const struct irqaction *action, int ret),
55682
55683 TP_ARGS(irq, action, ret),
55684
55685 diff -urNp linux-2.6.39.4/include/video/udlfb.h linux-2.6.39.4/include/video/udlfb.h
55686 --- linux-2.6.39.4/include/video/udlfb.h 2011-05-19 00:06:34.000000000 -0400
55687 +++ linux-2.6.39.4/include/video/udlfb.h 2011-08-05 19:44:37.000000000 -0400
55688 @@ -51,10 +51,10 @@ struct dlfb_data {
55689 int base8;
55690 u32 pseudo_palette[256];
55691 /* blit-only rendering path metrics, exposed through sysfs */
55692 - atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55693 - atomic_t bytes_identical; /* saved effort with backbuffer comparison */
55694 - atomic_t bytes_sent; /* to usb, after compression including overhead */
55695 - atomic_t cpu_kcycles_used; /* transpired during pixel processing */
55696 + atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
55697 + atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
55698 + atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
55699 + atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
55700 };
55701
55702 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
55703 diff -urNp linux-2.6.39.4/include/video/uvesafb.h linux-2.6.39.4/include/video/uvesafb.h
55704 --- linux-2.6.39.4/include/video/uvesafb.h 2011-05-19 00:06:34.000000000 -0400
55705 +++ linux-2.6.39.4/include/video/uvesafb.h 2011-08-05 19:44:37.000000000 -0400
55706 @@ -177,6 +177,7 @@ struct uvesafb_par {
55707 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
55708 u8 pmi_setpal; /* PMI for palette changes */
55709 u16 *pmi_base; /* protected mode interface location */
55710 + u8 *pmi_code; /* protected mode code location */
55711 void *pmi_start;
55712 void *pmi_pal;
55713 u8 *vbe_state_orig; /*
55714 diff -urNp linux-2.6.39.4/init/do_mounts.c linux-2.6.39.4/init/do_mounts.c
55715 --- linux-2.6.39.4/init/do_mounts.c 2011-05-19 00:06:34.000000000 -0400
55716 +++ linux-2.6.39.4/init/do_mounts.c 2011-08-05 19:44:37.000000000 -0400
55717 @@ -287,7 +287,7 @@ static void __init get_fs_names(char *pa
55718
55719 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
55720 {
55721 - int err = sys_mount(name, "/root", fs, flags, data);
55722 + int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
55723 if (err)
55724 return err;
55725
55726 @@ -383,18 +383,18 @@ void __init change_floppy(char *fmt, ...
55727 va_start(args, fmt);
55728 vsprintf(buf, fmt, args);
55729 va_end(args);
55730 - fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
55731 + fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
55732 if (fd >= 0) {
55733 sys_ioctl(fd, FDEJECT, 0);
55734 sys_close(fd);
55735 }
55736 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
55737 - fd = sys_open("/dev/console", O_RDWR, 0);
55738 + fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
55739 if (fd >= 0) {
55740 sys_ioctl(fd, TCGETS, (long)&termios);
55741 termios.c_lflag &= ~ICANON;
55742 sys_ioctl(fd, TCSETSF, (long)&termios);
55743 - sys_read(fd, &c, 1);
55744 + sys_read(fd, (char __user *)&c, 1);
55745 termios.c_lflag |= ICANON;
55746 sys_ioctl(fd, TCSETSF, (long)&termios);
55747 sys_close(fd);
55748 @@ -488,6 +488,6 @@ void __init prepare_namespace(void)
55749 mount_root();
55750 out:
55751 devtmpfs_mount("dev");
55752 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
55753 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55754 sys_chroot((const char __user __force *)".");
55755 }
55756 diff -urNp linux-2.6.39.4/init/do_mounts.h linux-2.6.39.4/init/do_mounts.h
55757 --- linux-2.6.39.4/init/do_mounts.h 2011-05-19 00:06:34.000000000 -0400
55758 +++ linux-2.6.39.4/init/do_mounts.h 2011-08-05 19:44:37.000000000 -0400
55759 @@ -15,15 +15,15 @@ extern int root_mountflags;
55760
55761 static inline int create_dev(char *name, dev_t dev)
55762 {
55763 - sys_unlink(name);
55764 - return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
55765 + sys_unlink((__force char __user *)name);
55766 + return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
55767 }
55768
55769 #if BITS_PER_LONG == 32
55770 static inline u32 bstat(char *name)
55771 {
55772 struct stat64 stat;
55773 - if (sys_stat64(name, &stat) != 0)
55774 + if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
55775 return 0;
55776 if (!S_ISBLK(stat.st_mode))
55777 return 0;
55778 diff -urNp linux-2.6.39.4/init/do_mounts_initrd.c linux-2.6.39.4/init/do_mounts_initrd.c
55779 --- linux-2.6.39.4/init/do_mounts_initrd.c 2011-05-19 00:06:34.000000000 -0400
55780 +++ linux-2.6.39.4/init/do_mounts_initrd.c 2011-08-05 19:44:37.000000000 -0400
55781 @@ -44,13 +44,13 @@ static void __init handle_initrd(void)
55782 create_dev("/dev/root.old", Root_RAM0);
55783 /* mount initrd on rootfs' /root */
55784 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
55785 - sys_mkdir("/old", 0700);
55786 - root_fd = sys_open("/", 0, 0);
55787 - old_fd = sys_open("/old", 0, 0);
55788 + sys_mkdir((__force const char __user *)"/old", 0700);
55789 + root_fd = sys_open((__force const char __user *)"/", 0, 0);
55790 + old_fd = sys_open((__force const char __user *)"/old", 0, 0);
55791 /* move initrd over / and chdir/chroot in initrd root */
55792 - sys_chdir("/root");
55793 - sys_mount(".", "/", NULL, MS_MOVE, NULL);
55794 - sys_chroot(".");
55795 + sys_chdir((__force const char __user *)"/root");
55796 + sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
55797 + sys_chroot((__force const char __user *)".");
55798
55799 /*
55800 * In case that a resume from disk is carried out by linuxrc or one of
55801 @@ -67,15 +67,15 @@ static void __init handle_initrd(void)
55802
55803 /* move initrd to rootfs' /old */
55804 sys_fchdir(old_fd);
55805 - sys_mount("/", ".", NULL, MS_MOVE, NULL);
55806 + sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
55807 /* switch root and cwd back to / of rootfs */
55808 sys_fchdir(root_fd);
55809 - sys_chroot(".");
55810 + sys_chroot((__force const char __user *)".");
55811 sys_close(old_fd);
55812 sys_close(root_fd);
55813
55814 if (new_decode_dev(real_root_dev) == Root_RAM0) {
55815 - sys_chdir("/old");
55816 + sys_chdir((__force const char __user *)"/old");
55817 return;
55818 }
55819
55820 @@ -83,17 +83,17 @@ static void __init handle_initrd(void)
55821 mount_root();
55822
55823 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
55824 - error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
55825 + error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
55826 if (!error)
55827 printk("okay\n");
55828 else {
55829 - int fd = sys_open("/dev/root.old", O_RDWR, 0);
55830 + int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
55831 if (error == -ENOENT)
55832 printk("/initrd does not exist. Ignored.\n");
55833 else
55834 printk("failed\n");
55835 printk(KERN_NOTICE "Unmounting old root\n");
55836 - sys_umount("/old", MNT_DETACH);
55837 + sys_umount((__force char __user *)"/old", MNT_DETACH);
55838 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
55839 if (fd < 0) {
55840 error = fd;
55841 @@ -116,11 +116,11 @@ int __init initrd_load(void)
55842 * mounted in the normal path.
55843 */
55844 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
55845 - sys_unlink("/initrd.image");
55846 + sys_unlink((__force const char __user *)"/initrd.image");
55847 handle_initrd();
55848 return 1;
55849 }
55850 }
55851 - sys_unlink("/initrd.image");
55852 + sys_unlink((__force const char __user *)"/initrd.image");
55853 return 0;
55854 }
55855 diff -urNp linux-2.6.39.4/init/do_mounts_md.c linux-2.6.39.4/init/do_mounts_md.c
55856 --- linux-2.6.39.4/init/do_mounts_md.c 2011-05-19 00:06:34.000000000 -0400
55857 +++ linux-2.6.39.4/init/do_mounts_md.c 2011-08-05 19:44:37.000000000 -0400
55858 @@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
55859 partitioned ? "_d" : "", minor,
55860 md_setup_args[ent].device_names);
55861
55862 - fd = sys_open(name, 0, 0);
55863 + fd = sys_open((__force char __user *)name, 0, 0);
55864 if (fd < 0) {
55865 printk(KERN_ERR "md: open failed - cannot start "
55866 "array %s\n", name);
55867 @@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
55868 * array without it
55869 */
55870 sys_close(fd);
55871 - fd = sys_open(name, 0, 0);
55872 + fd = sys_open((__force char __user *)name, 0, 0);
55873 sys_ioctl(fd, BLKRRPART, 0);
55874 }
55875 sys_close(fd);
55876 diff -urNp linux-2.6.39.4/init/initramfs.c linux-2.6.39.4/init/initramfs.c
55877 --- linux-2.6.39.4/init/initramfs.c 2011-05-19 00:06:34.000000000 -0400
55878 +++ linux-2.6.39.4/init/initramfs.c 2011-08-05 19:44:37.000000000 -0400
55879 @@ -74,7 +74,7 @@ static void __init free_hash(void)
55880 }
55881 }
55882
55883 -static long __init do_utime(char __user *filename, time_t mtime)
55884 +static long __init do_utime(__force char __user *filename, time_t mtime)
55885 {
55886 struct timespec t[2];
55887
55888 @@ -109,7 +109,7 @@ static void __init dir_utime(void)
55889 struct dir_entry *de, *tmp;
55890 list_for_each_entry_safe(de, tmp, &dir_list, list) {
55891 list_del(&de->list);
55892 - do_utime(de->name, de->mtime);
55893 + do_utime((__force char __user *)de->name, de->mtime);
55894 kfree(de->name);
55895 kfree(de);
55896 }
55897 @@ -271,7 +271,7 @@ static int __init maybe_link(void)
55898 if (nlink >= 2) {
55899 char *old = find_link(major, minor, ino, mode, collected);
55900 if (old)
55901 - return (sys_link(old, collected) < 0) ? -1 : 1;
55902 + return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
55903 }
55904 return 0;
55905 }
55906 @@ -280,11 +280,11 @@ static void __init clean_path(char *path
55907 {
55908 struct stat st;
55909
55910 - if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
55911 + if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
55912 if (S_ISDIR(st.st_mode))
55913 - sys_rmdir(path);
55914 + sys_rmdir((__force char __user *)path);
55915 else
55916 - sys_unlink(path);
55917 + sys_unlink((__force char __user *)path);
55918 }
55919 }
55920
55921 @@ -305,7 +305,7 @@ static int __init do_name(void)
55922 int openflags = O_WRONLY|O_CREAT;
55923 if (ml != 1)
55924 openflags |= O_TRUNC;
55925 - wfd = sys_open(collected, openflags, mode);
55926 + wfd = sys_open((__force char __user *)collected, openflags, mode);
55927
55928 if (wfd >= 0) {
55929 sys_fchown(wfd, uid, gid);
55930 @@ -317,17 +317,17 @@ static int __init do_name(void)
55931 }
55932 }
55933 } else if (S_ISDIR(mode)) {
55934 - sys_mkdir(collected, mode);
55935 - sys_chown(collected, uid, gid);
55936 - sys_chmod(collected, mode);
55937 + sys_mkdir((__force char __user *)collected, mode);
55938 + sys_chown((__force char __user *)collected, uid, gid);
55939 + sys_chmod((__force char __user *)collected, mode);
55940 dir_add(collected, mtime);
55941 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
55942 S_ISFIFO(mode) || S_ISSOCK(mode)) {
55943 if (maybe_link() == 0) {
55944 - sys_mknod(collected, mode, rdev);
55945 - sys_chown(collected, uid, gid);
55946 - sys_chmod(collected, mode);
55947 - do_utime(collected, mtime);
55948 + sys_mknod((__force char __user *)collected, mode, rdev);
55949 + sys_chown((__force char __user *)collected, uid, gid);
55950 + sys_chmod((__force char __user *)collected, mode);
55951 + do_utime((__force char __user *)collected, mtime);
55952 }
55953 }
55954 return 0;
55955 @@ -336,15 +336,15 @@ static int __init do_name(void)
55956 static int __init do_copy(void)
55957 {
55958 if (count >= body_len) {
55959 - sys_write(wfd, victim, body_len);
55960 + sys_write(wfd, (__force char __user *)victim, body_len);
55961 sys_close(wfd);
55962 - do_utime(vcollected, mtime);
55963 + do_utime((__force char __user *)vcollected, mtime);
55964 kfree(vcollected);
55965 eat(body_len);
55966 state = SkipIt;
55967 return 0;
55968 } else {
55969 - sys_write(wfd, victim, count);
55970 + sys_write(wfd, (__force char __user *)victim, count);
55971 body_len -= count;
55972 eat(count);
55973 return 1;
55974 @@ -355,9 +355,9 @@ static int __init do_symlink(void)
55975 {
55976 collected[N_ALIGN(name_len) + body_len] = '\0';
55977 clean_path(collected, 0);
55978 - sys_symlink(collected + N_ALIGN(name_len), collected);
55979 - sys_lchown(collected, uid, gid);
55980 - do_utime(collected, mtime);
55981 + sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
55982 + sys_lchown((__force char __user *)collected, uid, gid);
55983 + do_utime((__force char __user *)collected, mtime);
55984 state = SkipIt;
55985 next_state = Reset;
55986 return 0;
55987 diff -urNp linux-2.6.39.4/init/Kconfig linux-2.6.39.4/init/Kconfig
55988 --- linux-2.6.39.4/init/Kconfig 2011-05-19 00:06:34.000000000 -0400
55989 +++ linux-2.6.39.4/init/Kconfig 2011-08-05 19:44:37.000000000 -0400
55990 @@ -1202,7 +1202,7 @@ config SLUB_DEBUG
55991
55992 config COMPAT_BRK
55993 bool "Disable heap randomization"
55994 - default y
55995 + default n
55996 help
55997 Randomizing heap placement makes heap exploits harder, but it
55998 also breaks ancient binaries (including anything libc5 based).
55999 diff -urNp linux-2.6.39.4/init/main.c linux-2.6.39.4/init/main.c
56000 --- linux-2.6.39.4/init/main.c 2011-06-03 00:04:14.000000000 -0400
56001 +++ linux-2.6.39.4/init/main.c 2011-08-05 20:34:06.000000000 -0400
56002 @@ -96,6 +96,8 @@ static inline void mark_rodata_ro(void)
56003 extern void tc_init(void);
56004 #endif
56005
56006 +extern void grsecurity_init(void);
56007 +
56008 /*
56009 * Debug helper: via this flag we know that we are in 'early bootup code'
56010 * where only the boot processor is running with IRQ disabled. This means
56011 @@ -149,6 +151,49 @@ static int __init set_reset_devices(char
56012
56013 __setup("reset_devices", set_reset_devices);
56014
56015 +#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
56016 +extern char pax_enter_kernel_user[];
56017 +extern char pax_exit_kernel_user[];
56018 +extern pgdval_t clone_pgd_mask;
56019 +#endif
56020 +
56021 +#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
56022 +static int __init setup_pax_nouderef(char *str)
56023 +{
56024 +#ifdef CONFIG_X86_32
56025 + unsigned int cpu;
56026 + struct desc_struct *gdt;
56027 +
56028 + for (cpu = 0; cpu < NR_CPUS; cpu++) {
56029 + gdt = get_cpu_gdt_table(cpu);
56030 + gdt[GDT_ENTRY_KERNEL_DS].type = 3;
56031 + gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
56032 + gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
56033 + gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
56034 + }
56035 + asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
56036 +#else
56037 + memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
56038 + memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
56039 + clone_pgd_mask = ~(pgdval_t)0UL;
56040 +#endif
56041 +
56042 + return 0;
56043 +}
56044 +early_param("pax_nouderef", setup_pax_nouderef);
56045 +#endif
56046 +
56047 +#ifdef CONFIG_PAX_SOFTMODE
56048 +int pax_softmode;
56049 +
56050 +static int __init setup_pax_softmode(char *str)
56051 +{
56052 + get_option(&str, &pax_softmode);
56053 + return 1;
56054 +}
56055 +__setup("pax_softmode=", setup_pax_softmode);
56056 +#endif
56057 +
56058 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
56059 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
56060 static const char *panic_later, *panic_param;
56061 @@ -663,6 +708,7 @@ int __init_or_module do_one_initcall(ini
56062 {
56063 int count = preempt_count();
56064 int ret;
56065 + const char *msg1 = "", *msg2 = "";
56066
56067 if (initcall_debug)
56068 ret = do_one_initcall_debug(fn);
56069 @@ -675,15 +721,15 @@ int __init_or_module do_one_initcall(ini
56070 sprintf(msgbuf, "error code %d ", ret);
56071
56072 if (preempt_count() != count) {
56073 - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
56074 + msg1 = " preemption imbalance";
56075 preempt_count() = count;
56076 }
56077 if (irqs_disabled()) {
56078 - strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
56079 + msg2 = " disabled interrupts";
56080 local_irq_enable();
56081 }
56082 - if (msgbuf[0]) {
56083 - printk("initcall %pF returned with %s\n", fn, msgbuf);
56084 + if (msgbuf[0] || *msg1 || *msg2) {
56085 + printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
56086 }
56087
56088 return ret;
56089 @@ -801,7 +847,7 @@ static int __init kernel_init(void * unu
56090 do_basic_setup();
56091
56092 /* Open the /dev/console on the rootfs, this should never fail */
56093 - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
56094 + if (sys_open((__force const char __user *) "/dev/console", O_RDWR, 0) < 0)
56095 printk(KERN_WARNING "Warning: unable to open an initial console.\n");
56096
56097 (void) sys_dup(0);
56098 @@ -814,11 +860,13 @@ static int __init kernel_init(void * unu
56099 if (!ramdisk_execute_command)
56100 ramdisk_execute_command = "/init";
56101
56102 - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
56103 + if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
56104 ramdisk_execute_command = NULL;
56105 prepare_namespace();
56106 }
56107
56108 + grsecurity_init();
56109 +
56110 /*
56111 * Ok, we have completed the initial bootup, and
56112 * we're essentially up and running. Get rid of the
56113 diff -urNp linux-2.6.39.4/ipc/mqueue.c linux-2.6.39.4/ipc/mqueue.c
56114 --- linux-2.6.39.4/ipc/mqueue.c 2011-05-19 00:06:34.000000000 -0400
56115 +++ linux-2.6.39.4/ipc/mqueue.c 2011-08-05 19:44:37.000000000 -0400
56116 @@ -154,6 +154,7 @@ static struct inode *mqueue_get_inode(st
56117 mq_bytes = (mq_msg_tblsz +
56118 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
56119
56120 + gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
56121 spin_lock(&mq_lock);
56122 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
56123 u->mq_bytes + mq_bytes >
56124 diff -urNp linux-2.6.39.4/ipc/msg.c linux-2.6.39.4/ipc/msg.c
56125 --- linux-2.6.39.4/ipc/msg.c 2011-05-19 00:06:34.000000000 -0400
56126 +++ linux-2.6.39.4/ipc/msg.c 2011-08-05 20:34:06.000000000 -0400
56127 @@ -309,18 +309,19 @@ static inline int msg_security(struct ke
56128 return security_msg_queue_associate(msq, msgflg);
56129 }
56130
56131 +static struct ipc_ops msg_ops = {
56132 + .getnew = newque,
56133 + .associate = msg_security,
56134 + .more_checks = NULL
56135 +};
56136 +
56137 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
56138 {
56139 struct ipc_namespace *ns;
56140 - struct ipc_ops msg_ops;
56141 struct ipc_params msg_params;
56142
56143 ns = current->nsproxy->ipc_ns;
56144
56145 - msg_ops.getnew = newque;
56146 - msg_ops.associate = msg_security;
56147 - msg_ops.more_checks = NULL;
56148 -
56149 msg_params.key = key;
56150 msg_params.flg = msgflg;
56151
56152 diff -urNp linux-2.6.39.4/ipc/sem.c linux-2.6.39.4/ipc/sem.c
56153 --- linux-2.6.39.4/ipc/sem.c 2011-05-19 00:06:34.000000000 -0400
56154 +++ linux-2.6.39.4/ipc/sem.c 2011-08-05 20:34:06.000000000 -0400
56155 @@ -318,10 +318,15 @@ static inline int sem_more_checks(struct
56156 return 0;
56157 }
56158
56159 +static struct ipc_ops sem_ops = {
56160 + .getnew = newary,
56161 + .associate = sem_security,
56162 + .more_checks = sem_more_checks
56163 +};
56164 +
56165 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
56166 {
56167 struct ipc_namespace *ns;
56168 - struct ipc_ops sem_ops;
56169 struct ipc_params sem_params;
56170
56171 ns = current->nsproxy->ipc_ns;
56172 @@ -329,10 +334,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
56173 if (nsems < 0 || nsems > ns->sc_semmsl)
56174 return -EINVAL;
56175
56176 - sem_ops.getnew = newary;
56177 - sem_ops.associate = sem_security;
56178 - sem_ops.more_checks = sem_more_checks;
56179 -
56180 sem_params.key = key;
56181 sem_params.flg = semflg;
56182 sem_params.u.nsems = nsems;
56183 @@ -854,6 +855,8 @@ static int semctl_main(struct ipc_namesp
56184 int nsems;
56185 struct list_head tasks;
56186
56187 + pax_track_stack();
56188 +
56189 sma = sem_lock_check(ns, semid);
56190 if (IS_ERR(sma))
56191 return PTR_ERR(sma);
56192 @@ -1301,6 +1304,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
56193 struct ipc_namespace *ns;
56194 struct list_head tasks;
56195
56196 + pax_track_stack();
56197 +
56198 ns = current->nsproxy->ipc_ns;
56199
56200 if (nsops < 1 || semid < 0)
56201 diff -urNp linux-2.6.39.4/ipc/shm.c linux-2.6.39.4/ipc/shm.c
56202 --- linux-2.6.39.4/ipc/shm.c 2011-05-19 00:06:34.000000000 -0400
56203 +++ linux-2.6.39.4/ipc/shm.c 2011-08-05 20:34:06.000000000 -0400
56204 @@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name
56205 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
56206 #endif
56207
56208 +#ifdef CONFIG_GRKERNSEC
56209 +extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56210 + const time_t shm_createtime, const uid_t cuid,
56211 + const int shmid);
56212 +extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
56213 + const time_t shm_createtime);
56214 +#endif
56215 +
56216 void shm_init_ns(struct ipc_namespace *ns)
56217 {
56218 ns->shm_ctlmax = SHMMAX;
56219 @@ -401,6 +409,14 @@ static int newseg(struct ipc_namespace *
56220 shp->shm_lprid = 0;
56221 shp->shm_atim = shp->shm_dtim = 0;
56222 shp->shm_ctim = get_seconds();
56223 +#ifdef CONFIG_GRKERNSEC
56224 + {
56225 + struct timespec timeval;
56226 + do_posix_clock_monotonic_gettime(&timeval);
56227 +
56228 + shp->shm_createtime = timeval.tv_sec;
56229 + }
56230 +#endif
56231 shp->shm_segsz = size;
56232 shp->shm_nattch = 0;
56233 shp->shm_file = file;
56234 @@ -451,18 +467,19 @@ static inline int shm_more_checks(struct
56235 return 0;
56236 }
56237
56238 +static struct ipc_ops shm_ops = {
56239 + .getnew = newseg,
56240 + .associate = shm_security,
56241 + .more_checks = shm_more_checks
56242 +};
56243 +
56244 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
56245 {
56246 struct ipc_namespace *ns;
56247 - struct ipc_ops shm_ops;
56248 struct ipc_params shm_params;
56249
56250 ns = current->nsproxy->ipc_ns;
56251
56252 - shm_ops.getnew = newseg;
56253 - shm_ops.associate = shm_security;
56254 - shm_ops.more_checks = shm_more_checks;
56255 -
56256 shm_params.key = key;
56257 shm_params.flg = shmflg;
56258 shm_params.u.size = size;
56259 @@ -762,8 +779,6 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int,
56260 case SHM_LOCK:
56261 case SHM_UNLOCK:
56262 {
56263 - struct file *uninitialized_var(shm_file);
56264 -
56265 lru_add_drain_all(); /* drain pagevecs to lru lists */
56266
56267 shp = shm_lock_check(ns, shmid);
56268 @@ -896,9 +911,21 @@ long do_shmat(int shmid, char __user *sh
56269 if (err)
56270 goto out_unlock;
56271
56272 +#ifdef CONFIG_GRKERNSEC
56273 + if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
56274 + shp->shm_perm.cuid, shmid) ||
56275 + !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
56276 + err = -EACCES;
56277 + goto out_unlock;
56278 + }
56279 +#endif
56280 +
56281 path = shp->shm_file->f_path;
56282 path_get(&path);
56283 shp->shm_nattch++;
56284 +#ifdef CONFIG_GRKERNSEC
56285 + shp->shm_lapid = current->pid;
56286 +#endif
56287 size = i_size_read(path.dentry->d_inode);
56288 shm_unlock(shp);
56289
56290 diff -urNp linux-2.6.39.4/kernel/acct.c linux-2.6.39.4/kernel/acct.c
56291 --- linux-2.6.39.4/kernel/acct.c 2011-05-19 00:06:34.000000000 -0400
56292 +++ linux-2.6.39.4/kernel/acct.c 2011-08-05 19:44:37.000000000 -0400
56293 @@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a
56294 */
56295 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
56296 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
56297 - file->f_op->write(file, (char *)&ac,
56298 + file->f_op->write(file, (__force char __user *)&ac,
56299 sizeof(acct_t), &file->f_pos);
56300 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
56301 set_fs(fs);
56302 diff -urNp linux-2.6.39.4/kernel/audit.c linux-2.6.39.4/kernel/audit.c
56303 --- linux-2.6.39.4/kernel/audit.c 2011-05-19 00:06:34.000000000 -0400
56304 +++ linux-2.6.39.4/kernel/audit.c 2011-08-05 19:44:37.000000000 -0400
56305 @@ -112,7 +112,7 @@ u32 audit_sig_sid = 0;
56306 3) suppressed due to audit_rate_limit
56307 4) suppressed due to audit_backlog_limit
56308 */
56309 -static atomic_t audit_lost = ATOMIC_INIT(0);
56310 +static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
56311
56312 /* The netlink socket. */
56313 static struct sock *audit_sock;
56314 @@ -234,7 +234,7 @@ void audit_log_lost(const char *message)
56315 unsigned long now;
56316 int print;
56317
56318 - atomic_inc(&audit_lost);
56319 + atomic_inc_unchecked(&audit_lost);
56320
56321 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
56322
56323 @@ -253,7 +253,7 @@ void audit_log_lost(const char *message)
56324 printk(KERN_WARNING
56325 "audit: audit_lost=%d audit_rate_limit=%d "
56326 "audit_backlog_limit=%d\n",
56327 - atomic_read(&audit_lost),
56328 + atomic_read_unchecked(&audit_lost),
56329 audit_rate_limit,
56330 audit_backlog_limit);
56331 audit_panic(message);
56332 @@ -686,7 +686,7 @@ static int audit_receive_msg(struct sk_b
56333 status_set.pid = audit_pid;
56334 status_set.rate_limit = audit_rate_limit;
56335 status_set.backlog_limit = audit_backlog_limit;
56336 - status_set.lost = atomic_read(&audit_lost);
56337 + status_set.lost = atomic_read_unchecked(&audit_lost);
56338 status_set.backlog = skb_queue_len(&audit_skb_queue);
56339 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
56340 &status_set, sizeof(status_set));
56341 diff -urNp linux-2.6.39.4/kernel/auditsc.c linux-2.6.39.4/kernel/auditsc.c
56342 --- linux-2.6.39.4/kernel/auditsc.c 2011-05-19 00:06:34.000000000 -0400
56343 +++ linux-2.6.39.4/kernel/auditsc.c 2011-08-05 19:44:37.000000000 -0400
56344 @@ -2111,7 +2111,7 @@ int auditsc_get_stamp(struct audit_conte
56345 }
56346
56347 /* global counter which is incremented every time something logs in */
56348 -static atomic_t session_id = ATOMIC_INIT(0);
56349 +static atomic_unchecked_t session_id = ATOMIC_INIT(0);
56350
56351 /**
56352 * audit_set_loginuid - set a task's audit_context loginuid
56353 @@ -2124,7 +2124,7 @@ static atomic_t session_id = ATOMIC_INIT
56354 */
56355 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
56356 {
56357 - unsigned int sessionid = atomic_inc_return(&session_id);
56358 + unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
56359 struct audit_context *context = task->audit_context;
56360
56361 if (context && context->in_syscall) {
56362 diff -urNp linux-2.6.39.4/kernel/capability.c linux-2.6.39.4/kernel/capability.c
56363 --- linux-2.6.39.4/kernel/capability.c 2011-05-19 00:06:34.000000000 -0400
56364 +++ linux-2.6.39.4/kernel/capability.c 2011-08-05 19:44:37.000000000 -0400
56365 @@ -206,6 +206,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_
56366 * before modification is attempted and the application
56367 * fails.
56368 */
56369 + if (tocopy > ARRAY_SIZE(kdata))
56370 + return -EFAULT;
56371 +
56372 if (copy_to_user(dataptr, kdata, tocopy
56373 * sizeof(struct __user_cap_data_struct))) {
56374 return -EFAULT;
56375 @@ -378,7 +381,7 @@ bool ns_capable(struct user_namespace *n
56376 BUG();
56377 }
56378
56379 - if (security_capable(ns, current_cred(), cap) == 0) {
56380 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable(cap)) {
56381 current->flags |= PF_SUPERPRIV;
56382 return true;
56383 }
56384 @@ -386,6 +389,27 @@ bool ns_capable(struct user_namespace *n
56385 }
56386 EXPORT_SYMBOL(ns_capable);
56387
56388 +bool ns_capable_nolog(struct user_namespace *ns, int cap)
56389 +{
56390 + if (unlikely(!cap_valid(cap))) {
56391 + printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
56392 + BUG();
56393 + }
56394 +
56395 + if (security_capable(ns, current_cred(), cap) == 0 && gr_is_capable_nolog(cap)) {
56396 + current->flags |= PF_SUPERPRIV;
56397 + return true;
56398 + }
56399 + return false;
56400 +}
56401 +EXPORT_SYMBOL(ns_capable_nolog);
56402 +
56403 +bool capable_nolog(int cap)
56404 +{
56405 + return ns_capable_nolog(&init_user_ns, cap);
56406 +}
56407 +EXPORT_SYMBOL(capable_nolog);
56408 +
56409 /**
56410 * task_ns_capable - Determine whether current task has a superior
56411 * capability targeted at a specific task's user namespace.
56412 @@ -400,6 +424,12 @@ bool task_ns_capable(struct task_struct
56413 }
56414 EXPORT_SYMBOL(task_ns_capable);
56415
56416 +bool task_ns_capable_nolog(struct task_struct *t, int cap)
56417 +{
56418 + return ns_capable_nolog(task_cred_xxx(t, user)->user_ns, cap);
56419 +}
56420 +EXPORT_SYMBOL(task_ns_capable_nolog);
56421 +
56422 /**
56423 * nsown_capable - Check superior capability to one's own user_ns
56424 * @cap: The capability in question
56425 diff -urNp linux-2.6.39.4/kernel/cgroup.c linux-2.6.39.4/kernel/cgroup.c
56426 --- linux-2.6.39.4/kernel/cgroup.c 2011-05-19 00:06:34.000000000 -0400
56427 +++ linux-2.6.39.4/kernel/cgroup.c 2011-08-05 19:44:37.000000000 -0400
56428 @@ -598,6 +598,8 @@ static struct css_set *find_css_set(
56429 struct hlist_head *hhead;
56430 struct cg_cgroup_link *link;
56431
56432 + pax_track_stack();
56433 +
56434 /* First see if we already have a cgroup group that matches
56435 * the desired set */
56436 read_lock(&css_set_lock);
56437 diff -urNp linux-2.6.39.4/kernel/compat.c linux-2.6.39.4/kernel/compat.c
56438 --- linux-2.6.39.4/kernel/compat.c 2011-05-19 00:06:34.000000000 -0400
56439 +++ linux-2.6.39.4/kernel/compat.c 2011-08-05 19:44:37.000000000 -0400
56440 @@ -13,6 +13,7 @@
56441
56442 #include <linux/linkage.h>
56443 #include <linux/compat.h>
56444 +#include <linux/module.h>
56445 #include <linux/errno.h>
56446 #include <linux/time.h>
56447 #include <linux/signal.h>
56448 diff -urNp linux-2.6.39.4/kernel/configs.c linux-2.6.39.4/kernel/configs.c
56449 --- linux-2.6.39.4/kernel/configs.c 2011-05-19 00:06:34.000000000 -0400
56450 +++ linux-2.6.39.4/kernel/configs.c 2011-08-05 19:44:37.000000000 -0400
56451 @@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
56452 struct proc_dir_entry *entry;
56453
56454 /* create the current config file */
56455 +#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
56456 +#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
56457 + entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
56458 + &ikconfig_file_ops);
56459 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56460 + entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
56461 + &ikconfig_file_ops);
56462 +#endif
56463 +#else
56464 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
56465 &ikconfig_file_ops);
56466 +#endif
56467 +
56468 if (!entry)
56469 return -ENOMEM;
56470
56471 diff -urNp linux-2.6.39.4/kernel/cred.c linux-2.6.39.4/kernel/cred.c
56472 --- linux-2.6.39.4/kernel/cred.c 2011-05-19 00:06:34.000000000 -0400
56473 +++ linux-2.6.39.4/kernel/cred.c 2011-08-05 19:44:37.000000000 -0400
56474 @@ -158,6 +158,8 @@ static void put_cred_rcu(struct rcu_head
56475 */
56476 void __put_cred(struct cred *cred)
56477 {
56478 + pax_track_stack();
56479 +
56480 kdebug("__put_cred(%p{%d,%d})", cred,
56481 atomic_read(&cred->usage),
56482 read_cred_subscribers(cred));
56483 @@ -182,6 +184,8 @@ void exit_creds(struct task_struct *tsk)
56484 {
56485 struct cred *cred;
56486
56487 + pax_track_stack();
56488 +
56489 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
56490 atomic_read(&tsk->cred->usage),
56491 read_cred_subscribers(tsk->cred));
56492 @@ -220,6 +224,8 @@ const struct cred *get_task_cred(struct
56493 {
56494 const struct cred *cred;
56495
56496 + pax_track_stack();
56497 +
56498 rcu_read_lock();
56499
56500 do {
56501 @@ -239,6 +245,8 @@ struct cred *cred_alloc_blank(void)
56502 {
56503 struct cred *new;
56504
56505 + pax_track_stack();
56506 +
56507 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
56508 if (!new)
56509 return NULL;
56510 @@ -287,6 +295,8 @@ struct cred *prepare_creds(void)
56511 const struct cred *old;
56512 struct cred *new;
56513
56514 + pax_track_stack();
56515 +
56516 validate_process_creds();
56517
56518 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56519 @@ -333,6 +343,8 @@ struct cred *prepare_exec_creds(void)
56520 struct thread_group_cred *tgcred = NULL;
56521 struct cred *new;
56522
56523 + pax_track_stack();
56524 +
56525 #ifdef CONFIG_KEYS
56526 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
56527 if (!tgcred)
56528 @@ -385,6 +397,8 @@ int copy_creds(struct task_struct *p, un
56529 struct cred *new;
56530 int ret;
56531
56532 + pax_track_stack();
56533 +
56534 if (
56535 #ifdef CONFIG_KEYS
56536 !p->cred->thread_keyring &&
56537 @@ -475,6 +489,8 @@ int commit_creds(struct cred *new)
56538 struct task_struct *task = current;
56539 const struct cred *old = task->real_cred;
56540
56541 + pax_track_stack();
56542 +
56543 kdebug("commit_creds(%p{%d,%d})", new,
56544 atomic_read(&new->usage),
56545 read_cred_subscribers(new));
56546 @@ -489,6 +505,8 @@ int commit_creds(struct cred *new)
56547
56548 get_cred(new); /* we will require a ref for the subj creds too */
56549
56550 + gr_set_role_label(task, new->uid, new->gid);
56551 +
56552 /* dumpability changes */
56553 if (old->euid != new->euid ||
56554 old->egid != new->egid ||
56555 @@ -551,6 +569,8 @@ EXPORT_SYMBOL(commit_creds);
56556 */
56557 void abort_creds(struct cred *new)
56558 {
56559 + pax_track_stack();
56560 +
56561 kdebug("abort_creds(%p{%d,%d})", new,
56562 atomic_read(&new->usage),
56563 read_cred_subscribers(new));
56564 @@ -574,6 +594,8 @@ const struct cred *override_creds(const
56565 {
56566 const struct cred *old = current->cred;
56567
56568 + pax_track_stack();
56569 +
56570 kdebug("override_creds(%p{%d,%d})", new,
56571 atomic_read(&new->usage),
56572 read_cred_subscribers(new));
56573 @@ -603,6 +625,8 @@ void revert_creds(const struct cred *old
56574 {
56575 const struct cred *override = current->cred;
56576
56577 + pax_track_stack();
56578 +
56579 kdebug("revert_creds(%p{%d,%d})", old,
56580 atomic_read(&old->usage),
56581 read_cred_subscribers(old));
56582 @@ -649,6 +673,8 @@ struct cred *prepare_kernel_cred(struct
56583 const struct cred *old;
56584 struct cred *new;
56585
56586 + pax_track_stack();
56587 +
56588 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
56589 if (!new)
56590 return NULL;
56591 @@ -703,6 +729,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
56592 */
56593 int set_security_override(struct cred *new, u32 secid)
56594 {
56595 + pax_track_stack();
56596 +
56597 return security_kernel_act_as(new, secid);
56598 }
56599 EXPORT_SYMBOL(set_security_override);
56600 @@ -722,6 +750,8 @@ int set_security_override_from_ctx(struc
56601 u32 secid;
56602 int ret;
56603
56604 + pax_track_stack();
56605 +
56606 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
56607 if (ret < 0)
56608 return ret;
56609 diff -urNp linux-2.6.39.4/kernel/debug/debug_core.c linux-2.6.39.4/kernel/debug/debug_core.c
56610 --- linux-2.6.39.4/kernel/debug/debug_core.c 2011-05-19 00:06:34.000000000 -0400
56611 +++ linux-2.6.39.4/kernel/debug/debug_core.c 2011-08-05 20:34:06.000000000 -0400
56612 @@ -119,7 +119,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_loc
56613 */
56614 static atomic_t masters_in_kgdb;
56615 static atomic_t slaves_in_kgdb;
56616 -static atomic_t kgdb_break_tasklet_var;
56617 +static atomic_unchecked_t kgdb_break_tasklet_var;
56618 atomic_t kgdb_setting_breakpoint;
56619
56620 struct task_struct *kgdb_usethread;
56621 @@ -129,7 +129,7 @@ int kgdb_single_step;
56622 static pid_t kgdb_sstep_pid;
56623
56624 /* to keep track of the CPU which is doing the single stepping*/
56625 -atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56626 +atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
56627
56628 /*
56629 * If you are debugging a problem where roundup (the collection of
56630 @@ -542,7 +542,7 @@ return_normal:
56631 * kernel will only try for the value of sstep_tries before
56632 * giving up and continuing on.
56633 */
56634 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
56635 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
56636 (kgdb_info[cpu].task &&
56637 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
56638 atomic_set(&kgdb_active, -1);
56639 @@ -636,8 +636,8 @@ cpu_master_loop:
56640 }
56641
56642 kgdb_restore:
56643 - if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
56644 - int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
56645 + if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
56646 + int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
56647 if (kgdb_info[sstep_cpu].task)
56648 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
56649 else
56650 @@ -834,18 +834,18 @@ static void kgdb_unregister_callbacks(vo
56651 static void kgdb_tasklet_bpt(unsigned long ing)
56652 {
56653 kgdb_breakpoint();
56654 - atomic_set(&kgdb_break_tasklet_var, 0);
56655 + atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
56656 }
56657
56658 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
56659
56660 void kgdb_schedule_breakpoint(void)
56661 {
56662 - if (atomic_read(&kgdb_break_tasklet_var) ||
56663 + if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
56664 atomic_read(&kgdb_active) != -1 ||
56665 atomic_read(&kgdb_setting_breakpoint))
56666 return;
56667 - atomic_inc(&kgdb_break_tasklet_var);
56668 + atomic_inc_unchecked(&kgdb_break_tasklet_var);
56669 tasklet_schedule(&kgdb_tasklet_breakpoint);
56670 }
56671 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
56672 diff -urNp linux-2.6.39.4/kernel/debug/kdb/kdb_main.c linux-2.6.39.4/kernel/debug/kdb/kdb_main.c
56673 --- linux-2.6.39.4/kernel/debug/kdb/kdb_main.c 2011-05-19 00:06:34.000000000 -0400
56674 +++ linux-2.6.39.4/kernel/debug/kdb/kdb_main.c 2011-08-05 19:44:37.000000000 -0400
56675 @@ -1980,7 +1980,7 @@ static int kdb_lsmod(int argc, const cha
56676 list_for_each_entry(mod, kdb_modules, list) {
56677
56678 kdb_printf("%-20s%8u 0x%p ", mod->name,
56679 - mod->core_size, (void *)mod);
56680 + mod->core_size_rx + mod->core_size_rw, (void *)mod);
56681 #ifdef CONFIG_MODULE_UNLOAD
56682 kdb_printf("%4d ", module_refcount(mod));
56683 #endif
56684 @@ -1990,7 +1990,7 @@ static int kdb_lsmod(int argc, const cha
56685 kdb_printf(" (Loading)");
56686 else
56687 kdb_printf(" (Live)");
56688 - kdb_printf(" 0x%p", mod->module_core);
56689 + kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
56690
56691 #ifdef CONFIG_MODULE_UNLOAD
56692 {
56693 diff -urNp linux-2.6.39.4/kernel/exit.c linux-2.6.39.4/kernel/exit.c
56694 --- linux-2.6.39.4/kernel/exit.c 2011-05-19 00:06:34.000000000 -0400
56695 +++ linux-2.6.39.4/kernel/exit.c 2011-08-17 19:20:17.000000000 -0400
56696 @@ -57,6 +57,10 @@
56697 #include <asm/pgtable.h>
56698 #include <asm/mmu_context.h>
56699
56700 +#ifdef CONFIG_GRKERNSEC
56701 +extern rwlock_t grsec_exec_file_lock;
56702 +#endif
56703 +
56704 static void exit_mm(struct task_struct * tsk);
56705
56706 static void __unhash_process(struct task_struct *p, bool group_dead)
56707 @@ -169,6 +173,10 @@ void release_task(struct task_struct * p
56708 struct task_struct *leader;
56709 int zap_leader;
56710 repeat:
56711 +#ifdef CONFIG_NET
56712 + gr_del_task_from_ip_table(p);
56713 +#endif
56714 +
56715 tracehook_prepare_release_task(p);
56716 /* don't need to get the RCU readlock here - the process is dead and
56717 * can't be modifying its own credentials. But shut RCU-lockdep up */
56718 @@ -338,11 +346,22 @@ static void reparent_to_kthreadd(void)
56719 {
56720 write_lock_irq(&tasklist_lock);
56721
56722 +#ifdef CONFIG_GRKERNSEC
56723 + write_lock(&grsec_exec_file_lock);
56724 + if (current->exec_file) {
56725 + fput(current->exec_file);
56726 + current->exec_file = NULL;
56727 + }
56728 + write_unlock(&grsec_exec_file_lock);
56729 +#endif
56730 +
56731 ptrace_unlink(current);
56732 /* Reparent to init */
56733 current->real_parent = current->parent = kthreadd_task;
56734 list_move_tail(&current->sibling, &current->real_parent->children);
56735
56736 + gr_set_kernel_label(current);
56737 +
56738 /* Set the exit signal to SIGCHLD so we signal init on exit */
56739 current->exit_signal = SIGCHLD;
56740
56741 @@ -394,7 +413,7 @@ int allow_signal(int sig)
56742 * know it'll be handled, so that they don't get converted to
56743 * SIGKILL or just silently dropped.
56744 */
56745 - current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
56746 + current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
56747 recalc_sigpending();
56748 spin_unlock_irq(&current->sighand->siglock);
56749 return 0;
56750 @@ -430,6 +449,17 @@ void daemonize(const char *name, ...)
56751 vsnprintf(current->comm, sizeof(current->comm), name, args);
56752 va_end(args);
56753
56754 +#ifdef CONFIG_GRKERNSEC
56755 + write_lock(&grsec_exec_file_lock);
56756 + if (current->exec_file) {
56757 + fput(current->exec_file);
56758 + current->exec_file = NULL;
56759 + }
56760 + write_unlock(&grsec_exec_file_lock);
56761 +#endif
56762 +
56763 + gr_set_kernel_label(current);
56764 +
56765 /*
56766 * If we were started as result of loading a module, close all of the
56767 * user space pages. We don't need them, and if we didn't close them
56768 @@ -905,15 +935,8 @@ NORET_TYPE void do_exit(long code)
56769 struct task_struct *tsk = current;
56770 int group_dead;
56771
56772 - profile_task_exit(tsk);
56773 -
56774 - WARN_ON(atomic_read(&tsk->fs_excl));
56775 - WARN_ON(blk_needs_flush_plug(tsk));
56776 -
56777 if (unlikely(in_interrupt()))
56778 panic("Aiee, killing interrupt handler!");
56779 - if (unlikely(!tsk->pid))
56780 - panic("Attempted to kill the idle task!");
56781
56782 /*
56783 * If do_exit is called because this processes oopsed, it's possible
56784 @@ -924,6 +947,14 @@ NORET_TYPE void do_exit(long code)
56785 */
56786 set_fs(USER_DS);
56787
56788 + profile_task_exit(tsk);
56789 +
56790 + WARN_ON(atomic_read(&tsk->fs_excl));
56791 + WARN_ON(blk_needs_flush_plug(tsk));
56792 +
56793 + if (unlikely(!tsk->pid))
56794 + panic("Attempted to kill the idle task!");
56795 +
56796 tracehook_report_exit(&code);
56797
56798 validate_creds_for_do_exit(tsk);
56799 @@ -984,6 +1015,9 @@ NORET_TYPE void do_exit(long code)
56800 tsk->exit_code = code;
56801 taskstats_exit(tsk, group_dead);
56802
56803 + gr_acl_handle_psacct(tsk, code);
56804 + gr_acl_handle_exit();
56805 +
56806 exit_mm(tsk);
56807
56808 if (group_dead)
56809 diff -urNp linux-2.6.39.4/kernel/fork.c linux-2.6.39.4/kernel/fork.c
56810 --- linux-2.6.39.4/kernel/fork.c 2011-05-19 00:06:34.000000000 -0400
56811 +++ linux-2.6.39.4/kernel/fork.c 2011-08-05 19:44:37.000000000 -0400
56812 @@ -287,7 +287,7 @@ static struct task_struct *dup_task_stru
56813 *stackend = STACK_END_MAGIC; /* for overflow detection */
56814
56815 #ifdef CONFIG_CC_STACKPROTECTOR
56816 - tsk->stack_canary = get_random_int();
56817 + tsk->stack_canary = pax_get_random_long();
56818 #endif
56819
56820 /* One for us, one for whoever does the "release_task()" (usually parent) */
56821 @@ -309,13 +309,78 @@ out:
56822 }
56823
56824 #ifdef CONFIG_MMU
56825 +static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt)
56826 +{
56827 + struct vm_area_struct *tmp;
56828 + unsigned long charge;
56829 + struct mempolicy *pol;
56830 + struct file *file;
56831 +
56832 + charge = 0;
56833 + if (mpnt->vm_flags & VM_ACCOUNT) {
56834 + unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56835 + if (security_vm_enough_memory(len))
56836 + goto fail_nomem;
56837 + charge = len;
56838 + }
56839 + tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56840 + if (!tmp)
56841 + goto fail_nomem;
56842 + *tmp = *mpnt;
56843 + tmp->vm_mm = mm;
56844 + INIT_LIST_HEAD(&tmp->anon_vma_chain);
56845 + pol = mpol_dup(vma_policy(mpnt));
56846 + if (IS_ERR(pol))
56847 + goto fail_nomem_policy;
56848 + vma_set_policy(tmp, pol);
56849 + if (anon_vma_fork(tmp, mpnt))
56850 + goto fail_nomem_anon_vma_fork;
56851 + tmp->vm_flags &= ~VM_LOCKED;
56852 + tmp->vm_next = tmp->vm_prev = NULL;
56853 + tmp->vm_mirror = NULL;
56854 + file = tmp->vm_file;
56855 + if (file) {
56856 + struct inode *inode = file->f_path.dentry->d_inode;
56857 + struct address_space *mapping = file->f_mapping;
56858 +
56859 + get_file(file);
56860 + if (tmp->vm_flags & VM_DENYWRITE)
56861 + atomic_dec(&inode->i_writecount);
56862 + spin_lock(&mapping->i_mmap_lock);
56863 + if (tmp->vm_flags & VM_SHARED)
56864 + mapping->i_mmap_writable++;
56865 + tmp->vm_truncate_count = mpnt->vm_truncate_count;
56866 + flush_dcache_mmap_lock(mapping);
56867 + /* insert tmp into the share list, just after mpnt */
56868 + vma_prio_tree_add(tmp, mpnt);
56869 + flush_dcache_mmap_unlock(mapping);
56870 + spin_unlock(&mapping->i_mmap_lock);
56871 + }
56872 +
56873 + /*
56874 + * Clear hugetlb-related page reserves for children. This only
56875 + * affects MAP_PRIVATE mappings. Faults generated by the child
56876 + * are not guaranteed to succeed, even if read-only
56877 + */
56878 + if (is_vm_hugetlb_page(tmp))
56879 + reset_vma_resv_huge_pages(tmp);
56880 +
56881 + return tmp;
56882 +
56883 +fail_nomem_anon_vma_fork:
56884 + mpol_put(pol);
56885 +fail_nomem_policy:
56886 + kmem_cache_free(vm_area_cachep, tmp);
56887 +fail_nomem:
56888 + vm_unacct_memory(charge);
56889 + return NULL;
56890 +}
56891 +
56892 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
56893 {
56894 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
56895 struct rb_node **rb_link, *rb_parent;
56896 int retval;
56897 - unsigned long charge;
56898 - struct mempolicy *pol;
56899
56900 down_write(&oldmm->mmap_sem);
56901 flush_cache_dup_mm(oldmm);
56902 @@ -327,8 +392,8 @@ static int dup_mmap(struct mm_struct *mm
56903 mm->locked_vm = 0;
56904 mm->mmap = NULL;
56905 mm->mmap_cache = NULL;
56906 - mm->free_area_cache = oldmm->mmap_base;
56907 - mm->cached_hole_size = ~0UL;
56908 + mm->free_area_cache = oldmm->free_area_cache;
56909 + mm->cached_hole_size = oldmm->cached_hole_size;
56910 mm->map_count = 0;
56911 cpumask_clear(mm_cpumask(mm));
56912 mm->mm_rb = RB_ROOT;
56913 @@ -344,8 +409,6 @@ static int dup_mmap(struct mm_struct *mm
56914
56915 prev = NULL;
56916 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
56917 - struct file *file;
56918 -
56919 if (mpnt->vm_flags & VM_DONTCOPY) {
56920 long pages = vma_pages(mpnt);
56921 mm->total_vm -= pages;
56922 @@ -353,56 +416,13 @@ static int dup_mmap(struct mm_struct *mm
56923 -pages);
56924 continue;
56925 }
56926 - charge = 0;
56927 - if (mpnt->vm_flags & VM_ACCOUNT) {
56928 - unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
56929 - if (security_vm_enough_memory(len))
56930 - goto fail_nomem;
56931 - charge = len;
56932 - }
56933 - tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
56934 - if (!tmp)
56935 - goto fail_nomem;
56936 - *tmp = *mpnt;
56937 - INIT_LIST_HEAD(&tmp->anon_vma_chain);
56938 - pol = mpol_dup(vma_policy(mpnt));
56939 - retval = PTR_ERR(pol);
56940 - if (IS_ERR(pol))
56941 - goto fail_nomem_policy;
56942 - vma_set_policy(tmp, pol);
56943 - tmp->vm_mm = mm;
56944 - if (anon_vma_fork(tmp, mpnt))
56945 - goto fail_nomem_anon_vma_fork;
56946 - tmp->vm_flags &= ~VM_LOCKED;
56947 - tmp->vm_next = tmp->vm_prev = NULL;
56948 - file = tmp->vm_file;
56949 - if (file) {
56950 - struct inode *inode = file->f_path.dentry->d_inode;
56951 - struct address_space *mapping = file->f_mapping;
56952 -
56953 - get_file(file);
56954 - if (tmp->vm_flags & VM_DENYWRITE)
56955 - atomic_dec(&inode->i_writecount);
56956 - spin_lock(&mapping->i_mmap_lock);
56957 - if (tmp->vm_flags & VM_SHARED)
56958 - mapping->i_mmap_writable++;
56959 - tmp->vm_truncate_count = mpnt->vm_truncate_count;
56960 - flush_dcache_mmap_lock(mapping);
56961 - /* insert tmp into the share list, just after mpnt */
56962 - vma_prio_tree_add(tmp, mpnt);
56963 - flush_dcache_mmap_unlock(mapping);
56964 - spin_unlock(&mapping->i_mmap_lock);
56965 + tmp = dup_vma(mm, mpnt);
56966 + if (!tmp) {
56967 + retval = -ENOMEM;
56968 + goto out;
56969 }
56970
56971 /*
56972 - * Clear hugetlb-related page reserves for children. This only
56973 - * affects MAP_PRIVATE mappings. Faults generated by the child
56974 - * are not guaranteed to succeed, even if read-only
56975 - */
56976 - if (is_vm_hugetlb_page(tmp))
56977 - reset_vma_resv_huge_pages(tmp);
56978 -
56979 - /*
56980 * Link in the new vma and copy the page table entries.
56981 */
56982 *pprev = tmp;
56983 @@ -423,6 +443,31 @@ static int dup_mmap(struct mm_struct *mm
56984 if (retval)
56985 goto out;
56986 }
56987 +
56988 +#ifdef CONFIG_PAX_SEGMEXEC
56989 + if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
56990 + struct vm_area_struct *mpnt_m;
56991 +
56992 + for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
56993 + BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
56994 +
56995 + if (!mpnt->vm_mirror)
56996 + continue;
56997 +
56998 + if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
56999 + BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
57000 + mpnt->vm_mirror = mpnt_m;
57001 + } else {
57002 + BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
57003 + mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
57004 + mpnt_m->vm_mirror->vm_mirror = mpnt_m;
57005 + mpnt->vm_mirror->vm_mirror = mpnt;
57006 + }
57007 + }
57008 + BUG_ON(mpnt_m);
57009 + }
57010 +#endif
57011 +
57012 /* a new mm has just been created */
57013 arch_dup_mmap(oldmm, mm);
57014 retval = 0;
57015 @@ -431,14 +476,6 @@ out:
57016 flush_tlb_mm(oldmm);
57017 up_write(&oldmm->mmap_sem);
57018 return retval;
57019 -fail_nomem_anon_vma_fork:
57020 - mpol_put(pol);
57021 -fail_nomem_policy:
57022 - kmem_cache_free(vm_area_cachep, tmp);
57023 -fail_nomem:
57024 - retval = -ENOMEM;
57025 - vm_unacct_memory(charge);
57026 - goto out;
57027 }
57028
57029 static inline int mm_alloc_pgd(struct mm_struct * mm)
57030 @@ -785,13 +822,14 @@ static int copy_fs(unsigned long clone_f
57031 spin_unlock(&fs->lock);
57032 return -EAGAIN;
57033 }
57034 - fs->users++;
57035 + atomic_inc(&fs->users);
57036 spin_unlock(&fs->lock);
57037 return 0;
57038 }
57039 tsk->fs = copy_fs_struct(fs);
57040 if (!tsk->fs)
57041 return -ENOMEM;
57042 + gr_set_chroot_entries(tsk, &tsk->fs->root);
57043 return 0;
57044 }
57045
57046 @@ -1049,10 +1087,13 @@ static struct task_struct *copy_process(
57047 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
57048 #endif
57049 retval = -EAGAIN;
57050 +
57051 + gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
57052 +
57053 if (atomic_read(&p->real_cred->user->processes) >=
57054 task_rlimit(p, RLIMIT_NPROC)) {
57055 - if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
57056 - p->real_cred->user != INIT_USER)
57057 + if (p->real_cred->user != INIT_USER &&
57058 + !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE))
57059 goto bad_fork_free;
57060 }
57061
57062 @@ -1200,6 +1241,8 @@ static struct task_struct *copy_process(
57063 goto bad_fork_free_pid;
57064 }
57065
57066 + gr_copy_label(p);
57067 +
57068 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
57069 /*
57070 * Clear TID on mm_release()?
57071 @@ -1360,6 +1403,8 @@ bad_fork_cleanup_count:
57072 bad_fork_free:
57073 free_task(p);
57074 fork_out:
57075 + gr_log_forkfail(retval);
57076 +
57077 return ERR_PTR(retval);
57078 }
57079
57080 @@ -1448,6 +1493,8 @@ long do_fork(unsigned long clone_flags,
57081 if (clone_flags & CLONE_PARENT_SETTID)
57082 put_user(nr, parent_tidptr);
57083
57084 + gr_handle_brute_check();
57085 +
57086 if (clone_flags & CLONE_VFORK) {
57087 p->vfork_done = &vfork;
57088 init_completion(&vfork);
57089 @@ -1549,7 +1596,7 @@ static int unshare_fs(unsigned long unsh
57090 return 0;
57091
57092 /* don't need lock here; in the worst case we'll do useless copy */
57093 - if (fs->users == 1)
57094 + if (atomic_read(&fs->users) == 1)
57095 return 0;
57096
57097 *new_fsp = copy_fs_struct(fs);
57098 @@ -1636,7 +1683,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
57099 fs = current->fs;
57100 spin_lock(&fs->lock);
57101 current->fs = new_fs;
57102 - if (--fs->users)
57103 + gr_set_chroot_entries(current, &current->fs->root);
57104 + if (atomic_dec_return(&fs->users))
57105 new_fs = NULL;
57106 else
57107 new_fs = fs;
57108 diff -urNp linux-2.6.39.4/kernel/futex.c linux-2.6.39.4/kernel/futex.c
57109 --- linux-2.6.39.4/kernel/futex.c 2011-05-19 00:06:34.000000000 -0400
57110 +++ linux-2.6.39.4/kernel/futex.c 2011-08-05 19:44:37.000000000 -0400
57111 @@ -54,6 +54,7 @@
57112 #include <linux/mount.h>
57113 #include <linux/pagemap.h>
57114 #include <linux/syscalls.h>
57115 +#include <linux/ptrace.h>
57116 #include <linux/signal.h>
57117 #include <linux/module.h>
57118 #include <linux/magic.h>
57119 @@ -236,6 +237,11 @@ get_futex_key(u32 __user *uaddr, int fsh
57120 struct page *page, *page_head;
57121 int err;
57122
57123 +#ifdef CONFIG_PAX_SEGMEXEC
57124 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
57125 + return -EFAULT;
57126 +#endif
57127 +
57128 /*
57129 * The futex address must be "naturally" aligned.
57130 */
57131 @@ -1833,6 +1839,8 @@ static int futex_wait(u32 __user *uaddr,
57132 struct futex_q q = futex_q_init;
57133 int ret;
57134
57135 + pax_track_stack();
57136 +
57137 if (!bitset)
57138 return -EINVAL;
57139 q.bitset = bitset;
57140 @@ -2229,6 +2237,8 @@ static int futex_wait_requeue_pi(u32 __u
57141 struct futex_q q = futex_q_init;
57142 int res, ret;
57143
57144 + pax_track_stack();
57145 +
57146 if (!bitset)
57147 return -EINVAL;
57148
57149 @@ -2401,7 +2411,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
57150 {
57151 struct robust_list_head __user *head;
57152 unsigned long ret;
57153 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
57154 const struct cred *cred = current_cred(), *pcred;
57155 +#endif
57156
57157 if (!futex_cmpxchg_enabled)
57158 return -ENOSYS;
57159 @@ -2417,6 +2429,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
57160 if (!p)
57161 goto err_unlock;
57162 ret = -EPERM;
57163 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57164 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
57165 + goto err_unlock;
57166 +#else
57167 pcred = __task_cred(p);
57168 /* If victim is in different user_ns, then uids are not
57169 comparable, so we must have CAP_SYS_PTRACE */
57170 @@ -2431,6 +2447,7 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
57171 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
57172 goto err_unlock;
57173 ok:
57174 +#endif
57175 head = p->robust_list;
57176 rcu_read_unlock();
57177 }
57178 @@ -2682,6 +2699,7 @@ static int __init futex_init(void)
57179 {
57180 u32 curval;
57181 int i;
57182 + mm_segment_t oldfs;
57183
57184 /*
57185 * This will fail and we want it. Some arch implementations do
57186 @@ -2693,8 +2711,11 @@ static int __init futex_init(void)
57187 * implementation, the non-functional ones will return
57188 * -ENOSYS.
57189 */
57190 + oldfs = get_fs();
57191 + set_fs(USER_DS);
57192 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
57193 futex_cmpxchg_enabled = 1;
57194 + set_fs(oldfs);
57195
57196 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
57197 plist_head_init(&futex_queues[i].chain, &futex_queues[i].lock);
57198 diff -urNp linux-2.6.39.4/kernel/futex_compat.c linux-2.6.39.4/kernel/futex_compat.c
57199 --- linux-2.6.39.4/kernel/futex_compat.c 2011-05-19 00:06:34.000000000 -0400
57200 +++ linux-2.6.39.4/kernel/futex_compat.c 2011-08-05 19:44:37.000000000 -0400
57201 @@ -10,6 +10,7 @@
57202 #include <linux/compat.h>
57203 #include <linux/nsproxy.h>
57204 #include <linux/futex.h>
57205 +#include <linux/ptrace.h>
57206
57207 #include <asm/uaccess.h>
57208
57209 @@ -136,7 +137,10 @@ compat_sys_get_robust_list(int pid, comp
57210 {
57211 struct compat_robust_list_head __user *head;
57212 unsigned long ret;
57213 - const struct cred *cred = current_cred(), *pcred;
57214 +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
57215 + const struct cred *cred = current_cred();
57216 + const struct cred *pcred;
57217 +#endif
57218
57219 if (!futex_cmpxchg_enabled)
57220 return -ENOSYS;
57221 @@ -152,6 +156,10 @@ compat_sys_get_robust_list(int pid, comp
57222 if (!p)
57223 goto err_unlock;
57224 ret = -EPERM;
57225 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
57226 + if (!ptrace_may_access(p, PTRACE_MODE_READ))
57227 + goto err_unlock;
57228 +#else
57229 pcred = __task_cred(p);
57230 /* If victim is in different user_ns, then uids are not
57231 comparable, so we must have CAP_SYS_PTRACE */
57232 @@ -166,6 +174,7 @@ compat_sys_get_robust_list(int pid, comp
57233 !ns_capable(pcred->user->user_ns, CAP_SYS_PTRACE))
57234 goto err_unlock;
57235 ok:
57236 +#endif
57237 head = p->compat_robust_list;
57238 rcu_read_unlock();
57239 }
57240 diff -urNp linux-2.6.39.4/kernel/gcov/base.c linux-2.6.39.4/kernel/gcov/base.c
57241 --- linux-2.6.39.4/kernel/gcov/base.c 2011-05-19 00:06:34.000000000 -0400
57242 +++ linux-2.6.39.4/kernel/gcov/base.c 2011-08-05 19:44:37.000000000 -0400
57243 @@ -102,11 +102,6 @@ void gcov_enable_events(void)
57244 }
57245
57246 #ifdef CONFIG_MODULES
57247 -static inline int within(void *addr, void *start, unsigned long size)
57248 -{
57249 - return ((addr >= start) && (addr < start + size));
57250 -}
57251 -
57252 /* Update list and generate events when modules are unloaded. */
57253 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
57254 void *data)
57255 @@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
57256 prev = NULL;
57257 /* Remove entries located in module from linked list. */
57258 for (info = gcov_info_head; info; info = info->next) {
57259 - if (within(info, mod->module_core, mod->core_size)) {
57260 + if (within_module_core_rw((unsigned long)info, mod)) {
57261 if (prev)
57262 prev->next = info->next;
57263 else
57264 diff -urNp linux-2.6.39.4/kernel/hrtimer.c linux-2.6.39.4/kernel/hrtimer.c
57265 --- linux-2.6.39.4/kernel/hrtimer.c 2011-05-19 00:06:34.000000000 -0400
57266 +++ linux-2.6.39.4/kernel/hrtimer.c 2011-08-05 19:44:37.000000000 -0400
57267 @@ -1383,7 +1383,7 @@ void hrtimer_peek_ahead_timers(void)
57268 local_irq_restore(flags);
57269 }
57270
57271 -static void run_hrtimer_softirq(struct softirq_action *h)
57272 +static void run_hrtimer_softirq(void)
57273 {
57274 hrtimer_peek_ahead_timers();
57275 }
57276 diff -urNp linux-2.6.39.4/kernel/irq/manage.c linux-2.6.39.4/kernel/irq/manage.c
57277 --- linux-2.6.39.4/kernel/irq/manage.c 2011-05-19 00:06:34.000000000 -0400
57278 +++ linux-2.6.39.4/kernel/irq/manage.c 2011-08-05 19:44:37.000000000 -0400
57279 @@ -491,6 +491,9 @@ int irq_set_irq_wake(unsigned int irq, u
57280 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
57281 int ret = 0;
57282
57283 + if (!desc)
57284 + return -EINVAL;
57285 +
57286 /* wakeup-capable irqs can be shared between drivers that
57287 * don't need to have the same sleep mode behaviors.
57288 */
57289 diff -urNp linux-2.6.39.4/kernel/jump_label.c linux-2.6.39.4/kernel/jump_label.c
57290 --- linux-2.6.39.4/kernel/jump_label.c 2011-05-19 00:06:34.000000000 -0400
57291 +++ linux-2.6.39.4/kernel/jump_label.c 2011-08-05 19:44:37.000000000 -0400
57292 @@ -49,6 +49,17 @@ void jump_label_unlock(void)
57293 mutex_unlock(&jump_label_mutex);
57294 }
57295
57296 +static void jump_label_swap(void *a, void *b, int size)
57297 +{
57298 + struct jump_entry t;
57299 +
57300 + t = *(struct jump_entry *)a;
57301 + pax_open_kernel();
57302 + *(struct jump_entry *)a = *(struct jump_entry *)b;
57303 + *(struct jump_entry *)b = t;
57304 + pax_close_kernel();
57305 +}
57306 +
57307 static int jump_label_cmp(const void *a, const void *b)
57308 {
57309 const struct jump_entry *jea = a;
57310 @@ -70,7 +81,7 @@ sort_jump_label_entries(struct jump_entr
57311
57312 size = (((unsigned long)stop - (unsigned long)start)
57313 / sizeof(struct jump_entry));
57314 - sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
57315 + sort(start, size, sizeof(struct jump_entry), jump_label_cmp, jump_label_swap);
57316 }
57317
57318 static struct jump_label_entry *get_jump_label_entry(jump_label_t key)
57319 @@ -407,8 +418,11 @@ static void remove_jump_label_module_ini
57320 count = e_module->nr_entries;
57321 iter = e_module->table;
57322 while (count--) {
57323 - if (within_module_init(iter->code, mod))
57324 + if (within_module_init(iter->code, mod)) {
57325 + pax_open_kernel();
57326 iter->key = 0;
57327 + pax_close_kernel();
57328 + }
57329 iter++;
57330 }
57331 }
57332 diff -urNp linux-2.6.39.4/kernel/kallsyms.c linux-2.6.39.4/kernel/kallsyms.c
57333 --- linux-2.6.39.4/kernel/kallsyms.c 2011-05-19 00:06:34.000000000 -0400
57334 +++ linux-2.6.39.4/kernel/kallsyms.c 2011-08-05 19:44:37.000000000 -0400
57335 @@ -11,6 +11,9 @@
57336 * Changed the compression method from stem compression to "table lookup"
57337 * compression (see scripts/kallsyms.c for a more complete description)
57338 */
57339 +#ifdef CONFIG_GRKERNSEC_HIDESYM
57340 +#define __INCLUDED_BY_HIDESYM 1
57341 +#endif
57342 #include <linux/kallsyms.h>
57343 #include <linux/module.h>
57344 #include <linux/init.h>
57345 @@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark
57346
57347 static inline int is_kernel_inittext(unsigned long addr)
57348 {
57349 + if (system_state != SYSTEM_BOOTING)
57350 + return 0;
57351 +
57352 if (addr >= (unsigned long)_sinittext
57353 && addr <= (unsigned long)_einittext)
57354 return 1;
57355 return 0;
57356 }
57357
57358 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57359 +#ifdef CONFIG_MODULES
57360 +static inline int is_module_text(unsigned long addr)
57361 +{
57362 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
57363 + return 1;
57364 +
57365 + addr = ktla_ktva(addr);
57366 + return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
57367 +}
57368 +#else
57369 +static inline int is_module_text(unsigned long addr)
57370 +{
57371 + return 0;
57372 +}
57373 +#endif
57374 +#endif
57375 +
57376 static inline int is_kernel_text(unsigned long addr)
57377 {
57378 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
57379 @@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne
57380
57381 static inline int is_kernel(unsigned long addr)
57382 {
57383 +
57384 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57385 + if (is_kernel_text(addr) || is_kernel_inittext(addr))
57386 + return 1;
57387 +
57388 + if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
57389 +#else
57390 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
57391 +#endif
57392 +
57393 return 1;
57394 return in_gate_area_no_mm(addr);
57395 }
57396
57397 static int is_ksym_addr(unsigned long addr)
57398 {
57399 +
57400 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
57401 + if (is_module_text(addr))
57402 + return 0;
57403 +#endif
57404 +
57405 if (all_var)
57406 return is_kernel(addr);
57407
57408 @@ -454,7 +493,6 @@ static unsigned long get_ksymbol_core(st
57409
57410 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
57411 {
57412 - iter->name[0] = '\0';
57413 iter->nameoff = get_symbol_offset(new_pos);
57414 iter->pos = new_pos;
57415 }
57416 @@ -502,6 +540,11 @@ static int s_show(struct seq_file *m, vo
57417 {
57418 struct kallsym_iter *iter = m->private;
57419
57420 +#ifdef CONFIG_GRKERNSEC_HIDESYM
57421 + if (current_uid())
57422 + return 0;
57423 +#endif
57424 +
57425 /* Some debugging symbols have no name. Ignore them. */
57426 if (!iter->name[0])
57427 return 0;
57428 @@ -540,7 +583,7 @@ static int kallsyms_open(struct inode *i
57429 struct kallsym_iter *iter;
57430 int ret;
57431
57432 - iter = kmalloc(sizeof(*iter), GFP_KERNEL);
57433 + iter = kzalloc(sizeof(*iter), GFP_KERNEL);
57434 if (!iter)
57435 return -ENOMEM;
57436 reset_iter(iter, 0);
57437 diff -urNp linux-2.6.39.4/kernel/kmod.c linux-2.6.39.4/kernel/kmod.c
57438 --- linux-2.6.39.4/kernel/kmod.c 2011-05-19 00:06:34.000000000 -0400
57439 +++ linux-2.6.39.4/kernel/kmod.c 2011-08-05 19:44:37.000000000 -0400
57440 @@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
57441 * If module auto-loading support is disabled then this function
57442 * becomes a no-operation.
57443 */
57444 -int __request_module(bool wait, const char *fmt, ...)
57445 +static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
57446 {
57447 - va_list args;
57448 char module_name[MODULE_NAME_LEN];
57449 unsigned int max_modprobes;
57450 int ret;
57451 - char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
57452 + char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
57453 static char *envp[] = { "HOME=/",
57454 "TERM=linux",
57455 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
57456 @@ -80,9 +79,7 @@ int __request_module(bool wait, const ch
57457 #define MAX_KMOD_CONCURRENT 50 /* Completely arbitrary value - KAO */
57458 static int kmod_loop_msg;
57459
57460 - va_start(args, fmt);
57461 - ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
57462 - va_end(args);
57463 + ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
57464 if (ret >= MODULE_NAME_LEN)
57465 return -ENAMETOOLONG;
57466
57467 @@ -90,6 +87,20 @@ int __request_module(bool wait, const ch
57468 if (ret)
57469 return ret;
57470
57471 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57472 + if (!current_uid()) {
57473 + /* hack to workaround consolekit/udisks stupidity */
57474 + read_lock(&tasklist_lock);
57475 + if (!strcmp(current->comm, "mount") &&
57476 + current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
57477 + read_unlock(&tasklist_lock);
57478 + printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
57479 + return -EPERM;
57480 + }
57481 + read_unlock(&tasklist_lock);
57482 + }
57483 +#endif
57484 +
57485 /* If modprobe needs a service that is in a module, we get a recursive
57486 * loop. Limit the number of running kmod threads to max_threads/2 or
57487 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
57488 @@ -123,6 +134,47 @@ int __request_module(bool wait, const ch
57489 atomic_dec(&kmod_concurrent);
57490 return ret;
57491 }
57492 +
57493 +int ___request_module(bool wait, char *module_param, const char *fmt, ...)
57494 +{
57495 + va_list args;
57496 + int ret;
57497 +
57498 + va_start(args, fmt);
57499 + ret = ____request_module(wait, module_param, fmt, args);
57500 + va_end(args);
57501 +
57502 + return ret;
57503 +}
57504 +
57505 +int __request_module(bool wait, const char *fmt, ...)
57506 +{
57507 + va_list args;
57508 + int ret;
57509 +
57510 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57511 + if (current_uid()) {
57512 + char module_param[MODULE_NAME_LEN];
57513 +
57514 + memset(module_param, 0, sizeof(module_param));
57515 +
57516 + snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
57517 +
57518 + va_start(args, fmt);
57519 + ret = ____request_module(wait, module_param, fmt, args);
57520 + va_end(args);
57521 +
57522 + return ret;
57523 + }
57524 +#endif
57525 +
57526 + va_start(args, fmt);
57527 + ret = ____request_module(wait, NULL, fmt, args);
57528 + va_end(args);
57529 +
57530 + return ret;
57531 +}
57532 +
57533 EXPORT_SYMBOL(__request_module);
57534 #endif /* CONFIG_MODULES */
57535
57536 diff -urNp linux-2.6.39.4/kernel/kprobes.c linux-2.6.39.4/kernel/kprobes.c
57537 --- linux-2.6.39.4/kernel/kprobes.c 2011-05-19 00:06:34.000000000 -0400
57538 +++ linux-2.6.39.4/kernel/kprobes.c 2011-08-05 19:44:37.000000000 -0400
57539 @@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_
57540 * kernel image and loaded module images reside. This is required
57541 * so x86_64 can correctly handle the %rip-relative fixups.
57542 */
57543 - kip->insns = module_alloc(PAGE_SIZE);
57544 + kip->insns = module_alloc_exec(PAGE_SIZE);
57545 if (!kip->insns) {
57546 kfree(kip);
57547 return NULL;
57548 @@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(st
57549 */
57550 if (!list_is_singular(&kip->list)) {
57551 list_del(&kip->list);
57552 - module_free(NULL, kip->insns);
57553 + module_free_exec(NULL, kip->insns);
57554 kfree(kip);
57555 }
57556 return 1;
57557 @@ -1936,7 +1936,7 @@ static int __init init_kprobes(void)
57558 {
57559 int i, err = 0;
57560 unsigned long offset = 0, size = 0;
57561 - char *modname, namebuf[128];
57562 + char *modname, namebuf[KSYM_NAME_LEN];
57563 const char *symbol_name;
57564 void *addr;
57565 struct kprobe_blackpoint *kb;
57566 @@ -2062,7 +2062,7 @@ static int __kprobes show_kprobe_addr(st
57567 const char *sym = NULL;
57568 unsigned int i = *(loff_t *) v;
57569 unsigned long offset = 0;
57570 - char *modname, namebuf[128];
57571 + char *modname, namebuf[KSYM_NAME_LEN];
57572
57573 head = &kprobe_table[i];
57574 preempt_disable();
57575 diff -urNp linux-2.6.39.4/kernel/lockdep.c linux-2.6.39.4/kernel/lockdep.c
57576 --- linux-2.6.39.4/kernel/lockdep.c 2011-06-25 12:55:23.000000000 -0400
57577 +++ linux-2.6.39.4/kernel/lockdep.c 2011-08-05 19:44:37.000000000 -0400
57578 @@ -571,6 +571,10 @@ static int static_obj(void *obj)
57579 end = (unsigned long) &_end,
57580 addr = (unsigned long) obj;
57581
57582 +#ifdef CONFIG_PAX_KERNEXEC
57583 + start = ktla_ktva(start);
57584 +#endif
57585 +
57586 /*
57587 * static variable?
57588 */
57589 @@ -706,6 +710,7 @@ register_lock_class(struct lockdep_map *
57590 if (!static_obj(lock->key)) {
57591 debug_locks_off();
57592 printk("INFO: trying to register non-static key.\n");
57593 + printk("lock:%pS key:%pS.\n", lock, lock->key);
57594 printk("the code is fine but needs lockdep annotation.\n");
57595 printk("turning off the locking correctness validator.\n");
57596 dump_stack();
57597 @@ -2752,7 +2757,7 @@ static int __lock_acquire(struct lockdep
57598 if (!class)
57599 return 0;
57600 }
57601 - atomic_inc((atomic_t *)&class->ops);
57602 + atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
57603 if (very_verbose(class)) {
57604 printk("\nacquire class [%p] %s", class->key, class->name);
57605 if (class->name_version > 1)
57606 diff -urNp linux-2.6.39.4/kernel/lockdep_proc.c linux-2.6.39.4/kernel/lockdep_proc.c
57607 --- linux-2.6.39.4/kernel/lockdep_proc.c 2011-05-19 00:06:34.000000000 -0400
57608 +++ linux-2.6.39.4/kernel/lockdep_proc.c 2011-08-05 19:44:37.000000000 -0400
57609 @@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
57610
57611 static void print_name(struct seq_file *m, struct lock_class *class)
57612 {
57613 - char str[128];
57614 + char str[KSYM_NAME_LEN];
57615 const char *name = class->name;
57616
57617 if (!name) {
57618 diff -urNp linux-2.6.39.4/kernel/module.c linux-2.6.39.4/kernel/module.c
57619 --- linux-2.6.39.4/kernel/module.c 2011-05-19 00:06:34.000000000 -0400
57620 +++ linux-2.6.39.4/kernel/module.c 2011-08-05 19:44:37.000000000 -0400
57621 @@ -57,6 +57,7 @@
57622 #include <linux/kmemleak.h>
57623 #include <linux/jump_label.h>
57624 #include <linux/pfn.h>
57625 +#include <linux/grsecurity.h>
57626
57627 #define CREATE_TRACE_POINTS
57628 #include <trace/events/module.h>
57629 @@ -118,7 +119,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not
57630
57631 /* Bounds of module allocation, for speeding __module_address.
57632 * Protected by module_mutex. */
57633 -static unsigned long module_addr_min = -1UL, module_addr_max = 0;
57634 +static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
57635 +static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
57636
57637 int register_module_notifier(struct notifier_block * nb)
57638 {
57639 @@ -282,7 +284,7 @@ bool each_symbol(bool (*fn)(const struct
57640 return true;
57641
57642 list_for_each_entry_rcu(mod, &modules, list) {
57643 - struct symsearch arr[] = {
57644 + struct symsearch modarr[] = {
57645 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
57646 NOT_GPL_ONLY, false },
57647 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
57648 @@ -304,7 +306,7 @@ bool each_symbol(bool (*fn)(const struct
57649 #endif
57650 };
57651
57652 - if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
57653 + if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
57654 return true;
57655 }
57656 return false;
57657 @@ -415,7 +417,7 @@ static inline void __percpu *mod_percpu(
57658 static int percpu_modalloc(struct module *mod,
57659 unsigned long size, unsigned long align)
57660 {
57661 - if (align > PAGE_SIZE) {
57662 + if (align-1 >= PAGE_SIZE) {
57663 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
57664 mod->name, align, PAGE_SIZE);
57665 align = PAGE_SIZE;
57666 @@ -1143,7 +1145,7 @@ resolve_symbol_wait(struct module *mod,
57667 */
57668 #ifdef CONFIG_SYSFS
57669
57670 -#ifdef CONFIG_KALLSYMS
57671 +#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
57672 static inline bool sect_empty(const Elf_Shdr *sect)
57673 {
57674 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
57675 @@ -1612,17 +1614,17 @@ void unset_section_ro_nx(struct module *
57676 {
57677 unsigned long total_pages;
57678
57679 - if (mod->module_core == module_region) {
57680 + if (mod->module_core_rx == module_region) {
57681 /* Set core as NX+RW */
57682 - total_pages = MOD_NUMBER_OF_PAGES(mod->module_core, mod->core_size);
57683 - set_memory_nx((unsigned long)mod->module_core, total_pages);
57684 - set_memory_rw((unsigned long)mod->module_core, total_pages);
57685 + total_pages = MOD_NUMBER_OF_PAGES(mod->module_core_rx, mod->core_size_rx);
57686 + set_memory_nx((unsigned long)mod->module_core_rx, total_pages);
57687 + set_memory_rw((unsigned long)mod->module_core_rx, total_pages);
57688
57689 - } else if (mod->module_init == module_region) {
57690 + } else if (mod->module_init_rx == module_region) {
57691 /* Set init as NX+RW */
57692 - total_pages = MOD_NUMBER_OF_PAGES(mod->module_init, mod->init_size);
57693 - set_memory_nx((unsigned long)mod->module_init, total_pages);
57694 - set_memory_rw((unsigned long)mod->module_init, total_pages);
57695 + total_pages = MOD_NUMBER_OF_PAGES(mod->module_init_rx, mod->init_size_rx);
57696 + set_memory_nx((unsigned long)mod->module_init_rx, total_pages);
57697 + set_memory_rw((unsigned long)mod->module_init_rx, total_pages);
57698 }
57699 }
57700
57701 @@ -1633,14 +1635,14 @@ void set_all_modules_text_rw()
57702
57703 mutex_lock(&module_mutex);
57704 list_for_each_entry_rcu(mod, &modules, list) {
57705 - if ((mod->module_core) && (mod->core_text_size)) {
57706 - set_page_attributes(mod->module_core,
57707 - mod->module_core + mod->core_text_size,
57708 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
57709 + set_page_attributes(mod->module_core_rx,
57710 + mod->module_core_rx + mod->core_size_rx,
57711 set_memory_rw);
57712 }
57713 - if ((mod->module_init) && (mod->init_text_size)) {
57714 - set_page_attributes(mod->module_init,
57715 - mod->module_init + mod->init_text_size,
57716 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
57717 + set_page_attributes(mod->module_init_rx,
57718 + mod->module_init_rx + mod->init_size_rx,
57719 set_memory_rw);
57720 }
57721 }
57722 @@ -1654,14 +1656,14 @@ void set_all_modules_text_ro()
57723
57724 mutex_lock(&module_mutex);
57725 list_for_each_entry_rcu(mod, &modules, list) {
57726 - if ((mod->module_core) && (mod->core_text_size)) {
57727 - set_page_attributes(mod->module_core,
57728 - mod->module_core + mod->core_text_size,
57729 + if ((mod->module_core_rx) && (mod->core_size_rx)) {
57730 + set_page_attributes(mod->module_core_rx,
57731 + mod->module_core_rx + mod->core_size_rx,
57732 set_memory_ro);
57733 }
57734 - if ((mod->module_init) && (mod->init_text_size)) {
57735 - set_page_attributes(mod->module_init,
57736 - mod->module_init + mod->init_text_size,
57737 + if ((mod->module_init_rx) && (mod->init_size_rx)) {
57738 + set_page_attributes(mod->module_init_rx,
57739 + mod->module_init_rx + mod->init_size_rx,
57740 set_memory_ro);
57741 }
57742 }
57743 @@ -1696,17 +1698,20 @@ static void free_module(struct module *m
57744 destroy_params(mod->kp, mod->num_kp);
57745
57746 /* This may be NULL, but that's OK */
57747 - unset_section_ro_nx(mod, mod->module_init);
57748 - module_free(mod, mod->module_init);
57749 + unset_section_ro_nx(mod, mod->module_init_rx);
57750 + module_free(mod, mod->module_init_rw);
57751 + module_free_exec(mod, mod->module_init_rx);
57752 kfree(mod->args);
57753 percpu_modfree(mod);
57754
57755 /* Free lock-classes: */
57756 - lockdep_free_key_range(mod->module_core, mod->core_size);
57757 + lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
57758 + lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
57759
57760 /* Finally, free the core (containing the module structure) */
57761 - unset_section_ro_nx(mod, mod->module_core);
57762 - module_free(mod, mod->module_core);
57763 + unset_section_ro_nx(mod, mod->module_core_rx);
57764 + module_free_exec(mod, mod->module_core_rx);
57765 + module_free(mod, mod->module_core_rw);
57766
57767 #ifdef CONFIG_MPU
57768 update_protections(current->mm);
57769 @@ -1775,10 +1780,31 @@ static int simplify_symbols(struct modul
57770 unsigned int i;
57771 int ret = 0;
57772 const struct kernel_symbol *ksym;
57773 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57774 + int is_fs_load = 0;
57775 + int register_filesystem_found = 0;
57776 + char *p;
57777 +
57778 + p = strstr(mod->args, "grsec_modharden_fs");
57779 + if (p) {
57780 + char *endptr = p + strlen("grsec_modharden_fs");
57781 + /* copy \0 as well */
57782 + memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
57783 + is_fs_load = 1;
57784 + }
57785 +#endif
57786
57787 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
57788 const char *name = info->strtab + sym[i].st_name;
57789
57790 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57791 + /* it's a real shame this will never get ripped and copied
57792 + upstream! ;(
57793 + */
57794 + if (is_fs_load && !strcmp(name, "register_filesystem"))
57795 + register_filesystem_found = 1;
57796 +#endif
57797 +
57798 switch (sym[i].st_shndx) {
57799 case SHN_COMMON:
57800 /* We compiled with -fno-common. These are not
57801 @@ -1799,7 +1825,9 @@ static int simplify_symbols(struct modul
57802 ksym = resolve_symbol_wait(mod, info, name);
57803 /* Ok if resolved. */
57804 if (ksym && !IS_ERR(ksym)) {
57805 + pax_open_kernel();
57806 sym[i].st_value = ksym->value;
57807 + pax_close_kernel();
57808 break;
57809 }
57810
57811 @@ -1818,11 +1846,20 @@ static int simplify_symbols(struct modul
57812 secbase = (unsigned long)mod_percpu(mod);
57813 else
57814 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
57815 + pax_open_kernel();
57816 sym[i].st_value += secbase;
57817 + pax_close_kernel();
57818 break;
57819 }
57820 }
57821
57822 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
57823 + if (is_fs_load && !register_filesystem_found) {
57824 + printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
57825 + ret = -EPERM;
57826 + }
57827 +#endif
57828 +
57829 return ret;
57830 }
57831
57832 @@ -1906,22 +1943,12 @@ static void layout_sections(struct modul
57833 || s->sh_entsize != ~0UL
57834 || strstarts(sname, ".init"))
57835 continue;
57836 - s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
57837 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57838 + s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
57839 + else
57840 + s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
57841 DEBUGP("\t%s\n", name);
57842 }
57843 - switch (m) {
57844 - case 0: /* executable */
57845 - mod->core_size = debug_align(mod->core_size);
57846 - mod->core_text_size = mod->core_size;
57847 - break;
57848 - case 1: /* RO: text and ro-data */
57849 - mod->core_size = debug_align(mod->core_size);
57850 - mod->core_ro_size = mod->core_size;
57851 - break;
57852 - case 3: /* whole core */
57853 - mod->core_size = debug_align(mod->core_size);
57854 - break;
57855 - }
57856 }
57857
57858 DEBUGP("Init section allocation order:\n");
57859 @@ -1935,23 +1962,13 @@ static void layout_sections(struct modul
57860 || s->sh_entsize != ~0UL
57861 || !strstarts(sname, ".init"))
57862 continue;
57863 - s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
57864 - | INIT_OFFSET_MASK);
57865 + if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
57866 + s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
57867 + else
57868 + s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
57869 + s->sh_entsize |= INIT_OFFSET_MASK;
57870 DEBUGP("\t%s\n", sname);
57871 }
57872 - switch (m) {
57873 - case 0: /* executable */
57874 - mod->init_size = debug_align(mod->init_size);
57875 - mod->init_text_size = mod->init_size;
57876 - break;
57877 - case 1: /* RO: text and ro-data */
57878 - mod->init_size = debug_align(mod->init_size);
57879 - mod->init_ro_size = mod->init_size;
57880 - break;
57881 - case 3: /* whole init */
57882 - mod->init_size = debug_align(mod->init_size);
57883 - break;
57884 - }
57885 }
57886 }
57887
57888 @@ -2119,7 +2136,7 @@ static void layout_symtab(struct module
57889
57890 /* Put symbol section at end of init part of module. */
57891 symsect->sh_flags |= SHF_ALLOC;
57892 - symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
57893 + symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
57894 info->index.sym) | INIT_OFFSET_MASK;
57895 DEBUGP("\t%s\n", info->secstrings + symsect->sh_name);
57896
57897 @@ -2136,19 +2153,19 @@ static void layout_symtab(struct module
57898 }
57899
57900 /* Append room for core symbols at end of core part. */
57901 - info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
57902 - mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
57903 + info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
57904 + mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
57905
57906 /* Put string table section at end of init part of module. */
57907 strsect->sh_flags |= SHF_ALLOC;
57908 - strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
57909 + strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
57910 info->index.str) | INIT_OFFSET_MASK;
57911 DEBUGP("\t%s\n", info->secstrings + strsect->sh_name);
57912
57913 /* Append room for core symbols' strings at end of core part. */
57914 - info->stroffs = mod->core_size;
57915 + info->stroffs = mod->core_size_rx;
57916 __set_bit(0, info->strmap);
57917 - mod->core_size += bitmap_weight(info->strmap, strsect->sh_size);
57918 + mod->core_size_rx += bitmap_weight(info->strmap, strsect->sh_size);
57919 }
57920
57921 static void add_kallsyms(struct module *mod, const struct load_info *info)
57922 @@ -2164,11 +2181,13 @@ static void add_kallsyms(struct module *
57923 /* Make sure we get permanent strtab: don't use info->strtab. */
57924 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
57925
57926 + pax_open_kernel();
57927 +
57928 /* Set types up while we still have access to sections. */
57929 for (i = 0; i < mod->num_symtab; i++)
57930 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
57931
57932 - mod->core_symtab = dst = mod->module_core + info->symoffs;
57933 + mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
57934 src = mod->symtab;
57935 *dst = *src;
57936 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
57937 @@ -2181,10 +2200,12 @@ static void add_kallsyms(struct module *
57938 }
57939 mod->core_num_syms = ndst;
57940
57941 - mod->core_strtab = s = mod->module_core + info->stroffs;
57942 + mod->core_strtab = s = mod->module_core_rx + info->stroffs;
57943 for (*s = 0, i = 1; i < info->sechdrs[info->index.str].sh_size; ++i)
57944 if (test_bit(i, info->strmap))
57945 *++s = mod->strtab[i];
57946 +
57947 + pax_close_kernel();
57948 }
57949 #else
57950 static inline void layout_symtab(struct module *mod, struct load_info *info)
57951 @@ -2213,17 +2234,33 @@ static void dynamic_debug_remove(struct
57952 ddebug_remove_module(debug->modname);
57953 }
57954
57955 -static void *module_alloc_update_bounds(unsigned long size)
57956 +static void *module_alloc_update_bounds_rw(unsigned long size)
57957 {
57958 void *ret = module_alloc(size);
57959
57960 if (ret) {
57961 mutex_lock(&module_mutex);
57962 /* Update module bounds. */
57963 - if ((unsigned long)ret < module_addr_min)
57964 - module_addr_min = (unsigned long)ret;
57965 - if ((unsigned long)ret + size > module_addr_max)
57966 - module_addr_max = (unsigned long)ret + size;
57967 + if ((unsigned long)ret < module_addr_min_rw)
57968 + module_addr_min_rw = (unsigned long)ret;
57969 + if ((unsigned long)ret + size > module_addr_max_rw)
57970 + module_addr_max_rw = (unsigned long)ret + size;
57971 + mutex_unlock(&module_mutex);
57972 + }
57973 + return ret;
57974 +}
57975 +
57976 +static void *module_alloc_update_bounds_rx(unsigned long size)
57977 +{
57978 + void *ret = module_alloc_exec(size);
57979 +
57980 + if (ret) {
57981 + mutex_lock(&module_mutex);
57982 + /* Update module bounds. */
57983 + if ((unsigned long)ret < module_addr_min_rx)
57984 + module_addr_min_rx = (unsigned long)ret;
57985 + if ((unsigned long)ret + size > module_addr_max_rx)
57986 + module_addr_max_rx = (unsigned long)ret + size;
57987 mutex_unlock(&module_mutex);
57988 }
57989 return ret;
57990 @@ -2516,7 +2553,7 @@ static int move_module(struct module *mo
57991 void *ptr;
57992
57993 /* Do the allocs. */
57994 - ptr = module_alloc_update_bounds(mod->core_size);
57995 + ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
57996 /*
57997 * The pointer to this block is stored in the module structure
57998 * which is inside the block. Just mark it as not being a
57999 @@ -2526,23 +2563,50 @@ static int move_module(struct module *mo
58000 if (!ptr)
58001 return -ENOMEM;
58002
58003 - memset(ptr, 0, mod->core_size);
58004 - mod->module_core = ptr;
58005 + memset(ptr, 0, mod->core_size_rw);
58006 + mod->module_core_rw = ptr;
58007
58008 - ptr = module_alloc_update_bounds(mod->init_size);
58009 + ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
58010 /*
58011 * The pointer to this block is stored in the module structure
58012 * which is inside the block. This block doesn't need to be
58013 * scanned as it contains data and code that will be freed
58014 * after the module is initialized.
58015 */
58016 - kmemleak_ignore(ptr);
58017 - if (!ptr && mod->init_size) {
58018 - module_free(mod, mod->module_core);
58019 + kmemleak_not_leak(ptr);
58020 + if (!ptr && mod->init_size_rw) {
58021 + module_free(mod, mod->module_core_rw);
58022 return -ENOMEM;
58023 }
58024 - memset(ptr, 0, mod->init_size);
58025 - mod->module_init = ptr;
58026 + memset(ptr, 0, mod->init_size_rw);
58027 + mod->module_init_rw = ptr;
58028 +
58029 + ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
58030 + kmemleak_not_leak(ptr);
58031 + if (!ptr) {
58032 + module_free(mod, mod->module_init_rw);
58033 + module_free(mod, mod->module_core_rw);
58034 + return -ENOMEM;
58035 + }
58036 +
58037 + pax_open_kernel();
58038 + memset(ptr, 0, mod->core_size_rx);
58039 + pax_close_kernel();
58040 + mod->module_core_rx = ptr;
58041 +
58042 + ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
58043 + kmemleak_not_leak(ptr);
58044 + if (!ptr && mod->init_size_rx) {
58045 + module_free_exec(mod, mod->module_core_rx);
58046 + module_free(mod, mod->module_init_rw);
58047 + module_free(mod, mod->module_core_rw);
58048 + return -ENOMEM;
58049 + }
58050 +
58051 + pax_open_kernel();
58052 + memset(ptr, 0, mod->init_size_rx);
58053 + pax_close_kernel();
58054 + mod->module_init_rx = ptr;
58055
58056 /* Transfer each section which specifies SHF_ALLOC */
58057 DEBUGP("final section addresses:\n");
58058 @@ -2553,16 +2617,45 @@ static int move_module(struct module *mo
58059 if (!(shdr->sh_flags & SHF_ALLOC))
58060 continue;
58061
58062 - if (shdr->sh_entsize & INIT_OFFSET_MASK)
58063 - dest = mod->module_init
58064 - + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
58065 - else
58066 - dest = mod->module_core + shdr->sh_entsize;
58067 + if (shdr->sh_entsize & INIT_OFFSET_MASK) {
58068 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
58069 + dest = mod->module_init_rw
58070 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
58071 + else
58072 + dest = mod->module_init_rx
58073 + + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
58074 + } else {
58075 + if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
58076 + dest = mod->module_core_rw + shdr->sh_entsize;
58077 + else
58078 + dest = mod->module_core_rx + shdr->sh_entsize;
58079 + }
58080 +
58081 + if (shdr->sh_type != SHT_NOBITS) {
58082 +
58083 +#ifdef CONFIG_PAX_KERNEXEC
58084 +#ifdef CONFIG_X86_64
58085 + if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
58086 + set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
58087 +#endif
58088 + if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
58089 + pax_open_kernel();
58090 + memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
58091 + pax_close_kernel();
58092 + } else
58093 +#endif
58094
58095 - if (shdr->sh_type != SHT_NOBITS)
58096 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
58097 + }
58098 /* Update sh_addr to point to copy in image. */
58099 - shdr->sh_addr = (unsigned long)dest;
58100 +
58101 +#ifdef CONFIG_PAX_KERNEXEC
58102 + if (shdr->sh_flags & SHF_EXECINSTR)
58103 + shdr->sh_addr = ktva_ktla((unsigned long)dest);
58104 + else
58105 +#endif
58106 +
58107 + shdr->sh_addr = (unsigned long)dest;
58108 DEBUGP("\t0x%lx %s\n",
58109 shdr->sh_addr, info->secstrings + shdr->sh_name);
58110 }
58111 @@ -2613,12 +2706,12 @@ static void flush_module_icache(const st
58112 * Do it before processing of module parameters, so the module
58113 * can provide parameter accessor functions of its own.
58114 */
58115 - if (mod->module_init)
58116 - flush_icache_range((unsigned long)mod->module_init,
58117 - (unsigned long)mod->module_init
58118 - + mod->init_size);
58119 - flush_icache_range((unsigned long)mod->module_core,
58120 - (unsigned long)mod->module_core + mod->core_size);
58121 + if (mod->module_init_rx)
58122 + flush_icache_range((unsigned long)mod->module_init_rx,
58123 + (unsigned long)mod->module_init_rx
58124 + + mod->init_size_rx);
58125 + flush_icache_range((unsigned long)mod->module_core_rx,
58126 + (unsigned long)mod->module_core_rx + mod->core_size_rx);
58127
58128 set_fs(old_fs);
58129 }
58130 @@ -2690,8 +2783,10 @@ static void module_deallocate(struct mod
58131 {
58132 kfree(info->strmap);
58133 percpu_modfree(mod);
58134 - module_free(mod, mod->module_init);
58135 - module_free(mod, mod->module_core);
58136 + module_free_exec(mod, mod->module_init_rx);
58137 + module_free_exec(mod, mod->module_core_rx);
58138 + module_free(mod, mod->module_init_rw);
58139 + module_free(mod, mod->module_core_rw);
58140 }
58141
58142 static int post_relocation(struct module *mod, const struct load_info *info)
58143 @@ -2748,9 +2843,38 @@ static struct module *load_module(void _
58144 if (err)
58145 goto free_unload;
58146
58147 + /* Now copy in args */
58148 + mod->args = strndup_user(uargs, ~0UL >> 1);
58149 + if (IS_ERR(mod->args)) {
58150 + err = PTR_ERR(mod->args);
58151 + goto free_unload;
58152 + }
58153 +
58154 /* Set up MODINFO_ATTR fields */
58155 setup_modinfo(mod, &info);
58156
58157 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
58158 + {
58159 + char *p, *p2;
58160 +
58161 + if (strstr(mod->args, "grsec_modharden_netdev")) {
58162 + printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
58163 + err = -EPERM;
58164 + goto free_modinfo;
58165 + } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
58166 + p += strlen("grsec_modharden_normal");
58167 + p2 = strstr(p, "_");
58168 + if (p2) {
58169 + *p2 = '\0';
58170 + printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
58171 + *p2 = '_';
58172 + }
58173 + err = -EPERM;
58174 + goto free_modinfo;
58175 + }
58176 + }
58177 +#endif
58178 +
58179 /* Fix up syms, so that st_value is a pointer to location. */
58180 err = simplify_symbols(mod, &info);
58181 if (err < 0)
58182 @@ -2766,13 +2890,6 @@ static struct module *load_module(void _
58183
58184 flush_module_icache(mod);
58185
58186 - /* Now copy in args */
58187 - mod->args = strndup_user(uargs, ~0UL >> 1);
58188 - if (IS_ERR(mod->args)) {
58189 - err = PTR_ERR(mod->args);
58190 - goto free_arch_cleanup;
58191 - }
58192 -
58193 /* Mark state as coming so strong_try_module_get() ignores us. */
58194 mod->state = MODULE_STATE_COMING;
58195
58196 @@ -2832,11 +2949,10 @@ static struct module *load_module(void _
58197 unlock:
58198 mutex_unlock(&module_mutex);
58199 synchronize_sched();
58200 - kfree(mod->args);
58201 - free_arch_cleanup:
58202 module_arch_cleanup(mod);
58203 free_modinfo:
58204 free_modinfo(mod);
58205 + kfree(mod->args);
58206 free_unload:
58207 module_unload_free(mod);
58208 free_module:
58209 @@ -2877,16 +2993,16 @@ SYSCALL_DEFINE3(init_module, void __user
58210 MODULE_STATE_COMING, mod);
58211
58212 /* Set RO and NX regions for core */
58213 - set_section_ro_nx(mod->module_core,
58214 - mod->core_text_size,
58215 - mod->core_ro_size,
58216 - mod->core_size);
58217 + set_section_ro_nx(mod->module_core_rx,
58218 + mod->core_size_rx,
58219 + mod->core_size_rx,
58220 + mod->core_size_rx);
58221
58222 /* Set RO and NX regions for init */
58223 - set_section_ro_nx(mod->module_init,
58224 - mod->init_text_size,
58225 - mod->init_ro_size,
58226 - mod->init_size);
58227 + set_section_ro_nx(mod->module_init_rx,
58228 + mod->init_size_rx,
58229 + mod->init_size_rx,
58230 + mod->init_size_rx);
58231
58232 do_mod_ctors(mod);
58233 /* Start the module */
58234 @@ -2931,11 +3047,13 @@ SYSCALL_DEFINE3(init_module, void __user
58235 mod->symtab = mod->core_symtab;
58236 mod->strtab = mod->core_strtab;
58237 #endif
58238 - unset_section_ro_nx(mod, mod->module_init);
58239 - module_free(mod, mod->module_init);
58240 - mod->module_init = NULL;
58241 - mod->init_size = 0;
58242 - mod->init_text_size = 0;
58243 + unset_section_ro_nx(mod, mod->module_init_rx);
58244 + module_free(mod, mod->module_init_rw);
58245 + module_free_exec(mod, mod->module_init_rx);
58246 + mod->module_init_rw = NULL;
58247 + mod->module_init_rx = NULL;
58248 + mod->init_size_rw = 0;
58249 + mod->init_size_rx = 0;
58250 mutex_unlock(&module_mutex);
58251
58252 return 0;
58253 @@ -2966,10 +3084,16 @@ static const char *get_ksymbol(struct mo
58254 unsigned long nextval;
58255
58256 /* At worse, next value is at end of module */
58257 - if (within_module_init(addr, mod))
58258 - nextval = (unsigned long)mod->module_init+mod->init_text_size;
58259 + if (within_module_init_rx(addr, mod))
58260 + nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
58261 + else if (within_module_init_rw(addr, mod))
58262 + nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
58263 + else if (within_module_core_rx(addr, mod))
58264 + nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
58265 + else if (within_module_core_rw(addr, mod))
58266 + nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
58267 else
58268 - nextval = (unsigned long)mod->module_core+mod->core_text_size;
58269 + return NULL;
58270
58271 /* Scan for closest preceding symbol, and next symbol. (ELF
58272 starts real symbols at 1). */
58273 @@ -3215,7 +3339,7 @@ static int m_show(struct seq_file *m, vo
58274 char buf[8];
58275
58276 seq_printf(m, "%s %u",
58277 - mod->name, mod->init_size + mod->core_size);
58278 + mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
58279 print_unload_info(m, mod);
58280
58281 /* Informative for users. */
58282 @@ -3224,7 +3348,7 @@ static int m_show(struct seq_file *m, vo
58283 mod->state == MODULE_STATE_COMING ? "Loading":
58284 "Live");
58285 /* Used by oprofile and other similar tools. */
58286 - seq_printf(m, " 0x%pK", mod->module_core);
58287 + seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
58288
58289 /* Taints info */
58290 if (mod->taints)
58291 @@ -3260,7 +3384,17 @@ static const struct file_operations proc
58292
58293 static int __init proc_modules_init(void)
58294 {
58295 +#ifndef CONFIG_GRKERNSEC_HIDESYM
58296 +#ifdef CONFIG_GRKERNSEC_PROC_USER
58297 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
58298 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
58299 + proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
58300 +#else
58301 proc_create("modules", 0, NULL, &proc_modules_operations);
58302 +#endif
58303 +#else
58304 + proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
58305 +#endif
58306 return 0;
58307 }
58308 module_init(proc_modules_init);
58309 @@ -3319,12 +3453,12 @@ struct module *__module_address(unsigned
58310 {
58311 struct module *mod;
58312
58313 - if (addr < module_addr_min || addr > module_addr_max)
58314 + if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
58315 + (addr < module_addr_min_rw || addr > module_addr_max_rw))
58316 return NULL;
58317
58318 list_for_each_entry_rcu(mod, &modules, list)
58319 - if (within_module_core(addr, mod)
58320 - || within_module_init(addr, mod))
58321 + if (within_module_init(addr, mod) || within_module_core(addr, mod))
58322 return mod;
58323 return NULL;
58324 }
58325 @@ -3358,11 +3492,20 @@ bool is_module_text_address(unsigned lon
58326 */
58327 struct module *__module_text_address(unsigned long addr)
58328 {
58329 - struct module *mod = __module_address(addr);
58330 + struct module *mod;
58331 +
58332 +#ifdef CONFIG_X86_32
58333 + addr = ktla_ktva(addr);
58334 +#endif
58335 +
58336 + if (addr < module_addr_min_rx || addr > module_addr_max_rx)
58337 + return NULL;
58338 +
58339 + mod = __module_address(addr);
58340 +
58341 if (mod) {
58342 /* Make sure it's within the text section. */
58343 - if (!within(addr, mod->module_init, mod->init_text_size)
58344 - && !within(addr, mod->module_core, mod->core_text_size))
58345 + if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
58346 mod = NULL;
58347 }
58348 return mod;
58349 diff -urNp linux-2.6.39.4/kernel/mutex.c linux-2.6.39.4/kernel/mutex.c
58350 --- linux-2.6.39.4/kernel/mutex.c 2011-05-19 00:06:34.000000000 -0400
58351 +++ linux-2.6.39.4/kernel/mutex.c 2011-08-05 19:44:37.000000000 -0400
58352 @@ -160,7 +160,7 @@ __mutex_lock_common(struct mutex *lock,
58353 */
58354
58355 for (;;) {
58356 - struct thread_info *owner;
58357 + struct task_struct *owner;
58358
58359 /*
58360 * If we own the BKL, then don't spin. The owner of
58361 @@ -205,7 +205,7 @@ __mutex_lock_common(struct mutex *lock,
58362 spin_lock_mutex(&lock->wait_lock, flags);
58363
58364 debug_mutex_lock_common(lock, &waiter);
58365 - debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
58366 + debug_mutex_add_waiter(lock, &waiter, task);
58367
58368 /* add waiting tasks to the end of the waitqueue (FIFO): */
58369 list_add_tail(&waiter.list, &lock->wait_list);
58370 @@ -234,8 +234,7 @@ __mutex_lock_common(struct mutex *lock,
58371 * TASK_UNINTERRUPTIBLE case.)
58372 */
58373 if (unlikely(signal_pending_state(state, task))) {
58374 - mutex_remove_waiter(lock, &waiter,
58375 - task_thread_info(task));
58376 + mutex_remove_waiter(lock, &waiter, task);
58377 mutex_release(&lock->dep_map, 1, ip);
58378 spin_unlock_mutex(&lock->wait_lock, flags);
58379
58380 @@ -256,7 +255,7 @@ __mutex_lock_common(struct mutex *lock,
58381 done:
58382 lock_acquired(&lock->dep_map, ip);
58383 /* got the lock - rejoice! */
58384 - mutex_remove_waiter(lock, &waiter, current_thread_info());
58385 + mutex_remove_waiter(lock, &waiter, task);
58386 mutex_set_owner(lock);
58387
58388 /* set it to 0 if there are no waiters left: */
58389 diff -urNp linux-2.6.39.4/kernel/mutex-debug.c linux-2.6.39.4/kernel/mutex-debug.c
58390 --- linux-2.6.39.4/kernel/mutex-debug.c 2011-05-19 00:06:34.000000000 -0400
58391 +++ linux-2.6.39.4/kernel/mutex-debug.c 2011-08-05 19:44:37.000000000 -0400
58392 @@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
58393 }
58394
58395 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58396 - struct thread_info *ti)
58397 + struct task_struct *task)
58398 {
58399 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
58400
58401 /* Mark the current thread as blocked on the lock: */
58402 - ti->task->blocked_on = waiter;
58403 + task->blocked_on = waiter;
58404 }
58405
58406 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58407 - struct thread_info *ti)
58408 + struct task_struct *task)
58409 {
58410 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
58411 - DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
58412 - DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
58413 - ti->task->blocked_on = NULL;
58414 + DEBUG_LOCKS_WARN_ON(waiter->task != task);
58415 + DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
58416 + task->blocked_on = NULL;
58417
58418 list_del_init(&waiter->list);
58419 waiter->task = NULL;
58420 @@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
58421 return;
58422
58423 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
58424 - DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
58425 + DEBUG_LOCKS_WARN_ON(lock->owner != current);
58426 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
58427 mutex_clear_owner(lock);
58428 }
58429 diff -urNp linux-2.6.39.4/kernel/mutex-debug.h linux-2.6.39.4/kernel/mutex-debug.h
58430 --- linux-2.6.39.4/kernel/mutex-debug.h 2011-05-19 00:06:34.000000000 -0400
58431 +++ linux-2.6.39.4/kernel/mutex-debug.h 2011-08-05 19:44:37.000000000 -0400
58432 @@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
58433 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
58434 extern void debug_mutex_add_waiter(struct mutex *lock,
58435 struct mutex_waiter *waiter,
58436 - struct thread_info *ti);
58437 + struct task_struct *task);
58438 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
58439 - struct thread_info *ti);
58440 + struct task_struct *task);
58441 extern void debug_mutex_unlock(struct mutex *lock);
58442 extern void debug_mutex_init(struct mutex *lock, const char *name,
58443 struct lock_class_key *key);
58444
58445 static inline void mutex_set_owner(struct mutex *lock)
58446 {
58447 - lock->owner = current_thread_info();
58448 + lock->owner = current;
58449 }
58450
58451 static inline void mutex_clear_owner(struct mutex *lock)
58452 diff -urNp linux-2.6.39.4/kernel/mutex.h linux-2.6.39.4/kernel/mutex.h
58453 --- linux-2.6.39.4/kernel/mutex.h 2011-05-19 00:06:34.000000000 -0400
58454 +++ linux-2.6.39.4/kernel/mutex.h 2011-08-05 19:44:37.000000000 -0400
58455 @@ -19,7 +19,7 @@
58456 #ifdef CONFIG_SMP
58457 static inline void mutex_set_owner(struct mutex *lock)
58458 {
58459 - lock->owner = current_thread_info();
58460 + lock->owner = current;
58461 }
58462
58463 static inline void mutex_clear_owner(struct mutex *lock)
58464 diff -urNp linux-2.6.39.4/kernel/padata.c linux-2.6.39.4/kernel/padata.c
58465 --- linux-2.6.39.4/kernel/padata.c 2011-05-19 00:06:34.000000000 -0400
58466 +++ linux-2.6.39.4/kernel/padata.c 2011-08-05 19:44:37.000000000 -0400
58467 @@ -132,10 +132,10 @@ int padata_do_parallel(struct padata_ins
58468 padata->pd = pd;
58469 padata->cb_cpu = cb_cpu;
58470
58471 - if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
58472 - atomic_set(&pd->seq_nr, -1);
58473 + if (unlikely(atomic_read_unchecked(&pd->seq_nr) == pd->max_seq_nr))
58474 + atomic_set_unchecked(&pd->seq_nr, -1);
58475
58476 - padata->seq_nr = atomic_inc_return(&pd->seq_nr);
58477 + padata->seq_nr = atomic_inc_return_unchecked(&pd->seq_nr);
58478
58479 target_cpu = padata_cpu_hash(padata);
58480 queue = per_cpu_ptr(pd->pqueue, target_cpu);
58481 @@ -444,7 +444,7 @@ static struct parallel_data *padata_allo
58482 padata_init_pqueues(pd);
58483 padata_init_squeues(pd);
58484 setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
58485 - atomic_set(&pd->seq_nr, -1);
58486 + atomic_set_unchecked(&pd->seq_nr, -1);
58487 atomic_set(&pd->reorder_objects, 0);
58488 atomic_set(&pd->refcnt, 0);
58489 pd->pinst = pinst;
58490 diff -urNp linux-2.6.39.4/kernel/panic.c linux-2.6.39.4/kernel/panic.c
58491 --- linux-2.6.39.4/kernel/panic.c 2011-05-19 00:06:34.000000000 -0400
58492 +++ linux-2.6.39.4/kernel/panic.c 2011-08-05 19:44:37.000000000 -0400
58493 @@ -369,7 +369,7 @@ static void warn_slowpath_common(const c
58494 const char *board;
58495
58496 printk(KERN_WARNING "------------[ cut here ]------------\n");
58497 - printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
58498 + printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
58499 board = dmi_get_system_info(DMI_PRODUCT_NAME);
58500 if (board)
58501 printk(KERN_WARNING "Hardware name: %s\n", board);
58502 @@ -424,7 +424,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
58503 */
58504 void __stack_chk_fail(void)
58505 {
58506 - panic("stack-protector: Kernel stack is corrupted in: %p\n",
58507 + dump_stack();
58508 + panic("stack-protector: Kernel stack is corrupted in: %pA\n",
58509 __builtin_return_address(0));
58510 }
58511 EXPORT_SYMBOL(__stack_chk_fail);
58512 diff -urNp linux-2.6.39.4/kernel/perf_event.c linux-2.6.39.4/kernel/perf_event.c
58513 --- linux-2.6.39.4/kernel/perf_event.c 2011-05-19 00:06:34.000000000 -0400
58514 +++ linux-2.6.39.4/kernel/perf_event.c 2011-08-05 20:34:06.000000000 -0400
58515 @@ -170,7 +170,7 @@ int perf_proc_update_handler(struct ctl_
58516 return 0;
58517 }
58518
58519 -static atomic64_t perf_event_id;
58520 +static atomic64_unchecked_t perf_event_id;
58521
58522 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
58523 enum event_type_t event_type);
58524 @@ -2496,7 +2496,7 @@ static void __perf_event_read(void *info
58525
58526 static inline u64 perf_event_count(struct perf_event *event)
58527 {
58528 - return local64_read(&event->count) + atomic64_read(&event->child_count);
58529 + return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
58530 }
58531
58532 static u64 perf_event_read(struct perf_event *event)
58533 @@ -3031,9 +3031,9 @@ u64 perf_event_read_value(struct perf_ev
58534 mutex_lock(&event->child_mutex);
58535 total += perf_event_read(event);
58536 *enabled += event->total_time_enabled +
58537 - atomic64_read(&event->child_total_time_enabled);
58538 + atomic64_read_unchecked(&event->child_total_time_enabled);
58539 *running += event->total_time_running +
58540 - atomic64_read(&event->child_total_time_running);
58541 + atomic64_read_unchecked(&event->child_total_time_running);
58542
58543 list_for_each_entry(child, &event->child_list, child_list) {
58544 total += perf_event_read(child);
58545 @@ -3396,10 +3396,10 @@ void perf_event_update_userpage(struct p
58546 userpg->offset -= local64_read(&event->hw.prev_count);
58547
58548 userpg->time_enabled = event->total_time_enabled +
58549 - atomic64_read(&event->child_total_time_enabled);
58550 + atomic64_read_unchecked(&event->child_total_time_enabled);
58551
58552 userpg->time_running = event->total_time_running +
58553 - atomic64_read(&event->child_total_time_running);
58554 + atomic64_read_unchecked(&event->child_total_time_running);
58555
58556 barrier();
58557 ++userpg->lock;
58558 @@ -4196,11 +4196,11 @@ static void perf_output_read_one(struct
58559 values[n++] = perf_event_count(event);
58560 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
58561 values[n++] = enabled +
58562 - atomic64_read(&event->child_total_time_enabled);
58563 + atomic64_read_unchecked(&event->child_total_time_enabled);
58564 }
58565 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
58566 values[n++] = running +
58567 - atomic64_read(&event->child_total_time_running);
58568 + atomic64_read_unchecked(&event->child_total_time_running);
58569 }
58570 if (read_format & PERF_FORMAT_ID)
58571 values[n++] = primary_event_id(event);
58572 @@ -6201,7 +6201,7 @@ perf_event_alloc(struct perf_event_attr
58573 event->parent = parent_event;
58574
58575 event->ns = get_pid_ns(current->nsproxy->pid_ns);
58576 - event->id = atomic64_inc_return(&perf_event_id);
58577 + event->id = atomic64_inc_return_unchecked(&perf_event_id);
58578
58579 event->state = PERF_EVENT_STATE_INACTIVE;
58580
58581 @@ -6724,10 +6724,10 @@ static void sync_child_event(struct perf
58582 /*
58583 * Add back the child's count to the parent's count:
58584 */
58585 - atomic64_add(child_val, &parent_event->child_count);
58586 - atomic64_add(child_event->total_time_enabled,
58587 + atomic64_add_unchecked(child_val, &parent_event->child_count);
58588 + atomic64_add_unchecked(child_event->total_time_enabled,
58589 &parent_event->child_total_time_enabled);
58590 - atomic64_add(child_event->total_time_running,
58591 + atomic64_add_unchecked(child_event->total_time_running,
58592 &parent_event->child_total_time_running);
58593
58594 /*
58595 diff -urNp linux-2.6.39.4/kernel/pid.c linux-2.6.39.4/kernel/pid.c
58596 --- linux-2.6.39.4/kernel/pid.c 2011-05-19 00:06:34.000000000 -0400
58597 +++ linux-2.6.39.4/kernel/pid.c 2011-08-05 19:44:37.000000000 -0400
58598 @@ -33,6 +33,7 @@
58599 #include <linux/rculist.h>
58600 #include <linux/bootmem.h>
58601 #include <linux/hash.h>
58602 +#include <linux/security.h>
58603 #include <linux/pid_namespace.h>
58604 #include <linux/init_task.h>
58605 #include <linux/syscalls.h>
58606 @@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
58607
58608 int pid_max = PID_MAX_DEFAULT;
58609
58610 -#define RESERVED_PIDS 300
58611 +#define RESERVED_PIDS 500
58612
58613 int pid_max_min = RESERVED_PIDS + 1;
58614 int pid_max_max = PID_MAX_LIMIT;
58615 @@ -419,8 +420,15 @@ EXPORT_SYMBOL(pid_task);
58616 */
58617 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
58618 {
58619 + struct task_struct *task;
58620 +
58621 rcu_lockdep_assert(rcu_read_lock_held());
58622 - return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58623 + task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
58624 +
58625 + if (gr_pid_is_chrooted(task))
58626 + return NULL;
58627 +
58628 + return task;
58629 }
58630
58631 struct task_struct *find_task_by_vpid(pid_t vnr)
58632 @@ -428,6 +436,12 @@ struct task_struct *find_task_by_vpid(pi
58633 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
58634 }
58635
58636 +struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
58637 +{
58638 + rcu_lockdep_assert(rcu_read_lock_held());
58639 + return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
58640 +}
58641 +
58642 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
58643 {
58644 struct pid *pid;
58645 diff -urNp linux-2.6.39.4/kernel/posix-cpu-timers.c linux-2.6.39.4/kernel/posix-cpu-timers.c
58646 --- linux-2.6.39.4/kernel/posix-cpu-timers.c 2011-05-19 00:06:34.000000000 -0400
58647 +++ linux-2.6.39.4/kernel/posix-cpu-timers.c 2011-08-06 09:34:48.000000000 -0400
58648 @@ -6,6 +6,7 @@
58649 #include <linux/posix-timers.h>
58650 #include <linux/errno.h>
58651 #include <linux/math64.h>
58652 +#include <linux/security.h>
58653 #include <asm/uaccess.h>
58654 #include <linux/kernel_stat.h>
58655 #include <trace/events/timer.h>
58656 @@ -1604,14 +1605,14 @@ struct k_clock clock_posix_cpu = {
58657
58658 static __init int init_posix_cpu_timers(void)
58659 {
58660 - struct k_clock process = {
58661 + static struct k_clock process = {
58662 .clock_getres = process_cpu_clock_getres,
58663 .clock_get = process_cpu_clock_get,
58664 .timer_create = process_cpu_timer_create,
58665 .nsleep = process_cpu_nsleep,
58666 .nsleep_restart = process_cpu_nsleep_restart,
58667 };
58668 - struct k_clock thread = {
58669 + static struct k_clock thread = {
58670 .clock_getres = thread_cpu_clock_getres,
58671 .clock_get = thread_cpu_clock_get,
58672 .timer_create = thread_cpu_timer_create,
58673 diff -urNp linux-2.6.39.4/kernel/posix-timers.c linux-2.6.39.4/kernel/posix-timers.c
58674 --- linux-2.6.39.4/kernel/posix-timers.c 2011-05-19 00:06:34.000000000 -0400
58675 +++ linux-2.6.39.4/kernel/posix-timers.c 2011-08-06 09:30:46.000000000 -0400
58676 @@ -43,6 +43,7 @@
58677 #include <linux/idr.h>
58678 #include <linux/posix-clock.h>
58679 #include <linux/posix-timers.h>
58680 +#include <linux/grsecurity.h>
58681 #include <linux/syscalls.h>
58682 #include <linux/wait.h>
58683 #include <linux/workqueue.h>
58684 @@ -129,7 +130,7 @@ static DEFINE_SPINLOCK(idr_lock);
58685 * which we beg off on and pass to do_sys_settimeofday().
58686 */
58687
58688 -static struct k_clock posix_clocks[MAX_CLOCKS];
58689 +static struct k_clock *posix_clocks[MAX_CLOCKS];
58690
58691 /*
58692 * These ones are defined below.
58693 @@ -227,7 +228,7 @@ static int posix_get_boottime(const cloc
58694 */
58695 static __init int init_posix_timers(void)
58696 {
58697 - struct k_clock clock_realtime = {
58698 + static struct k_clock clock_realtime = {
58699 .clock_getres = hrtimer_get_res,
58700 .clock_get = posix_clock_realtime_get,
58701 .clock_set = posix_clock_realtime_set,
58702 @@ -239,7 +240,7 @@ static __init int init_posix_timers(void
58703 .timer_get = common_timer_get,
58704 .timer_del = common_timer_del,
58705 };
58706 - struct k_clock clock_monotonic = {
58707 + static struct k_clock clock_monotonic = {
58708 .clock_getres = hrtimer_get_res,
58709 .clock_get = posix_ktime_get_ts,
58710 .nsleep = common_nsleep,
58711 @@ -249,19 +250,19 @@ static __init int init_posix_timers(void
58712 .timer_get = common_timer_get,
58713 .timer_del = common_timer_del,
58714 };
58715 - struct k_clock clock_monotonic_raw = {
58716 + static struct k_clock clock_monotonic_raw = {
58717 .clock_getres = hrtimer_get_res,
58718 .clock_get = posix_get_monotonic_raw,
58719 };
58720 - struct k_clock clock_realtime_coarse = {
58721 + static struct k_clock clock_realtime_coarse = {
58722 .clock_getres = posix_get_coarse_res,
58723 .clock_get = posix_get_realtime_coarse,
58724 };
58725 - struct k_clock clock_monotonic_coarse = {
58726 + static struct k_clock clock_monotonic_coarse = {
58727 .clock_getres = posix_get_coarse_res,
58728 .clock_get = posix_get_monotonic_coarse,
58729 };
58730 - struct k_clock clock_boottime = {
58731 + static struct k_clock clock_boottime = {
58732 .clock_getres = hrtimer_get_res,
58733 .clock_get = posix_get_boottime,
58734 .nsleep = common_nsleep,
58735 @@ -272,6 +273,8 @@ static __init int init_posix_timers(void
58736 .timer_del = common_timer_del,
58737 };
58738
58739 + pax_track_stack();
58740 +
58741 posix_timers_register_clock(CLOCK_REALTIME, &clock_realtime);
58742 posix_timers_register_clock(CLOCK_MONOTONIC, &clock_monotonic);
58743 posix_timers_register_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
58744 @@ -473,7 +476,7 @@ void posix_timers_register_clock(const c
58745 return;
58746 }
58747
58748 - posix_clocks[clock_id] = *new_clock;
58749 + posix_clocks[clock_id] = new_clock;
58750 }
58751 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
58752
58753 @@ -512,9 +515,9 @@ static struct k_clock *clockid_to_kclock
58754 return (id & CLOCKFD_MASK) == CLOCKFD ?
58755 &clock_posix_dynamic : &clock_posix_cpu;
58756
58757 - if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
58758 + if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
58759 return NULL;
58760 - return &posix_clocks[id];
58761 + return posix_clocks[id];
58762 }
58763
58764 static int common_timer_create(struct k_itimer *new_timer)
58765 @@ -956,6 +959,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
58766 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
58767 return -EFAULT;
58768
58769 + /* only the CLOCK_REALTIME clock can be set, all other clocks
58770 + have their clock_set fptr set to a nosettime dummy function
58771 + CLOCK_REALTIME has a NULL clock_set fptr which causes it to
58772 + call common_clock_set, which calls do_sys_settimeofday, which
58773 + we hook
58774 + */
58775 +
58776 return kc->clock_set(which_clock, &new_tp);
58777 }
58778
58779 diff -urNp linux-2.6.39.4/kernel/power/poweroff.c linux-2.6.39.4/kernel/power/poweroff.c
58780 --- linux-2.6.39.4/kernel/power/poweroff.c 2011-05-19 00:06:34.000000000 -0400
58781 +++ linux-2.6.39.4/kernel/power/poweroff.c 2011-08-05 19:44:37.000000000 -0400
58782 @@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
58783 .enable_mask = SYSRQ_ENABLE_BOOT,
58784 };
58785
58786 -static int pm_sysrq_init(void)
58787 +static int __init pm_sysrq_init(void)
58788 {
58789 register_sysrq_key('o', &sysrq_poweroff_op);
58790 return 0;
58791 diff -urNp linux-2.6.39.4/kernel/power/process.c linux-2.6.39.4/kernel/power/process.c
58792 --- linux-2.6.39.4/kernel/power/process.c 2011-05-19 00:06:34.000000000 -0400
58793 +++ linux-2.6.39.4/kernel/power/process.c 2011-08-05 19:44:37.000000000 -0400
58794 @@ -41,6 +41,7 @@ static int try_to_freeze_tasks(bool sig_
58795 u64 elapsed_csecs64;
58796 unsigned int elapsed_csecs;
58797 bool wakeup = false;
58798 + bool timedout = false;
58799
58800 do_gettimeofday(&start);
58801
58802 @@ -51,6 +52,8 @@ static int try_to_freeze_tasks(bool sig_
58803
58804 while (true) {
58805 todo = 0;
58806 + if (time_after(jiffies, end_time))
58807 + timedout = true;
58808 read_lock(&tasklist_lock);
58809 do_each_thread(g, p) {
58810 if (frozen(p) || !freezable(p))
58811 @@ -71,9 +74,13 @@ static int try_to_freeze_tasks(bool sig_
58812 * try_to_stop() after schedule() in ptrace/signal
58813 * stop sees TIF_FREEZE.
58814 */
58815 - if (!task_is_stopped_or_traced(p) &&
58816 - !freezer_should_skip(p))
58817 + if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
58818 todo++;
58819 + if (timedout) {
58820 + printk(KERN_ERR "Task refusing to freeze:\n");
58821 + sched_show_task(p);
58822 + }
58823 + }
58824 } while_each_thread(g, p);
58825 read_unlock(&tasklist_lock);
58826
58827 @@ -82,7 +89,7 @@ static int try_to_freeze_tasks(bool sig_
58828 todo += wq_busy;
58829 }
58830
58831 - if (!todo || time_after(jiffies, end_time))
58832 + if (!todo || timedout)
58833 break;
58834
58835 if (pm_wakeup_pending()) {
58836 diff -urNp linux-2.6.39.4/kernel/printk.c linux-2.6.39.4/kernel/printk.c
58837 --- linux-2.6.39.4/kernel/printk.c 2011-05-19 00:06:34.000000000 -0400
58838 +++ linux-2.6.39.4/kernel/printk.c 2011-08-05 19:44:37.000000000 -0400
58839 @@ -284,12 +284,17 @@ static int check_syslog_permissions(int
58840 if (from_file && type != SYSLOG_ACTION_OPEN)
58841 return 0;
58842
58843 +#ifdef CONFIG_GRKERNSEC_DMESG
58844 + if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
58845 + return -EPERM;
58846 +#endif
58847 +
58848 if (syslog_action_restricted(type)) {
58849 if (capable(CAP_SYSLOG))
58850 return 0;
58851 /* For historical reasons, accept CAP_SYS_ADMIN too, with a warning */
58852 if (capable(CAP_SYS_ADMIN)) {
58853 - WARN_ONCE(1, "Attempt to access syslog with CAP_SYS_ADMIN "
58854 + printk_once(KERN_WARNING "Attempt to access syslog with CAP_SYS_ADMIN "
58855 "but no CAP_SYSLOG (deprecated).\n");
58856 return 0;
58857 }
58858 diff -urNp linux-2.6.39.4/kernel/profile.c linux-2.6.39.4/kernel/profile.c
58859 --- linux-2.6.39.4/kernel/profile.c 2011-05-19 00:06:34.000000000 -0400
58860 +++ linux-2.6.39.4/kernel/profile.c 2011-08-05 19:44:37.000000000 -0400
58861 @@ -39,7 +39,7 @@ struct profile_hit {
58862 /* Oprofile timer tick hook */
58863 static int (*timer_hook)(struct pt_regs *) __read_mostly;
58864
58865 -static atomic_t *prof_buffer;
58866 +static atomic_unchecked_t *prof_buffer;
58867 static unsigned long prof_len, prof_shift;
58868
58869 int prof_on __read_mostly;
58870 @@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
58871 hits[i].pc = 0;
58872 continue;
58873 }
58874 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58875 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58876 hits[i].hits = hits[i].pc = 0;
58877 }
58878 }
58879 @@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
58880 * Add the current hit(s) and flush the write-queue out
58881 * to the global buffer:
58882 */
58883 - atomic_add(nr_hits, &prof_buffer[pc]);
58884 + atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
58885 for (i = 0; i < NR_PROFILE_HIT; ++i) {
58886 - atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
58887 + atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
58888 hits[i].pc = hits[i].hits = 0;
58889 }
58890 out:
58891 @@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
58892 if (prof_on != type || !prof_buffer)
58893 return;
58894 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
58895 - atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58896 + atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
58897 }
58898 #endif /* !CONFIG_SMP */
58899 EXPORT_SYMBOL_GPL(profile_hits);
58900 @@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
58901 return -EFAULT;
58902 buf++; p++; count--; read++;
58903 }
58904 - pnt = (char *)prof_buffer + p - sizeof(atomic_t);
58905 + pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
58906 if (copy_to_user(buf, (void *)pnt, count))
58907 return -EFAULT;
58908 read += count;
58909 @@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
58910 }
58911 #endif
58912 profile_discard_flip_buffers();
58913 - memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
58914 + memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
58915 return count;
58916 }
58917
58918 diff -urNp linux-2.6.39.4/kernel/ptrace.c linux-2.6.39.4/kernel/ptrace.c
58919 --- linux-2.6.39.4/kernel/ptrace.c 2011-05-19 00:06:34.000000000 -0400
58920 +++ linux-2.6.39.4/kernel/ptrace.c 2011-08-05 19:44:37.000000000 -0400
58921 @@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
58922 return ret;
58923 }
58924
58925 -int __ptrace_may_access(struct task_struct *task, unsigned int mode)
58926 +static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
58927 + unsigned int log)
58928 {
58929 const struct cred *cred = current_cred(), *tcred;
58930
58931 @@ -143,7 +144,8 @@ int __ptrace_may_access(struct task_stru
58932 cred->gid == tcred->sgid &&
58933 cred->gid == tcred->gid))
58934 goto ok;
58935 - if (ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE))
58936 + if ((!log && ns_capable_nolog(tcred->user->user_ns, CAP_SYS_PTRACE)) ||
58937 + (log && ns_capable(tcred->user->user_ns, CAP_SYS_PTRACE)))
58938 goto ok;
58939 rcu_read_unlock();
58940 return -EPERM;
58941 @@ -152,7 +154,9 @@ ok:
58942 smp_rmb();
58943 if (task->mm)
58944 dumpable = get_dumpable(task->mm);
58945 - if (!dumpable && !task_ns_capable(task, CAP_SYS_PTRACE))
58946 + if (!dumpable &&
58947 + ((!log && !task_ns_capable_nolog(task, CAP_SYS_PTRACE)) ||
58948 + (log && !task_ns_capable(task, CAP_SYS_PTRACE))))
58949 return -EPERM;
58950
58951 return security_ptrace_access_check(task, mode);
58952 @@ -162,7 +166,16 @@ bool ptrace_may_access(struct task_struc
58953 {
58954 int err;
58955 task_lock(task);
58956 - err = __ptrace_may_access(task, mode);
58957 + err = __ptrace_may_access(task, mode, 0);
58958 + task_unlock(task);
58959 + return !err;
58960 +}
58961 +
58962 +bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
58963 +{
58964 + int err;
58965 + task_lock(task);
58966 + err = __ptrace_may_access(task, mode, 1);
58967 task_unlock(task);
58968 return !err;
58969 }
58970 @@ -189,7 +202,7 @@ static int ptrace_attach(struct task_str
58971 goto out;
58972
58973 task_lock(task);
58974 - retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
58975 + retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
58976 task_unlock(task);
58977 if (retval)
58978 goto unlock_creds;
58979 @@ -202,7 +215,7 @@ static int ptrace_attach(struct task_str
58980 goto unlock_tasklist;
58981
58982 task->ptrace = PT_PTRACED;
58983 - if (task_ns_capable(task, CAP_SYS_PTRACE))
58984 + if (task_ns_capable_nolog(task, CAP_SYS_PTRACE))
58985 task->ptrace |= PT_PTRACE_CAP;
58986
58987 __ptrace_link(task, current);
58988 @@ -362,6 +375,8 @@ int ptrace_readdata(struct task_struct *
58989 {
58990 int copied = 0;
58991
58992 + pax_track_stack();
58993 +
58994 while (len > 0) {
58995 char buf[128];
58996 int this_len, retval;
58997 @@ -373,7 +388,7 @@ int ptrace_readdata(struct task_struct *
58998 break;
58999 return -EIO;
59000 }
59001 - if (copy_to_user(dst, buf, retval))
59002 + if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
59003 return -EFAULT;
59004 copied += retval;
59005 src += retval;
59006 @@ -387,6 +402,8 @@ int ptrace_writedata(struct task_struct
59007 {
59008 int copied = 0;
59009
59010 + pax_track_stack();
59011 +
59012 while (len > 0) {
59013 char buf[128];
59014 int this_len, retval;
59015 @@ -569,9 +586,11 @@ int ptrace_request(struct task_struct *c
59016 {
59017 int ret = -EIO;
59018 siginfo_t siginfo;
59019 - void __user *datavp = (void __user *) data;
59020 + void __user *datavp = (__force void __user *) data;
59021 unsigned long __user *datalp = datavp;
59022
59023 + pax_track_stack();
59024 +
59025 switch (request) {
59026 case PTRACE_PEEKTEXT:
59027 case PTRACE_PEEKDATA:
59028 @@ -717,14 +736,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
59029 goto out;
59030 }
59031
59032 + if (gr_handle_ptrace(child, request)) {
59033 + ret = -EPERM;
59034 + goto out_put_task_struct;
59035 + }
59036 +
59037 if (request == PTRACE_ATTACH) {
59038 ret = ptrace_attach(child);
59039 /*
59040 * Some architectures need to do book-keeping after
59041 * a ptrace attach.
59042 */
59043 - if (!ret)
59044 + if (!ret) {
59045 arch_ptrace_attach(child);
59046 + gr_audit_ptrace(child);
59047 + }
59048 goto out_put_task_struct;
59049 }
59050
59051 @@ -749,7 +775,7 @@ int generic_ptrace_peekdata(struct task_
59052 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
59053 if (copied != sizeof(tmp))
59054 return -EIO;
59055 - return put_user(tmp, (unsigned long __user *)data);
59056 + return put_user(tmp, (__force unsigned long __user *)data);
59057 }
59058
59059 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
59060 @@ -772,6 +798,8 @@ int compat_ptrace_request(struct task_st
59061 siginfo_t siginfo;
59062 int ret;
59063
59064 + pax_track_stack();
59065 +
59066 switch (request) {
59067 case PTRACE_PEEKTEXT:
59068 case PTRACE_PEEKDATA:
59069 @@ -859,14 +887,21 @@ asmlinkage long compat_sys_ptrace(compat
59070 goto out;
59071 }
59072
59073 + if (gr_handle_ptrace(child, request)) {
59074 + ret = -EPERM;
59075 + goto out_put_task_struct;
59076 + }
59077 +
59078 if (request == PTRACE_ATTACH) {
59079 ret = ptrace_attach(child);
59080 /*
59081 * Some architectures need to do book-keeping after
59082 * a ptrace attach.
59083 */
59084 - if (!ret)
59085 + if (!ret) {
59086 arch_ptrace_attach(child);
59087 + gr_audit_ptrace(child);
59088 + }
59089 goto out_put_task_struct;
59090 }
59091
59092 diff -urNp linux-2.6.39.4/kernel/rcutorture.c linux-2.6.39.4/kernel/rcutorture.c
59093 --- linux-2.6.39.4/kernel/rcutorture.c 2011-05-19 00:06:34.000000000 -0400
59094 +++ linux-2.6.39.4/kernel/rcutorture.c 2011-08-05 19:44:37.000000000 -0400
59095 @@ -138,12 +138,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
59096 { 0 };
59097 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
59098 { 0 };
59099 -static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
59100 -static atomic_t n_rcu_torture_alloc;
59101 -static atomic_t n_rcu_torture_alloc_fail;
59102 -static atomic_t n_rcu_torture_free;
59103 -static atomic_t n_rcu_torture_mberror;
59104 -static atomic_t n_rcu_torture_error;
59105 +static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
59106 +static atomic_unchecked_t n_rcu_torture_alloc;
59107 +static atomic_unchecked_t n_rcu_torture_alloc_fail;
59108 +static atomic_unchecked_t n_rcu_torture_free;
59109 +static atomic_unchecked_t n_rcu_torture_mberror;
59110 +static atomic_unchecked_t n_rcu_torture_error;
59111 static long n_rcu_torture_boost_ktrerror;
59112 static long n_rcu_torture_boost_rterror;
59113 static long n_rcu_torture_boost_allocerror;
59114 @@ -225,11 +225,11 @@ rcu_torture_alloc(void)
59115
59116 spin_lock_bh(&rcu_torture_lock);
59117 if (list_empty(&rcu_torture_freelist)) {
59118 - atomic_inc(&n_rcu_torture_alloc_fail);
59119 + atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
59120 spin_unlock_bh(&rcu_torture_lock);
59121 return NULL;
59122 }
59123 - atomic_inc(&n_rcu_torture_alloc);
59124 + atomic_inc_unchecked(&n_rcu_torture_alloc);
59125 p = rcu_torture_freelist.next;
59126 list_del_init(p);
59127 spin_unlock_bh(&rcu_torture_lock);
59128 @@ -242,7 +242,7 @@ rcu_torture_alloc(void)
59129 static void
59130 rcu_torture_free(struct rcu_torture *p)
59131 {
59132 - atomic_inc(&n_rcu_torture_free);
59133 + atomic_inc_unchecked(&n_rcu_torture_free);
59134 spin_lock_bh(&rcu_torture_lock);
59135 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
59136 spin_unlock_bh(&rcu_torture_lock);
59137 @@ -362,7 +362,7 @@ rcu_torture_cb(struct rcu_head *p)
59138 i = rp->rtort_pipe_count;
59139 if (i > RCU_TORTURE_PIPE_LEN)
59140 i = RCU_TORTURE_PIPE_LEN;
59141 - atomic_inc(&rcu_torture_wcount[i]);
59142 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
59143 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
59144 rp->rtort_mbtest = 0;
59145 rcu_torture_free(rp);
59146 @@ -409,7 +409,7 @@ static void rcu_sync_torture_deferred_fr
59147 i = rp->rtort_pipe_count;
59148 if (i > RCU_TORTURE_PIPE_LEN)
59149 i = RCU_TORTURE_PIPE_LEN;
59150 - atomic_inc(&rcu_torture_wcount[i]);
59151 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
59152 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
59153 rp->rtort_mbtest = 0;
59154 list_del(&rp->rtort_free);
59155 @@ -882,7 +882,7 @@ rcu_torture_writer(void *arg)
59156 i = old_rp->rtort_pipe_count;
59157 if (i > RCU_TORTURE_PIPE_LEN)
59158 i = RCU_TORTURE_PIPE_LEN;
59159 - atomic_inc(&rcu_torture_wcount[i]);
59160 + atomic_inc_unchecked(&rcu_torture_wcount[i]);
59161 old_rp->rtort_pipe_count++;
59162 cur_ops->deferred_free(old_rp);
59163 }
59164 @@ -951,7 +951,7 @@ static void rcu_torture_timer(unsigned l
59165 return;
59166 }
59167 if (p->rtort_mbtest == 0)
59168 - atomic_inc(&n_rcu_torture_mberror);
59169 + atomic_inc_unchecked(&n_rcu_torture_mberror);
59170 spin_lock(&rand_lock);
59171 cur_ops->read_delay(&rand);
59172 n_rcu_torture_timers++;
59173 @@ -1013,7 +1013,7 @@ rcu_torture_reader(void *arg)
59174 continue;
59175 }
59176 if (p->rtort_mbtest == 0)
59177 - atomic_inc(&n_rcu_torture_mberror);
59178 + atomic_inc_unchecked(&n_rcu_torture_mberror);
59179 cur_ops->read_delay(&rand);
59180 preempt_disable();
59181 pipe_count = p->rtort_pipe_count;
59182 @@ -1072,10 +1072,10 @@ rcu_torture_printk(char *page)
59183 rcu_torture_current,
59184 rcu_torture_current_version,
59185 list_empty(&rcu_torture_freelist),
59186 - atomic_read(&n_rcu_torture_alloc),
59187 - atomic_read(&n_rcu_torture_alloc_fail),
59188 - atomic_read(&n_rcu_torture_free),
59189 - atomic_read(&n_rcu_torture_mberror),
59190 + atomic_read_unchecked(&n_rcu_torture_alloc),
59191 + atomic_read_unchecked(&n_rcu_torture_alloc_fail),
59192 + atomic_read_unchecked(&n_rcu_torture_free),
59193 + atomic_read_unchecked(&n_rcu_torture_mberror),
59194 n_rcu_torture_boost_ktrerror,
59195 n_rcu_torture_boost_rterror,
59196 n_rcu_torture_boost_allocerror,
59197 @@ -1083,7 +1083,7 @@ rcu_torture_printk(char *page)
59198 n_rcu_torture_boost_failure,
59199 n_rcu_torture_boosts,
59200 n_rcu_torture_timers);
59201 - if (atomic_read(&n_rcu_torture_mberror) != 0 ||
59202 + if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
59203 n_rcu_torture_boost_ktrerror != 0 ||
59204 n_rcu_torture_boost_rterror != 0 ||
59205 n_rcu_torture_boost_allocerror != 0 ||
59206 @@ -1093,7 +1093,7 @@ rcu_torture_printk(char *page)
59207 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
59208 if (i > 1) {
59209 cnt += sprintf(&page[cnt], "!!! ");
59210 - atomic_inc(&n_rcu_torture_error);
59211 + atomic_inc_unchecked(&n_rcu_torture_error);
59212 WARN_ON_ONCE(1);
59213 }
59214 cnt += sprintf(&page[cnt], "Reader Pipe: ");
59215 @@ -1107,7 +1107,7 @@ rcu_torture_printk(char *page)
59216 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
59217 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
59218 cnt += sprintf(&page[cnt], " %d",
59219 - atomic_read(&rcu_torture_wcount[i]));
59220 + atomic_read_unchecked(&rcu_torture_wcount[i]));
59221 }
59222 cnt += sprintf(&page[cnt], "\n");
59223 if (cur_ops->stats)
59224 @@ -1415,7 +1415,7 @@ rcu_torture_cleanup(void)
59225
59226 if (cur_ops->cleanup)
59227 cur_ops->cleanup();
59228 - if (atomic_read(&n_rcu_torture_error))
59229 + if (atomic_read_unchecked(&n_rcu_torture_error))
59230 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
59231 else
59232 rcu_torture_print_module_parms(cur_ops, "End of test: SUCCESS");
59233 @@ -1479,11 +1479,11 @@ rcu_torture_init(void)
59234
59235 rcu_torture_current = NULL;
59236 rcu_torture_current_version = 0;
59237 - atomic_set(&n_rcu_torture_alloc, 0);
59238 - atomic_set(&n_rcu_torture_alloc_fail, 0);
59239 - atomic_set(&n_rcu_torture_free, 0);
59240 - atomic_set(&n_rcu_torture_mberror, 0);
59241 - atomic_set(&n_rcu_torture_error, 0);
59242 + atomic_set_unchecked(&n_rcu_torture_alloc, 0);
59243 + atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
59244 + atomic_set_unchecked(&n_rcu_torture_free, 0);
59245 + atomic_set_unchecked(&n_rcu_torture_mberror, 0);
59246 + atomic_set_unchecked(&n_rcu_torture_error, 0);
59247 n_rcu_torture_boost_ktrerror = 0;
59248 n_rcu_torture_boost_rterror = 0;
59249 n_rcu_torture_boost_allocerror = 0;
59250 @@ -1491,7 +1491,7 @@ rcu_torture_init(void)
59251 n_rcu_torture_boost_failure = 0;
59252 n_rcu_torture_boosts = 0;
59253 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
59254 - atomic_set(&rcu_torture_wcount[i], 0);
59255 + atomic_set_unchecked(&rcu_torture_wcount[i], 0);
59256 for_each_possible_cpu(cpu) {
59257 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
59258 per_cpu(rcu_torture_count, cpu)[i] = 0;
59259 diff -urNp linux-2.6.39.4/kernel/rcutree.c linux-2.6.39.4/kernel/rcutree.c
59260 --- linux-2.6.39.4/kernel/rcutree.c 2011-05-19 00:06:34.000000000 -0400
59261 +++ linux-2.6.39.4/kernel/rcutree.c 2011-08-05 19:44:37.000000000 -0400
59262 @@ -1389,7 +1389,7 @@ __rcu_process_callbacks(struct rcu_state
59263 /*
59264 * Do softirq processing for the current CPU.
59265 */
59266 -static void rcu_process_callbacks(struct softirq_action *unused)
59267 +static void rcu_process_callbacks(void)
59268 {
59269 /*
59270 * Memory references from any prior RCU read-side critical sections
59271 diff -urNp linux-2.6.39.4/kernel/rcutree_plugin.h linux-2.6.39.4/kernel/rcutree_plugin.h
59272 --- linux-2.6.39.4/kernel/rcutree_plugin.h 2011-05-19 00:06:34.000000000 -0400
59273 +++ linux-2.6.39.4/kernel/rcutree_plugin.h 2011-08-05 19:44:37.000000000 -0400
59274 @@ -730,7 +730,7 @@ void synchronize_rcu_expedited(void)
59275
59276 /* Clean up and exit. */
59277 smp_mb(); /* ensure expedited GP seen before counter increment. */
59278 - ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
59279 + ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
59280 unlock_mb_ret:
59281 mutex_unlock(&sync_rcu_preempt_exp_mutex);
59282 mb_ret:
59283 @@ -1025,8 +1025,8 @@ EXPORT_SYMBOL_GPL(synchronize_sched_expe
59284
59285 #else /* #ifndef CONFIG_SMP */
59286
59287 -static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
59288 -static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
59289 +static atomic_unchecked_t sync_sched_expedited_started = ATOMIC_INIT(0);
59290 +static atomic_unchecked_t sync_sched_expedited_done = ATOMIC_INIT(0);
59291
59292 static int synchronize_sched_expedited_cpu_stop(void *data)
59293 {
59294 @@ -1081,7 +1081,7 @@ void synchronize_sched_expedited(void)
59295 int firstsnap, s, snap, trycount = 0;
59296
59297 /* Note that atomic_inc_return() implies full memory barrier. */
59298 - firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
59299 + firstsnap = snap = atomic_inc_return_unchecked(&sync_sched_expedited_started);
59300 get_online_cpus();
59301
59302 /*
59303 @@ -1102,7 +1102,7 @@ void synchronize_sched_expedited(void)
59304 }
59305
59306 /* Check to see if someone else did our work for us. */
59307 - s = atomic_read(&sync_sched_expedited_done);
59308 + s = atomic_read_unchecked(&sync_sched_expedited_done);
59309 if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
59310 smp_mb(); /* ensure test happens before caller kfree */
59311 return;
59312 @@ -1117,7 +1117,7 @@ void synchronize_sched_expedited(void)
59313 * grace period works for us.
59314 */
59315 get_online_cpus();
59316 - snap = atomic_read(&sync_sched_expedited_started) - 1;
59317 + snap = atomic_read_unchecked(&sync_sched_expedited_started) - 1;
59318 smp_mb(); /* ensure read is before try_stop_cpus(). */
59319 }
59320
59321 @@ -1128,12 +1128,12 @@ void synchronize_sched_expedited(void)
59322 * than we did beat us to the punch.
59323 */
59324 do {
59325 - s = atomic_read(&sync_sched_expedited_done);
59326 + s = atomic_read_unchecked(&sync_sched_expedited_done);
59327 if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
59328 smp_mb(); /* ensure test happens before caller kfree */
59329 break;
59330 }
59331 - } while (atomic_cmpxchg(&sync_sched_expedited_done, s, snap) != s);
59332 + } while (atomic_cmpxchg_unchecked(&sync_sched_expedited_done, s, snap) != s);
59333
59334 put_online_cpus();
59335 }
59336 diff -urNp linux-2.6.39.4/kernel/relay.c linux-2.6.39.4/kernel/relay.c
59337 --- linux-2.6.39.4/kernel/relay.c 2011-05-19 00:06:34.000000000 -0400
59338 +++ linux-2.6.39.4/kernel/relay.c 2011-08-05 19:44:37.000000000 -0400
59339 @@ -1236,6 +1236,8 @@ static ssize_t subbuf_splice_actor(struc
59340 };
59341 ssize_t ret;
59342
59343 + pax_track_stack();
59344 +
59345 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
59346 return 0;
59347 if (splice_grow_spd(pipe, &spd))
59348 diff -urNp linux-2.6.39.4/kernel/resource.c linux-2.6.39.4/kernel/resource.c
59349 --- linux-2.6.39.4/kernel/resource.c 2011-05-19 00:06:34.000000000 -0400
59350 +++ linux-2.6.39.4/kernel/resource.c 2011-08-05 19:44:37.000000000 -0400
59351 @@ -133,8 +133,18 @@ static const struct file_operations proc
59352
59353 static int __init ioresources_init(void)
59354 {
59355 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
59356 +#ifdef CONFIG_GRKERNSEC_PROC_USER
59357 + proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
59358 + proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
59359 +#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59360 + proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
59361 + proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
59362 +#endif
59363 +#else
59364 proc_create("ioports", 0, NULL, &proc_ioports_operations);
59365 proc_create("iomem", 0, NULL, &proc_iomem_operations);
59366 +#endif
59367 return 0;
59368 }
59369 __initcall(ioresources_init);
59370 diff -urNp linux-2.6.39.4/kernel/rtmutex-tester.c linux-2.6.39.4/kernel/rtmutex-tester.c
59371 --- linux-2.6.39.4/kernel/rtmutex-tester.c 2011-05-19 00:06:34.000000000 -0400
59372 +++ linux-2.6.39.4/kernel/rtmutex-tester.c 2011-08-05 19:44:37.000000000 -0400
59373 @@ -20,7 +20,7 @@
59374 #define MAX_RT_TEST_MUTEXES 8
59375
59376 static spinlock_t rttest_lock;
59377 -static atomic_t rttest_event;
59378 +static atomic_unchecked_t rttest_event;
59379
59380 struct test_thread_data {
59381 int opcode;
59382 @@ -61,7 +61,7 @@ static int handle_op(struct test_thread_
59383
59384 case RTTEST_LOCKCONT:
59385 td->mutexes[td->opdata] = 1;
59386 - td->event = atomic_add_return(1, &rttest_event);
59387 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59388 return 0;
59389
59390 case RTTEST_RESET:
59391 @@ -74,7 +74,7 @@ static int handle_op(struct test_thread_
59392 return 0;
59393
59394 case RTTEST_RESETEVENT:
59395 - atomic_set(&rttest_event, 0);
59396 + atomic_set_unchecked(&rttest_event, 0);
59397 return 0;
59398
59399 default:
59400 @@ -91,9 +91,9 @@ static int handle_op(struct test_thread_
59401 return ret;
59402
59403 td->mutexes[id] = 1;
59404 - td->event = atomic_add_return(1, &rttest_event);
59405 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59406 rt_mutex_lock(&mutexes[id]);
59407 - td->event = atomic_add_return(1, &rttest_event);
59408 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59409 td->mutexes[id] = 4;
59410 return 0;
59411
59412 @@ -104,9 +104,9 @@ static int handle_op(struct test_thread_
59413 return ret;
59414
59415 td->mutexes[id] = 1;
59416 - td->event = atomic_add_return(1, &rttest_event);
59417 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59418 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
59419 - td->event = atomic_add_return(1, &rttest_event);
59420 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59421 td->mutexes[id] = ret ? 0 : 4;
59422 return ret ? -EINTR : 0;
59423
59424 @@ -115,9 +115,9 @@ static int handle_op(struct test_thread_
59425 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
59426 return ret;
59427
59428 - td->event = atomic_add_return(1, &rttest_event);
59429 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59430 rt_mutex_unlock(&mutexes[id]);
59431 - td->event = atomic_add_return(1, &rttest_event);
59432 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59433 td->mutexes[id] = 0;
59434 return 0;
59435
59436 @@ -164,7 +164,7 @@ void schedule_rt_mutex_test(struct rt_mu
59437 break;
59438
59439 td->mutexes[dat] = 2;
59440 - td->event = atomic_add_return(1, &rttest_event);
59441 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59442 break;
59443
59444 default:
59445 @@ -184,7 +184,7 @@ void schedule_rt_mutex_test(struct rt_mu
59446 return;
59447
59448 td->mutexes[dat] = 3;
59449 - td->event = atomic_add_return(1, &rttest_event);
59450 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59451 break;
59452
59453 case RTTEST_LOCKNOWAIT:
59454 @@ -196,7 +196,7 @@ void schedule_rt_mutex_test(struct rt_mu
59455 return;
59456
59457 td->mutexes[dat] = 1;
59458 - td->event = atomic_add_return(1, &rttest_event);
59459 + td->event = atomic_add_return_unchecked(1, &rttest_event);
59460 return;
59461
59462 default:
59463 diff -urNp linux-2.6.39.4/kernel/sched_autogroup.c linux-2.6.39.4/kernel/sched_autogroup.c
59464 --- linux-2.6.39.4/kernel/sched_autogroup.c 2011-05-19 00:06:34.000000000 -0400
59465 +++ linux-2.6.39.4/kernel/sched_autogroup.c 2011-08-05 19:44:37.000000000 -0400
59466 @@ -7,7 +7,7 @@
59467
59468 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
59469 static struct autogroup autogroup_default;
59470 -static atomic_t autogroup_seq_nr;
59471 +static atomic_unchecked_t autogroup_seq_nr;
59472
59473 static void __init autogroup_init(struct task_struct *init_task)
59474 {
59475 @@ -78,7 +78,7 @@ static inline struct autogroup *autogrou
59476
59477 kref_init(&ag->kref);
59478 init_rwsem(&ag->lock);
59479 - ag->id = atomic_inc_return(&autogroup_seq_nr);
59480 + ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
59481 ag->tg = tg;
59482 #ifdef CONFIG_RT_GROUP_SCHED
59483 /*
59484 diff -urNp linux-2.6.39.4/kernel/sched.c linux-2.6.39.4/kernel/sched.c
59485 --- linux-2.6.39.4/kernel/sched.c 2011-05-19 00:06:34.000000000 -0400
59486 +++ linux-2.6.39.4/kernel/sched.c 2011-08-05 19:44:37.000000000 -0400
59487 @@ -4078,6 +4078,8 @@ asmlinkage void __sched schedule(void)
59488 struct rq *rq;
59489 int cpu;
59490
59491 + pax_track_stack();
59492 +
59493 need_resched:
59494 preempt_disable();
59495 cpu = smp_processor_id();
59496 @@ -4165,7 +4167,7 @@ EXPORT_SYMBOL(schedule);
59497 * Look out! "owner" is an entirely speculative pointer
59498 * access and not reliable.
59499 */
59500 -int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
59501 +int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
59502 {
59503 unsigned int cpu;
59504 struct rq *rq;
59505 @@ -4179,10 +4181,10 @@ int mutex_spin_on_owner(struct mutex *lo
59506 * DEBUG_PAGEALLOC could have unmapped it if
59507 * the mutex owner just released it and exited.
59508 */
59509 - if (probe_kernel_address(&owner->cpu, cpu))
59510 + if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
59511 return 0;
59512 #else
59513 - cpu = owner->cpu;
59514 + cpu = task_thread_info(owner)->cpu;
59515 #endif
59516
59517 /*
59518 @@ -4219,7 +4221,7 @@ int mutex_spin_on_owner(struct mutex *lo
59519 /*
59520 * Is that owner really running on that cpu?
59521 */
59522 - if (task_thread_info(rq->curr) != owner || need_resched())
59523 + if (rq->curr != owner || need_resched())
59524 return 0;
59525
59526 arch_mutex_cpu_relax();
59527 @@ -4778,6 +4780,8 @@ int can_nice(const struct task_struct *p
59528 /* convert nice value [19,-20] to rlimit style value [1,40] */
59529 int nice_rlim = 20 - nice;
59530
59531 + gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
59532 +
59533 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
59534 capable(CAP_SYS_NICE));
59535 }
59536 @@ -4811,7 +4815,8 @@ SYSCALL_DEFINE1(nice, int, increment)
59537 if (nice > 19)
59538 nice = 19;
59539
59540 - if (increment < 0 && !can_nice(current, nice))
59541 + if (increment < 0 && (!can_nice(current, nice) ||
59542 + gr_handle_chroot_nice()))
59543 return -EPERM;
59544
59545 retval = security_task_setnice(current, nice);
59546 @@ -4957,6 +4962,7 @@ recheck:
59547 unsigned long rlim_rtprio =
59548 task_rlimit(p, RLIMIT_RTPRIO);
59549
59550 + gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
59551 /* can't set/change the rt policy */
59552 if (policy != p->policy && !rlim_rtprio)
59553 return -EPERM;
59554 @@ -7164,7 +7170,7 @@ static void init_sched_groups_power(int
59555 long power;
59556 int weight;
59557
59558 - WARN_ON(!sd || !sd->groups);
59559 + BUG_ON(!sd || !sd->groups);
59560
59561 if (cpu != group_first_cpu(sd->groups))
59562 return;
59563 diff -urNp linux-2.6.39.4/kernel/sched_fair.c linux-2.6.39.4/kernel/sched_fair.c
59564 --- linux-2.6.39.4/kernel/sched_fair.c 2011-05-19 00:06:34.000000000 -0400
59565 +++ linux-2.6.39.4/kernel/sched_fair.c 2011-08-05 19:44:37.000000000 -0400
59566 @@ -3999,7 +3999,7 @@ static void nohz_idle_balance(int this_c
59567 * run_rebalance_domains is triggered when needed from the scheduler tick.
59568 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
59569 */
59570 -static void run_rebalance_domains(struct softirq_action *h)
59571 +static void run_rebalance_domains(void)
59572 {
59573 int this_cpu = smp_processor_id();
59574 struct rq *this_rq = cpu_rq(this_cpu);
59575 diff -urNp linux-2.6.39.4/kernel/signal.c linux-2.6.39.4/kernel/signal.c
59576 --- linux-2.6.39.4/kernel/signal.c 2011-05-19 00:06:34.000000000 -0400
59577 +++ linux-2.6.39.4/kernel/signal.c 2011-08-16 21:16:33.000000000 -0400
59578 @@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache
59579
59580 int print_fatal_signals __read_mostly;
59581
59582 -static void __user *sig_handler(struct task_struct *t, int sig)
59583 +static __sighandler_t sig_handler(struct task_struct *t, int sig)
59584 {
59585 return t->sighand->action[sig - 1].sa.sa_handler;
59586 }
59587
59588 -static int sig_handler_ignored(void __user *handler, int sig)
59589 +static int sig_handler_ignored(__sighandler_t handler, int sig)
59590 {
59591 /* Is it explicitly or implicitly ignored? */
59592 return handler == SIG_IGN ||
59593 @@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us
59594 static int sig_task_ignored(struct task_struct *t, int sig,
59595 int from_ancestor_ns)
59596 {
59597 - void __user *handler;
59598 + __sighandler_t handler;
59599
59600 handler = sig_handler(t, sig);
59601
59602 @@ -243,6 +243,9 @@ __sigqueue_alloc(int sig, struct task_st
59603 atomic_inc(&user->sigpending);
59604 rcu_read_unlock();
59605
59606 + if (!override_rlimit)
59607 + gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
59608 +
59609 if (override_rlimit ||
59610 atomic_read(&user->sigpending) <=
59611 task_rlimit(t, RLIMIT_SIGPENDING)) {
59612 @@ -367,7 +370,7 @@ flush_signal_handlers(struct task_struct
59613
59614 int unhandled_signal(struct task_struct *tsk, int sig)
59615 {
59616 - void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
59617 + __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
59618 if (is_global_init(tsk))
59619 return 1;
59620 if (handler != SIG_IGN && handler != SIG_DFL)
59621 @@ -693,6 +696,13 @@ static int check_kill_permission(int sig
59622 }
59623 }
59624
59625 + /* allow glibc communication via tgkill to other threads in our
59626 + thread group */
59627 + if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
59628 + sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
59629 + && gr_handle_signal(t, sig))
59630 + return -EPERM;
59631 +
59632 return security_task_kill(t, info, sig, 0);
59633 }
59634
59635 @@ -1041,7 +1051,7 @@ __group_send_sig_info(int sig, struct si
59636 return send_signal(sig, info, p, 1);
59637 }
59638
59639 -static int
59640 +int
59641 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
59642 {
59643 return send_signal(sig, info, t, 0);
59644 @@ -1078,6 +1088,7 @@ force_sig_info(int sig, struct siginfo *
59645 unsigned long int flags;
59646 int ret, blocked, ignored;
59647 struct k_sigaction *action;
59648 + int is_unhandled = 0;
59649
59650 spin_lock_irqsave(&t->sighand->siglock, flags);
59651 action = &t->sighand->action[sig-1];
59652 @@ -1092,9 +1103,18 @@ force_sig_info(int sig, struct siginfo *
59653 }
59654 if (action->sa.sa_handler == SIG_DFL)
59655 t->signal->flags &= ~SIGNAL_UNKILLABLE;
59656 + if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
59657 + is_unhandled = 1;
59658 ret = specific_send_sig_info(sig, info, t);
59659 spin_unlock_irqrestore(&t->sighand->siglock, flags);
59660
59661 + /* only deal with unhandled signals, java etc trigger SIGSEGV during
59662 + normal operation */
59663 + if (is_unhandled) {
59664 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
59665 + gr_handle_crash(t, sig);
59666 + }
59667 +
59668 return ret;
59669 }
59670
59671 @@ -1153,8 +1173,11 @@ int group_send_sig_info(int sig, struct
59672 ret = check_kill_permission(sig, info, p);
59673 rcu_read_unlock();
59674
59675 - if (!ret && sig)
59676 + if (!ret && sig) {
59677 ret = do_send_sig_info(sig, info, p, true);
59678 + if (!ret)
59679 + gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
59680 + }
59681
59682 return ret;
59683 }
59684 @@ -1718,6 +1741,8 @@ void ptrace_notify(int exit_code)
59685 {
59686 siginfo_t info;
59687
59688 + pax_track_stack();
59689 +
59690 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
59691
59692 memset(&info, 0, sizeof info);
59693 @@ -2393,7 +2418,15 @@ do_send_specific(pid_t tgid, pid_t pid,
59694 int error = -ESRCH;
59695
59696 rcu_read_lock();
59697 - p = find_task_by_vpid(pid);
59698 +#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59699 + /* allow glibc communication via tgkill to other threads in our
59700 + thread group */
59701 + if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
59702 + sig == (SIGRTMIN+1) && tgid == info->si_pid)
59703 + p = find_task_by_vpid_unrestricted(pid);
59704 + else
59705 +#endif
59706 + p = find_task_by_vpid(pid);
59707 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
59708 error = check_kill_permission(sig, info, p);
59709 /*
59710 diff -urNp linux-2.6.39.4/kernel/smp.c linux-2.6.39.4/kernel/smp.c
59711 --- linux-2.6.39.4/kernel/smp.c 2011-05-19 00:06:34.000000000 -0400
59712 +++ linux-2.6.39.4/kernel/smp.c 2011-08-05 19:44:37.000000000 -0400
59713 @@ -583,22 +583,22 @@ int smp_call_function(smp_call_func_t fu
59714 }
59715 EXPORT_SYMBOL(smp_call_function);
59716
59717 -void ipi_call_lock(void)
59718 +void ipi_call_lock(void) __acquires(call_function.lock)
59719 {
59720 raw_spin_lock(&call_function.lock);
59721 }
59722
59723 -void ipi_call_unlock(void)
59724 +void ipi_call_unlock(void) __releases(call_function.lock)
59725 {
59726 raw_spin_unlock(&call_function.lock);
59727 }
59728
59729 -void ipi_call_lock_irq(void)
59730 +void ipi_call_lock_irq(void) __acquires(call_function.lock)
59731 {
59732 raw_spin_lock_irq(&call_function.lock);
59733 }
59734
59735 -void ipi_call_unlock_irq(void)
59736 +void ipi_call_unlock_irq(void) __releases(call_function.lock)
59737 {
59738 raw_spin_unlock_irq(&call_function.lock);
59739 }
59740 diff -urNp linux-2.6.39.4/kernel/softirq.c linux-2.6.39.4/kernel/softirq.c
59741 --- linux-2.6.39.4/kernel/softirq.c 2011-05-19 00:06:34.000000000 -0400
59742 +++ linux-2.6.39.4/kernel/softirq.c 2011-08-05 20:34:06.000000000 -0400
59743 @@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
59744
59745 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
59746
59747 -char *softirq_to_name[NR_SOFTIRQS] = {
59748 +const char * const softirq_to_name[NR_SOFTIRQS] = {
59749 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
59750 "TASKLET", "SCHED", "HRTIMER", "RCU"
59751 };
59752 @@ -235,7 +235,7 @@ restart:
59753 kstat_incr_softirqs_this_cpu(vec_nr);
59754
59755 trace_softirq_entry(vec_nr);
59756 - h->action(h);
59757 + h->action();
59758 trace_softirq_exit(vec_nr);
59759 if (unlikely(prev_count != preempt_count())) {
59760 printk(KERN_ERR "huh, entered softirq %u %s %p"
59761 @@ -377,9 +377,11 @@ void raise_softirq(unsigned int nr)
59762 local_irq_restore(flags);
59763 }
59764
59765 -void open_softirq(int nr, void (*action)(struct softirq_action *))
59766 +void open_softirq(int nr, void (*action)(void))
59767 {
59768 - softirq_vec[nr].action = action;
59769 + pax_open_kernel();
59770 + *(void **)&softirq_vec[nr].action = action;
59771 + pax_close_kernel();
59772 }
59773
59774 /*
59775 @@ -433,7 +435,7 @@ void __tasklet_hi_schedule_first(struct
59776
59777 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
59778
59779 -static void tasklet_action(struct softirq_action *a)
59780 +static void tasklet_action(void)
59781 {
59782 struct tasklet_struct *list;
59783
59784 @@ -468,7 +470,7 @@ static void tasklet_action(struct softir
59785 }
59786 }
59787
59788 -static void tasklet_hi_action(struct softirq_action *a)
59789 +static void tasklet_hi_action(void)
59790 {
59791 struct tasklet_struct *list;
59792
59793 diff -urNp linux-2.6.39.4/kernel/sys.c linux-2.6.39.4/kernel/sys.c
59794 --- linux-2.6.39.4/kernel/sys.c 2011-05-19 00:06:34.000000000 -0400
59795 +++ linux-2.6.39.4/kernel/sys.c 2011-08-05 19:44:37.000000000 -0400
59796 @@ -154,6 +154,12 @@ static int set_one_prio(struct task_stru
59797 error = -EACCES;
59798 goto out;
59799 }
59800 +
59801 + if (gr_handle_chroot_setpriority(p, niceval)) {
59802 + error = -EACCES;
59803 + goto out;
59804 + }
59805 +
59806 no_nice = security_task_setnice(p, niceval);
59807 if (no_nice) {
59808 error = no_nice;
59809 @@ -538,6 +544,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
59810 goto error;
59811 }
59812
59813 + if (gr_check_group_change(new->gid, new->egid, -1))
59814 + goto error;
59815 +
59816 if (rgid != (gid_t) -1 ||
59817 (egid != (gid_t) -1 && egid != old->gid))
59818 new->sgid = new->egid;
59819 @@ -567,6 +576,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
59820 old = current_cred();
59821
59822 retval = -EPERM;
59823 +
59824 + if (gr_check_group_change(gid, gid, gid))
59825 + goto error;
59826 +
59827 if (nsown_capable(CAP_SETGID))
59828 new->gid = new->egid = new->sgid = new->fsgid = gid;
59829 else if (gid == old->gid || gid == old->sgid)
59830 @@ -647,6 +660,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
59831 goto error;
59832 }
59833
59834 + if (gr_check_user_change(new->uid, new->euid, -1))
59835 + goto error;
59836 +
59837 if (new->uid != old->uid) {
59838 retval = set_user(new);
59839 if (retval < 0)
59840 @@ -691,6 +707,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
59841 old = current_cred();
59842
59843 retval = -EPERM;
59844 +
59845 + if (gr_check_crash_uid(uid))
59846 + goto error;
59847 + if (gr_check_user_change(uid, uid, uid))
59848 + goto error;
59849 +
59850 if (nsown_capable(CAP_SETUID)) {
59851 new->suid = new->uid = uid;
59852 if (uid != old->uid) {
59853 @@ -745,6 +767,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
59854 goto error;
59855 }
59856
59857 + if (gr_check_user_change(ruid, euid, -1))
59858 + goto error;
59859 +
59860 if (ruid != (uid_t) -1) {
59861 new->uid = ruid;
59862 if (ruid != old->uid) {
59863 @@ -809,6 +834,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
59864 goto error;
59865 }
59866
59867 + if (gr_check_group_change(rgid, egid, -1))
59868 + goto error;
59869 +
59870 if (rgid != (gid_t) -1)
59871 new->gid = rgid;
59872 if (egid != (gid_t) -1)
59873 @@ -855,6 +883,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59874 old = current_cred();
59875 old_fsuid = old->fsuid;
59876
59877 + if (gr_check_user_change(-1, -1, uid))
59878 + goto error;
59879 +
59880 if (uid == old->uid || uid == old->euid ||
59881 uid == old->suid || uid == old->fsuid ||
59882 nsown_capable(CAP_SETUID)) {
59883 @@ -865,6 +896,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
59884 }
59885 }
59886
59887 +error:
59888 abort_creds(new);
59889 return old_fsuid;
59890
59891 @@ -891,12 +923,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
59892 if (gid == old->gid || gid == old->egid ||
59893 gid == old->sgid || gid == old->fsgid ||
59894 nsown_capable(CAP_SETGID)) {
59895 + if (gr_check_group_change(-1, -1, gid))
59896 + goto error;
59897 +
59898 if (gid != old_fsgid) {
59899 new->fsgid = gid;
59900 goto change_okay;
59901 }
59902 }
59903
59904 +error:
59905 abort_creds(new);
59906 return old_fsgid;
59907
59908 @@ -1643,7 +1679,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
59909 error = get_dumpable(me->mm);
59910 break;
59911 case PR_SET_DUMPABLE:
59912 - if (arg2 < 0 || arg2 > 1) {
59913 + if (arg2 > 1) {
59914 error = -EINVAL;
59915 break;
59916 }
59917 diff -urNp linux-2.6.39.4/kernel/sysctl.c linux-2.6.39.4/kernel/sysctl.c
59918 --- linux-2.6.39.4/kernel/sysctl.c 2011-05-19 00:06:34.000000000 -0400
59919 +++ linux-2.6.39.4/kernel/sysctl.c 2011-08-05 19:44:37.000000000 -0400
59920 @@ -84,6 +84,13 @@
59921
59922
59923 #if defined(CONFIG_SYSCTL)
59924 +#include <linux/grsecurity.h>
59925 +#include <linux/grinternal.h>
59926 +
59927 +extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
59928 +extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
59929 + const int op);
59930 +extern int gr_handle_chroot_sysctl(const int op);
59931
59932 /* External variables not in a header file. */
59933 extern int sysctl_overcommit_memory;
59934 @@ -196,6 +203,7 @@ static int sysrq_sysctl_handler(ctl_tabl
59935 }
59936
59937 #endif
59938 +extern struct ctl_table grsecurity_table[];
59939
59940 static struct ctl_table root_table[];
59941 static struct ctl_table_root sysctl_table_root;
59942 @@ -225,6 +233,20 @@ extern struct ctl_table epoll_table[];
59943 int sysctl_legacy_va_layout;
59944 #endif
59945
59946 +#ifdef CONFIG_PAX_SOFTMODE
59947 +static ctl_table pax_table[] = {
59948 + {
59949 + .procname = "softmode",
59950 + .data = &pax_softmode,
59951 + .maxlen = sizeof(unsigned int),
59952 + .mode = 0600,
59953 + .proc_handler = &proc_dointvec,
59954 + },
59955 +
59956 + { }
59957 +};
59958 +#endif
59959 +
59960 /* The default sysctl tables: */
59961
59962 static struct ctl_table root_table[] = {
59963 @@ -271,6 +293,22 @@ static int max_extfrag_threshold = 1000;
59964 #endif
59965
59966 static struct ctl_table kern_table[] = {
59967 +#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
59968 + {
59969 + .procname = "grsecurity",
59970 + .mode = 0500,
59971 + .child = grsecurity_table,
59972 + },
59973 +#endif
59974 +
59975 +#ifdef CONFIG_PAX_SOFTMODE
59976 + {
59977 + .procname = "pax",
59978 + .mode = 0500,
59979 + .child = pax_table,
59980 + },
59981 +#endif
59982 +
59983 {
59984 .procname = "sched_child_runs_first",
59985 .data = &sysctl_sched_child_runs_first,
59986 @@ -545,7 +583,7 @@ static struct ctl_table kern_table[] = {
59987 .data = &modprobe_path,
59988 .maxlen = KMOD_PATH_LEN,
59989 .mode = 0644,
59990 - .proc_handler = proc_dostring,
59991 + .proc_handler = proc_dostring_modpriv,
59992 },
59993 {
59994 .procname = "modules_disabled",
59995 @@ -707,16 +745,20 @@ static struct ctl_table kern_table[] = {
59996 .extra1 = &zero,
59997 .extra2 = &one,
59998 },
59999 +#endif
60000 {
60001 .procname = "kptr_restrict",
60002 .data = &kptr_restrict,
60003 .maxlen = sizeof(int),
60004 .mode = 0644,
60005 .proc_handler = proc_dmesg_restrict,
60006 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60007 + .extra1 = &two,
60008 +#else
60009 .extra1 = &zero,
60010 +#endif
60011 .extra2 = &two,
60012 },
60013 -#endif
60014 {
60015 .procname = "ngroups_max",
60016 .data = &ngroups_max,
60017 @@ -1189,6 +1231,13 @@ static struct ctl_table vm_table[] = {
60018 .proc_handler = proc_dointvec_minmax,
60019 .extra1 = &zero,
60020 },
60021 + {
60022 + .procname = "heap_stack_gap",
60023 + .data = &sysctl_heap_stack_gap,
60024 + .maxlen = sizeof(sysctl_heap_stack_gap),
60025 + .mode = 0644,
60026 + .proc_handler = proc_doulongvec_minmax,
60027 + },
60028 #else
60029 {
60030 .procname = "nr_trim_pages",
60031 @@ -1698,6 +1747,17 @@ static int test_perm(int mode, int op)
60032 int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
60033 {
60034 int mode;
60035 + int error;
60036 +
60037 + if (table->parent != NULL && table->parent->procname != NULL &&
60038 + table->procname != NULL &&
60039 + gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
60040 + return -EACCES;
60041 + if (gr_handle_chroot_sysctl(op))
60042 + return -EACCES;
60043 + error = gr_handle_sysctl(table, op);
60044 + if (error)
60045 + return error;
60046
60047 if (root->permissions)
60048 mode = root->permissions(root, current->nsproxy, table);
60049 @@ -2102,6 +2162,16 @@ int proc_dostring(struct ctl_table *tabl
60050 buffer, lenp, ppos);
60051 }
60052
60053 +int proc_dostring_modpriv(struct ctl_table *table, int write,
60054 + void __user *buffer, size_t *lenp, loff_t *ppos)
60055 +{
60056 + if (write && !capable(CAP_SYS_MODULE))
60057 + return -EPERM;
60058 +
60059 + return _proc_do_string(table->data, table->maxlen, write,
60060 + buffer, lenp, ppos);
60061 +}
60062 +
60063 static size_t proc_skip_spaces(char **buf)
60064 {
60065 size_t ret;
60066 @@ -2207,6 +2277,8 @@ static int proc_put_long(void __user **b
60067 len = strlen(tmp);
60068 if (len > *size)
60069 len = *size;
60070 + if (len > sizeof(tmp))
60071 + len = sizeof(tmp);
60072 if (copy_to_user(*buf, tmp, len))
60073 return -EFAULT;
60074 *size -= len;
60075 @@ -2523,8 +2595,11 @@ static int __do_proc_doulongvec_minmax(v
60076 *i = val;
60077 } else {
60078 val = convdiv * (*i) / convmul;
60079 - if (!first)
60080 + if (!first) {
60081 err = proc_put_char(&buffer, &left, '\t');
60082 + if (err)
60083 + break;
60084 + }
60085 err = proc_put_long(&buffer, &left, val, false);
60086 if (err)
60087 break;
60088 @@ -2919,6 +2994,12 @@ int proc_dostring(struct ctl_table *tabl
60089 return -ENOSYS;
60090 }
60091
60092 +int proc_dostring_modpriv(struct ctl_table *table, int write,
60093 + void __user *buffer, size_t *lenp, loff_t *ppos)
60094 +{
60095 + return -ENOSYS;
60096 +}
60097 +
60098 int proc_dointvec(struct ctl_table *table, int write,
60099 void __user *buffer, size_t *lenp, loff_t *ppos)
60100 {
60101 @@ -2975,6 +3056,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
60102 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
60103 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
60104 EXPORT_SYMBOL(proc_dostring);
60105 +EXPORT_SYMBOL(proc_dostring_modpriv);
60106 EXPORT_SYMBOL(proc_doulongvec_minmax);
60107 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
60108 EXPORT_SYMBOL(register_sysctl_table);
60109 diff -urNp linux-2.6.39.4/kernel/sysctl_check.c linux-2.6.39.4/kernel/sysctl_check.c
60110 --- linux-2.6.39.4/kernel/sysctl_check.c 2011-05-19 00:06:34.000000000 -0400
60111 +++ linux-2.6.39.4/kernel/sysctl_check.c 2011-08-05 19:44:37.000000000 -0400
60112 @@ -129,6 +129,7 @@ int sysctl_check_table(struct nsproxy *n
60113 set_fail(&fail, table, "Directory with extra2");
60114 } else {
60115 if ((table->proc_handler == proc_dostring) ||
60116 + (table->proc_handler == proc_dostring_modpriv) ||
60117 (table->proc_handler == proc_dointvec) ||
60118 (table->proc_handler == proc_dointvec_minmax) ||
60119 (table->proc_handler == proc_dointvec_jiffies) ||
60120 diff -urNp linux-2.6.39.4/kernel/taskstats.c linux-2.6.39.4/kernel/taskstats.c
60121 --- linux-2.6.39.4/kernel/taskstats.c 2011-07-09 09:18:51.000000000 -0400
60122 +++ linux-2.6.39.4/kernel/taskstats.c 2011-08-05 19:44:37.000000000 -0400
60123 @@ -27,9 +27,12 @@
60124 #include <linux/cgroup.h>
60125 #include <linux/fs.h>
60126 #include <linux/file.h>
60127 +#include <linux/grsecurity.h>
60128 #include <net/genetlink.h>
60129 #include <asm/atomic.h>
60130
60131 +extern int gr_is_taskstats_denied(int pid);
60132 +
60133 /*
60134 * Maximum length of a cpumask that can be specified in
60135 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
60136 @@ -558,6 +561,9 @@ err:
60137
60138 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
60139 {
60140 + if (gr_is_taskstats_denied(current->pid))
60141 + return -EACCES;
60142 +
60143 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
60144 return cmd_attr_register_cpumask(info);
60145 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
60146 diff -urNp linux-2.6.39.4/kernel/time/tick-broadcast.c linux-2.6.39.4/kernel/time/tick-broadcast.c
60147 --- linux-2.6.39.4/kernel/time/tick-broadcast.c 2011-05-19 00:06:34.000000000 -0400
60148 +++ linux-2.6.39.4/kernel/time/tick-broadcast.c 2011-08-05 19:44:37.000000000 -0400
60149 @@ -115,7 +115,7 @@ int tick_device_uses_broadcast(struct cl
60150 * then clear the broadcast bit.
60151 */
60152 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
60153 - int cpu = smp_processor_id();
60154 + cpu = smp_processor_id();
60155
60156 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
60157 tick_broadcast_clear_oneshot(cpu);
60158 diff -urNp linux-2.6.39.4/kernel/time/timekeeping.c linux-2.6.39.4/kernel/time/timekeeping.c
60159 --- linux-2.6.39.4/kernel/time/timekeeping.c 2011-05-19 00:06:34.000000000 -0400
60160 +++ linux-2.6.39.4/kernel/time/timekeeping.c 2011-08-05 19:44:37.000000000 -0400
60161 @@ -14,6 +14,7 @@
60162 #include <linux/init.h>
60163 #include <linux/mm.h>
60164 #include <linux/sched.h>
60165 +#include <linux/grsecurity.h>
60166 #include <linux/syscore_ops.h>
60167 #include <linux/clocksource.h>
60168 #include <linux/jiffies.h>
60169 @@ -361,6 +362,8 @@ int do_settimeofday(const struct timespe
60170 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
60171 return -EINVAL;
60172
60173 + gr_log_timechange();
60174 +
60175 write_seqlock_irqsave(&xtime_lock, flags);
60176
60177 timekeeping_forward_now();
60178 diff -urNp linux-2.6.39.4/kernel/time/timer_list.c linux-2.6.39.4/kernel/time/timer_list.c
60179 --- linux-2.6.39.4/kernel/time/timer_list.c 2011-05-19 00:06:34.000000000 -0400
60180 +++ linux-2.6.39.4/kernel/time/timer_list.c 2011-08-05 19:44:37.000000000 -0400
60181 @@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
60182
60183 static void print_name_offset(struct seq_file *m, void *sym)
60184 {
60185 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60186 + SEQ_printf(m, "<%p>", NULL);
60187 +#else
60188 char symname[KSYM_NAME_LEN];
60189
60190 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
60191 SEQ_printf(m, "<%pK>", sym);
60192 else
60193 SEQ_printf(m, "%s", symname);
60194 +#endif
60195 }
60196
60197 static void
60198 @@ -112,7 +116,11 @@ next_one:
60199 static void
60200 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
60201 {
60202 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60203 + SEQ_printf(m, " .base: %p\n", NULL);
60204 +#else
60205 SEQ_printf(m, " .base: %pK\n", base);
60206 +#endif
60207 SEQ_printf(m, " .index: %d\n",
60208 base->index);
60209 SEQ_printf(m, " .resolution: %Lu nsecs\n",
60210 @@ -293,7 +301,11 @@ static int __init init_timer_list_procfs
60211 {
60212 struct proc_dir_entry *pe;
60213
60214 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
60215 + pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
60216 +#else
60217 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
60218 +#endif
60219 if (!pe)
60220 return -ENOMEM;
60221 return 0;
60222 diff -urNp linux-2.6.39.4/kernel/time/timer_stats.c linux-2.6.39.4/kernel/time/timer_stats.c
60223 --- linux-2.6.39.4/kernel/time/timer_stats.c 2011-05-19 00:06:34.000000000 -0400
60224 +++ linux-2.6.39.4/kernel/time/timer_stats.c 2011-08-05 19:44:37.000000000 -0400
60225 @@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
60226 static unsigned long nr_entries;
60227 static struct entry entries[MAX_ENTRIES];
60228
60229 -static atomic_t overflow_count;
60230 +static atomic_unchecked_t overflow_count;
60231
60232 /*
60233 * The entries are in a hash-table, for fast lookup:
60234 @@ -140,7 +140,7 @@ static void reset_entries(void)
60235 nr_entries = 0;
60236 memset(entries, 0, sizeof(entries));
60237 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
60238 - atomic_set(&overflow_count, 0);
60239 + atomic_set_unchecked(&overflow_count, 0);
60240 }
60241
60242 static struct entry *alloc_entry(void)
60243 @@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
60244 if (likely(entry))
60245 entry->count++;
60246 else
60247 - atomic_inc(&overflow_count);
60248 + atomic_inc_unchecked(&overflow_count);
60249
60250 out_unlock:
60251 raw_spin_unlock_irqrestore(lock, flags);
60252 @@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
60253
60254 static void print_name_offset(struct seq_file *m, unsigned long addr)
60255 {
60256 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60257 + seq_printf(m, "<%p>", NULL);
60258 +#else
60259 char symname[KSYM_NAME_LEN];
60260
60261 if (lookup_symbol_name(addr, symname) < 0)
60262 seq_printf(m, "<%p>", (void *)addr);
60263 else
60264 seq_printf(m, "%s", symname);
60265 +#endif
60266 }
60267
60268 static int tstats_show(struct seq_file *m, void *v)
60269 @@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
60270
60271 seq_puts(m, "Timer Stats Version: v0.2\n");
60272 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
60273 - if (atomic_read(&overflow_count))
60274 + if (atomic_read_unchecked(&overflow_count))
60275 seq_printf(m, "Overflow: %d entries\n",
60276 - atomic_read(&overflow_count));
60277 + atomic_read_unchecked(&overflow_count));
60278
60279 for (i = 0; i < nr_entries; i++) {
60280 entry = entries + i;
60281 @@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi
60282 {
60283 struct proc_dir_entry *pe;
60284
60285 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
60286 + pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
60287 +#else
60288 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
60289 +#endif
60290 if (!pe)
60291 return -ENOMEM;
60292 return 0;
60293 diff -urNp linux-2.6.39.4/kernel/time.c linux-2.6.39.4/kernel/time.c
60294 --- linux-2.6.39.4/kernel/time.c 2011-05-19 00:06:34.000000000 -0400
60295 +++ linux-2.6.39.4/kernel/time.c 2011-08-05 19:44:37.000000000 -0400
60296 @@ -163,6 +163,11 @@ int do_sys_settimeofday(const struct tim
60297 return error;
60298
60299 if (tz) {
60300 + /* we log in do_settimeofday called below, so don't log twice
60301 + */
60302 + if (!tv)
60303 + gr_log_timechange();
60304 +
60305 /* SMP safe, global irq locking makes it work. */
60306 sys_tz = *tz;
60307 update_vsyscall_tz();
60308 diff -urNp linux-2.6.39.4/kernel/timer.c linux-2.6.39.4/kernel/timer.c
60309 --- linux-2.6.39.4/kernel/timer.c 2011-05-19 00:06:34.000000000 -0400
60310 +++ linux-2.6.39.4/kernel/timer.c 2011-08-05 19:44:37.000000000 -0400
60311 @@ -1305,7 +1305,7 @@ void update_process_times(int user_tick)
60312 /*
60313 * This function runs timers and the timer-tq in bottom half context.
60314 */
60315 -static void run_timer_softirq(struct softirq_action *h)
60316 +static void run_timer_softirq(void)
60317 {
60318 struct tvec_base *base = __this_cpu_read(tvec_bases);
60319
60320 diff -urNp linux-2.6.39.4/kernel/trace/blktrace.c linux-2.6.39.4/kernel/trace/blktrace.c
60321 --- linux-2.6.39.4/kernel/trace/blktrace.c 2011-05-19 00:06:34.000000000 -0400
60322 +++ linux-2.6.39.4/kernel/trace/blktrace.c 2011-08-05 19:44:37.000000000 -0400
60323 @@ -321,7 +321,7 @@ static ssize_t blk_dropped_read(struct f
60324 struct blk_trace *bt = filp->private_data;
60325 char buf[16];
60326
60327 - snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
60328 + snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
60329
60330 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
60331 }
60332 @@ -386,7 +386,7 @@ static int blk_subbuf_start_callback(str
60333 return 1;
60334
60335 bt = buf->chan->private_data;
60336 - atomic_inc(&bt->dropped);
60337 + atomic_inc_unchecked(&bt->dropped);
60338 return 0;
60339 }
60340
60341 @@ -487,7 +487,7 @@ int do_blk_trace_setup(struct request_qu
60342
60343 bt->dir = dir;
60344 bt->dev = dev;
60345 - atomic_set(&bt->dropped, 0);
60346 + atomic_set_unchecked(&bt->dropped, 0);
60347
60348 ret = -EIO;
60349 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
60350 diff -urNp linux-2.6.39.4/kernel/trace/ftrace.c linux-2.6.39.4/kernel/trace/ftrace.c
60351 --- linux-2.6.39.4/kernel/trace/ftrace.c 2011-06-03 00:04:14.000000000 -0400
60352 +++ linux-2.6.39.4/kernel/trace/ftrace.c 2011-08-05 20:34:06.000000000 -0400
60353 @@ -1107,13 +1107,18 @@ ftrace_code_disable(struct module *mod,
60354
60355 ip = rec->ip;
60356
60357 + ret = ftrace_arch_code_modify_prepare();
60358 + FTRACE_WARN_ON(ret);
60359 + if (ret)
60360 + return 0;
60361 +
60362 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
60363 + FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
60364 if (ret) {
60365 ftrace_bug(ret, ip);
60366 rec->flags |= FTRACE_FL_FAILED;
60367 - return 0;
60368 }
60369 - return 1;
60370 + return ret ? 0 : 1;
60371 }
60372
60373 /*
60374 @@ -2011,7 +2016,7 @@ static void ftrace_free_entry_rcu(struct
60375
60376 int
60377 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
60378 - void *data)
60379 + void *data)
60380 {
60381 struct ftrace_func_probe *entry;
60382 struct ftrace_page *pg;
60383 diff -urNp linux-2.6.39.4/kernel/trace/trace.c linux-2.6.39.4/kernel/trace/trace.c
60384 --- linux-2.6.39.4/kernel/trace/trace.c 2011-05-19 00:06:34.000000000 -0400
60385 +++ linux-2.6.39.4/kernel/trace/trace.c 2011-08-05 19:44:37.000000000 -0400
60386 @@ -3330,6 +3330,8 @@ static ssize_t tracing_splice_read_pipe(
60387 size_t rem;
60388 unsigned int i;
60389
60390 + pax_track_stack();
60391 +
60392 if (splice_grow_spd(pipe, &spd))
60393 return -ENOMEM;
60394
60395 @@ -3813,6 +3815,8 @@ tracing_buffers_splice_read(struct file
60396 int entries, size, i;
60397 size_t ret;
60398
60399 + pax_track_stack();
60400 +
60401 if (splice_grow_spd(pipe, &spd))
60402 return -ENOMEM;
60403
60404 @@ -3981,10 +3985,9 @@ static const struct file_operations trac
60405 };
60406 #endif
60407
60408 -static struct dentry *d_tracer;
60409 -
60410 struct dentry *tracing_init_dentry(void)
60411 {
60412 + static struct dentry *d_tracer;
60413 static int once;
60414
60415 if (d_tracer)
60416 @@ -4004,10 +4007,9 @@ struct dentry *tracing_init_dentry(void)
60417 return d_tracer;
60418 }
60419
60420 -static struct dentry *d_percpu;
60421 -
60422 struct dentry *tracing_dentry_percpu(void)
60423 {
60424 + static struct dentry *d_percpu;
60425 static int once;
60426 struct dentry *d_tracer;
60427
60428 diff -urNp linux-2.6.39.4/kernel/trace/trace_events.c linux-2.6.39.4/kernel/trace/trace_events.c
60429 --- linux-2.6.39.4/kernel/trace/trace_events.c 2011-05-19 00:06:34.000000000 -0400
60430 +++ linux-2.6.39.4/kernel/trace/trace_events.c 2011-08-05 20:34:06.000000000 -0400
60431 @@ -1241,10 +1241,6 @@ static LIST_HEAD(ftrace_module_file_list
60432 struct ftrace_module_file_ops {
60433 struct list_head list;
60434 struct module *mod;
60435 - struct file_operations id;
60436 - struct file_operations enable;
60437 - struct file_operations format;
60438 - struct file_operations filter;
60439 };
60440
60441 static struct ftrace_module_file_ops *
60442 @@ -1265,17 +1261,12 @@ trace_create_file_ops(struct module *mod
60443
60444 file_ops->mod = mod;
60445
60446 - file_ops->id = ftrace_event_id_fops;
60447 - file_ops->id.owner = mod;
60448 -
60449 - file_ops->enable = ftrace_enable_fops;
60450 - file_ops->enable.owner = mod;
60451 -
60452 - file_ops->filter = ftrace_event_filter_fops;
60453 - file_ops->filter.owner = mod;
60454 -
60455 - file_ops->format = ftrace_event_format_fops;
60456 - file_ops->format.owner = mod;
60457 + pax_open_kernel();
60458 + *(void **)&mod->trace_id.owner = mod;
60459 + *(void **)&mod->trace_enable.owner = mod;
60460 + *(void **)&mod->trace_filter.owner = mod;
60461 + *(void **)&mod->trace_format.owner = mod;
60462 + pax_close_kernel();
60463
60464 list_add(&file_ops->list, &ftrace_module_file_list);
60465
60466 @@ -1299,8 +1290,8 @@ static void trace_module_add_events(stru
60467
60468 for_each_event(call, start, end) {
60469 __trace_add_event_call(*call, mod,
60470 - &file_ops->id, &file_ops->enable,
60471 - &file_ops->filter, &file_ops->format);
60472 + &mod->trace_id, &mod->trace_enable,
60473 + &mod->trace_filter, &mod->trace_format);
60474 }
60475 }
60476
60477 diff -urNp linux-2.6.39.4/kernel/trace/trace_mmiotrace.c linux-2.6.39.4/kernel/trace/trace_mmiotrace.c
60478 --- linux-2.6.39.4/kernel/trace/trace_mmiotrace.c 2011-05-19 00:06:34.000000000 -0400
60479 +++ linux-2.6.39.4/kernel/trace/trace_mmiotrace.c 2011-08-05 19:44:37.000000000 -0400
60480 @@ -24,7 +24,7 @@ struct header_iter {
60481 static struct trace_array *mmio_trace_array;
60482 static bool overrun_detected;
60483 static unsigned long prev_overruns;
60484 -static atomic_t dropped_count;
60485 +static atomic_unchecked_t dropped_count;
60486
60487 static void mmio_reset_data(struct trace_array *tr)
60488 {
60489 @@ -127,7 +127,7 @@ static void mmio_close(struct trace_iter
60490
60491 static unsigned long count_overruns(struct trace_iterator *iter)
60492 {
60493 - unsigned long cnt = atomic_xchg(&dropped_count, 0);
60494 + unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
60495 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
60496
60497 if (over > prev_overruns)
60498 @@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct
60499 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
60500 sizeof(*entry), 0, pc);
60501 if (!event) {
60502 - atomic_inc(&dropped_count);
60503 + atomic_inc_unchecked(&dropped_count);
60504 return;
60505 }
60506 entry = ring_buffer_event_data(event);
60507 @@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct
60508 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
60509 sizeof(*entry), 0, pc);
60510 if (!event) {
60511 - atomic_inc(&dropped_count);
60512 + atomic_inc_unchecked(&dropped_count);
60513 return;
60514 }
60515 entry = ring_buffer_event_data(event);
60516 diff -urNp linux-2.6.39.4/kernel/trace/trace_output.c linux-2.6.39.4/kernel/trace/trace_output.c
60517 --- linux-2.6.39.4/kernel/trace/trace_output.c 2011-05-19 00:06:34.000000000 -0400
60518 +++ linux-2.6.39.4/kernel/trace/trace_output.c 2011-08-05 19:44:37.000000000 -0400
60519 @@ -278,7 +278,7 @@ int trace_seq_path(struct trace_seq *s,
60520
60521 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
60522 if (!IS_ERR(p)) {
60523 - p = mangle_path(s->buffer + s->len, p, "\n");
60524 + p = mangle_path(s->buffer + s->len, p, "\n\\");
60525 if (p) {
60526 s->len = p - s->buffer;
60527 return 1;
60528 diff -urNp linux-2.6.39.4/kernel/trace/trace_stack.c linux-2.6.39.4/kernel/trace/trace_stack.c
60529 --- linux-2.6.39.4/kernel/trace/trace_stack.c 2011-05-19 00:06:34.000000000 -0400
60530 +++ linux-2.6.39.4/kernel/trace/trace_stack.c 2011-08-05 19:44:37.000000000 -0400
60531 @@ -50,7 +50,7 @@ static inline void check_stack(void)
60532 return;
60533
60534 /* we do not handle interrupt stacks yet */
60535 - if (!object_is_on_stack(&this_size))
60536 + if (!object_starts_on_stack(&this_size))
60537 return;
60538
60539 local_irq_save(flags);
60540 diff -urNp linux-2.6.39.4/kernel/trace/trace_workqueue.c linux-2.6.39.4/kernel/trace/trace_workqueue.c
60541 --- linux-2.6.39.4/kernel/trace/trace_workqueue.c 2011-05-19 00:06:34.000000000 -0400
60542 +++ linux-2.6.39.4/kernel/trace/trace_workqueue.c 2011-08-05 19:44:37.000000000 -0400
60543 @@ -22,7 +22,7 @@ struct cpu_workqueue_stats {
60544 int cpu;
60545 pid_t pid;
60546 /* Can be inserted from interrupt or user context, need to be atomic */
60547 - atomic_t inserted;
60548 + atomic_unchecked_t inserted;
60549 /*
60550 * Don't need to be atomic, works are serialized in a single workqueue thread
60551 * on a single CPU.
60552 @@ -60,7 +60,7 @@ probe_workqueue_insertion(void *ignore,
60553 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
60554 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
60555 if (node->pid == wq_thread->pid) {
60556 - atomic_inc(&node->inserted);
60557 + atomic_inc_unchecked(&node->inserted);
60558 goto found;
60559 }
60560 }
60561 @@ -210,7 +210,7 @@ static int workqueue_stat_show(struct se
60562 tsk = get_pid_task(pid, PIDTYPE_PID);
60563 if (tsk) {
60564 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
60565 - atomic_read(&cws->inserted), cws->executed,
60566 + atomic_read_unchecked(&cws->inserted), cws->executed,
60567 tsk->comm);
60568 put_task_struct(tsk);
60569 }
60570 diff -urNp linux-2.6.39.4/lib/bug.c linux-2.6.39.4/lib/bug.c
60571 --- linux-2.6.39.4/lib/bug.c 2011-05-19 00:06:34.000000000 -0400
60572 +++ linux-2.6.39.4/lib/bug.c 2011-08-05 19:44:37.000000000 -0400
60573 @@ -133,6 +133,8 @@ enum bug_trap_type report_bug(unsigned l
60574 return BUG_TRAP_TYPE_NONE;
60575
60576 bug = find_bug(bugaddr);
60577 + if (!bug)
60578 + return BUG_TRAP_TYPE_NONE;
60579
60580 file = NULL;
60581 line = 0;
60582 diff -urNp linux-2.6.39.4/lib/debugobjects.c linux-2.6.39.4/lib/debugobjects.c
60583 --- linux-2.6.39.4/lib/debugobjects.c 2011-07-09 09:18:51.000000000 -0400
60584 +++ linux-2.6.39.4/lib/debugobjects.c 2011-08-05 19:44:37.000000000 -0400
60585 @@ -284,7 +284,7 @@ static void debug_object_is_on_stack(voi
60586 if (limit > 4)
60587 return;
60588
60589 - is_on_stack = object_is_on_stack(addr);
60590 + is_on_stack = object_starts_on_stack(addr);
60591 if (is_on_stack == onstack)
60592 return;
60593
60594 diff -urNp linux-2.6.39.4/lib/dma-debug.c linux-2.6.39.4/lib/dma-debug.c
60595 --- linux-2.6.39.4/lib/dma-debug.c 2011-05-19 00:06:34.000000000 -0400
60596 +++ linux-2.6.39.4/lib/dma-debug.c 2011-08-05 19:44:37.000000000 -0400
60597 @@ -862,7 +862,7 @@ out:
60598
60599 static void check_for_stack(struct device *dev, void *addr)
60600 {
60601 - if (object_is_on_stack(addr))
60602 + if (object_starts_on_stack(addr))
60603 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
60604 "stack [addr=%p]\n", addr);
60605 }
60606 diff -urNp linux-2.6.39.4/lib/inflate.c linux-2.6.39.4/lib/inflate.c
60607 --- linux-2.6.39.4/lib/inflate.c 2011-05-19 00:06:34.000000000 -0400
60608 +++ linux-2.6.39.4/lib/inflate.c 2011-08-05 19:44:37.000000000 -0400
60609 @@ -269,7 +269,7 @@ static void free(void *where)
60610 malloc_ptr = free_mem_ptr;
60611 }
60612 #else
60613 -#define malloc(a) kmalloc(a, GFP_KERNEL)
60614 +#define malloc(a) kmalloc((a), GFP_KERNEL)
60615 #define free(a) kfree(a)
60616 #endif
60617
60618 diff -urNp linux-2.6.39.4/lib/Kconfig.debug linux-2.6.39.4/lib/Kconfig.debug
60619 --- linux-2.6.39.4/lib/Kconfig.debug 2011-05-19 00:06:34.000000000 -0400
60620 +++ linux-2.6.39.4/lib/Kconfig.debug 2011-08-05 19:44:37.000000000 -0400
60621 @@ -1078,6 +1078,7 @@ config LATENCYTOP
60622 depends on DEBUG_KERNEL
60623 depends on STACKTRACE_SUPPORT
60624 depends on PROC_FS
60625 + depends on !GRKERNSEC_HIDESYM
60626 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE
60627 select KALLSYMS
60628 select KALLSYMS_ALL
60629 diff -urNp linux-2.6.39.4/lib/kref.c linux-2.6.39.4/lib/kref.c
60630 --- linux-2.6.39.4/lib/kref.c 2011-05-19 00:06:34.000000000 -0400
60631 +++ linux-2.6.39.4/lib/kref.c 2011-08-05 19:44:37.000000000 -0400
60632 @@ -52,7 +52,7 @@ void kref_get(struct kref *kref)
60633 */
60634 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
60635 {
60636 - WARN_ON(release == NULL);
60637 + BUG_ON(release == NULL);
60638 WARN_ON(release == (void (*)(struct kref *))kfree);
60639
60640 if (atomic_dec_and_test(&kref->refcount)) {
60641 diff -urNp linux-2.6.39.4/lib/radix-tree.c linux-2.6.39.4/lib/radix-tree.c
60642 --- linux-2.6.39.4/lib/radix-tree.c 2011-05-19 00:06:34.000000000 -0400
60643 +++ linux-2.6.39.4/lib/radix-tree.c 2011-08-05 19:44:37.000000000 -0400
60644 @@ -80,7 +80,7 @@ struct radix_tree_preload {
60645 int nr;
60646 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
60647 };
60648 -static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
60649 +static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
60650
60651 static inline void *ptr_to_indirect(void *ptr)
60652 {
60653 diff -urNp linux-2.6.39.4/lib/vsprintf.c linux-2.6.39.4/lib/vsprintf.c
60654 --- linux-2.6.39.4/lib/vsprintf.c 2011-05-19 00:06:34.000000000 -0400
60655 +++ linux-2.6.39.4/lib/vsprintf.c 2011-08-05 19:44:37.000000000 -0400
60656 @@ -16,6 +16,9 @@
60657 * - scnprintf and vscnprintf
60658 */
60659
60660 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60661 +#define __INCLUDED_BY_HIDESYM 1
60662 +#endif
60663 #include <stdarg.h>
60664 #include <linux/module.h>
60665 #include <linux/types.h>
60666 @@ -435,7 +438,7 @@ char *symbol_string(char *buf, char *end
60667 char sym[KSYM_SYMBOL_LEN];
60668 if (ext == 'B')
60669 sprint_backtrace(sym, value);
60670 - else if (ext != 'f' && ext != 's')
60671 + else if (ext != 'f' && ext != 's' && ext != 'a')
60672 sprint_symbol(sym, value);
60673 else
60674 kallsyms_lookup(value, NULL, NULL, NULL, sym);
60675 @@ -797,7 +800,11 @@ char *uuid_string(char *buf, char *end,
60676 return string(buf, end, uuid, spec);
60677 }
60678
60679 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60680 +int kptr_restrict __read_mostly = 2;
60681 +#else
60682 int kptr_restrict __read_mostly;
60683 +#endif
60684
60685 /*
60686 * Show a '%p' thing. A kernel extension is that the '%p' is followed
60687 @@ -811,6 +818,8 @@ int kptr_restrict __read_mostly;
60688 * - 'S' For symbolic direct pointers with offset
60689 * - 's' For symbolic direct pointers without offset
60690 * - 'B' For backtraced symbolic direct pointers with offset
60691 + * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
60692 + * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
60693 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
60694 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
60695 * - 'M' For a 6-byte MAC address, it prints the address in the
60696 @@ -855,12 +864,12 @@ char *pointer(const char *fmt, char *buf
60697 {
60698 if (!ptr && *fmt != 'K') {
60699 /*
60700 - * Print (null) with the same width as a pointer so it makes
60701 + * Print (nil) with the same width as a pointer so it makes
60702 * tabular output look nice.
60703 */
60704 if (spec.field_width == -1)
60705 spec.field_width = 2 * sizeof(void *);
60706 - return string(buf, end, "(null)", spec);
60707 + return string(buf, end, "(nil)", spec);
60708 }
60709
60710 switch (*fmt) {
60711 @@ -870,6 +879,13 @@ char *pointer(const char *fmt, char *buf
60712 /* Fallthrough */
60713 case 'S':
60714 case 's':
60715 +#ifdef CONFIG_GRKERNSEC_HIDESYM
60716 + break;
60717 +#else
60718 + return symbol_string(buf, end, ptr, spec, *fmt);
60719 +#endif
60720 + case 'A':
60721 + case 'a':
60722 case 'B':
60723 return symbol_string(buf, end, ptr, spec, *fmt);
60724 case 'R':
60725 @@ -1632,11 +1648,11 @@ int bstr_printf(char *buf, size_t size,
60726 typeof(type) value; \
60727 if (sizeof(type) == 8) { \
60728 args = PTR_ALIGN(args, sizeof(u32)); \
60729 - *(u32 *)&value = *(u32 *)args; \
60730 - *((u32 *)&value + 1) = *(u32 *)(args + 4); \
60731 + *(u32 *)&value = *(const u32 *)args; \
60732 + *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
60733 } else { \
60734 args = PTR_ALIGN(args, sizeof(type)); \
60735 - value = *(typeof(type) *)args; \
60736 + value = *(const typeof(type) *)args; \
60737 } \
60738 args += sizeof(type); \
60739 value; \
60740 @@ -1699,7 +1715,7 @@ int bstr_printf(char *buf, size_t size,
60741 case FORMAT_TYPE_STR: {
60742 const char *str_arg = args;
60743 args += strlen(str_arg) + 1;
60744 - str = string(str, end, (char *)str_arg, spec);
60745 + str = string(str, end, str_arg, spec);
60746 break;
60747 }
60748
60749 diff -urNp linux-2.6.39.4/localversion-grsec linux-2.6.39.4/localversion-grsec
60750 --- linux-2.6.39.4/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
60751 +++ linux-2.6.39.4/localversion-grsec 2011-08-05 19:44:37.000000000 -0400
60752 @@ -0,0 +1 @@
60753 +-grsec
60754 diff -urNp linux-2.6.39.4/Makefile linux-2.6.39.4/Makefile
60755 --- linux-2.6.39.4/Makefile 2011-08-05 21:11:51.000000000 -0400
60756 +++ linux-2.6.39.4/Makefile 2011-08-07 14:17:20.000000000 -0400
60757 @@ -237,8 +237,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
60758
60759 HOSTCC = gcc
60760 HOSTCXX = g++
60761 -HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
60762 -HOSTCXXFLAGS = -O2
60763 +HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
60764 +HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
60765 +HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
60766
60767 # Decide whether to build built-in, modular, or both.
60768 # Normally, just do built-in.
60769 @@ -356,10 +357,12 @@ LINUXINCLUDE := -I$(srctree)/arch/$(h
60770 KBUILD_CPPFLAGS := -D__KERNEL__
60771
60772 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
60773 + -W -Wno-unused-parameter -Wno-missing-field-initializers \
60774 -fno-strict-aliasing -fno-common \
60775 -Werror-implicit-function-declaration \
60776 -Wno-format-security \
60777 -fno-delete-null-pointer-checks
60778 +KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
60779 KBUILD_AFLAGS_KERNEL :=
60780 KBUILD_CFLAGS_KERNEL :=
60781 KBUILD_AFLAGS := -D__ASSEMBLY__
60782 @@ -397,8 +400,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
60783 # Rules shared between *config targets and build targets
60784
60785 # Basic helpers built in scripts/
60786 -PHONY += scripts_basic
60787 -scripts_basic:
60788 +PHONY += scripts_basic gcc-plugins
60789 +scripts_basic: gcc-plugins
60790 $(Q)$(MAKE) $(build)=scripts/basic
60791 $(Q)rm -f .tmp_quiet_recordmcount
60792
60793 @@ -548,6 +551,25 @@ else
60794 KBUILD_CFLAGS += -O2
60795 endif
60796
60797 +ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
60798 +CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so
60799 +ifdef CONFIG_PAX_MEMORY_STACKLEAK
60800 +STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
60801 +endif
60802 +KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
60803 +export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
60804 +gcc-plugins:
60805 + $(Q)$(MAKE) $(build)=tools/gcc
60806 +else
60807 +gcc-plugins:
60808 +ifeq ($(call cc-ifversion, -ge, 0405, y), y)
60809 + $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.))
60810 +else
60811 + $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
60812 +endif
60813 + $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
60814 +endif
60815 +
60816 include $(srctree)/arch/$(SRCARCH)/Makefile
60817
60818 ifneq ($(CONFIG_FRAME_WARN),0)
60819 @@ -685,7 +707,7 @@ export mod_strip_cmd
60820
60821
60822 ifeq ($(KBUILD_EXTMOD),)
60823 -core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
60824 +core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
60825
60826 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
60827 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
60828 @@ -947,7 +969,7 @@ ifneq ($(KBUILD_SRC),)
60829 endif
60830
60831 # prepare2 creates a makefile if using a separate output directory
60832 -prepare2: prepare3 outputmakefile
60833 +prepare2: prepare3 outputmakefile gcc-plugins
60834
60835 prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
60836 include/config/auto.conf
60837 @@ -1375,7 +1397,7 @@ clean: $(clean-dirs)
60838 $(call cmd,rmdirs)
60839 $(call cmd,rmfiles)
60840 @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
60841 - \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
60842 + \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
60843 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
60844 -o -name '*.symtypes' -o -name 'modules.order' \
60845 -o -name modules.builtin -o -name '.tmp_*.o.*' \
60846 diff -urNp linux-2.6.39.4/mm/filemap.c linux-2.6.39.4/mm/filemap.c
60847 --- linux-2.6.39.4/mm/filemap.c 2011-05-19 00:06:34.000000000 -0400
60848 +++ linux-2.6.39.4/mm/filemap.c 2011-08-05 19:44:37.000000000 -0400
60849 @@ -1724,7 +1724,7 @@ int generic_file_mmap(struct file * file
60850 struct address_space *mapping = file->f_mapping;
60851
60852 if (!mapping->a_ops->readpage)
60853 - return -ENOEXEC;
60854 + return -ENODEV;
60855 file_accessed(file);
60856 vma->vm_ops = &generic_file_vm_ops;
60857 vma->vm_flags |= VM_CAN_NONLINEAR;
60858 @@ -2120,6 +2120,7 @@ inline int generic_write_checks(struct f
60859 *pos = i_size_read(inode);
60860
60861 if (limit != RLIM_INFINITY) {
60862 + gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
60863 if (*pos >= limit) {
60864 send_sig(SIGXFSZ, current, 0);
60865 return -EFBIG;
60866 diff -urNp linux-2.6.39.4/mm/fremap.c linux-2.6.39.4/mm/fremap.c
60867 --- linux-2.6.39.4/mm/fremap.c 2011-05-19 00:06:34.000000000 -0400
60868 +++ linux-2.6.39.4/mm/fremap.c 2011-08-05 19:44:37.000000000 -0400
60869 @@ -156,6 +156,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
60870 retry:
60871 vma = find_vma(mm, start);
60872
60873 +#ifdef CONFIG_PAX_SEGMEXEC
60874 + if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
60875 + goto out;
60876 +#endif
60877 +
60878 /*
60879 * Make sure the vma is shared, that it supports prefaulting,
60880 * and that the remapped range is valid and fully within
60881 @@ -224,7 +229,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
60882 /*
60883 * drop PG_Mlocked flag for over-mapped range
60884 */
60885 - unsigned int saved_flags = vma->vm_flags;
60886 + unsigned long saved_flags = vma->vm_flags;
60887 munlock_vma_pages_range(vma, start, start + size);
60888 vma->vm_flags = saved_flags;
60889 }
60890 diff -urNp linux-2.6.39.4/mm/highmem.c linux-2.6.39.4/mm/highmem.c
60891 --- linux-2.6.39.4/mm/highmem.c 2011-05-19 00:06:34.000000000 -0400
60892 +++ linux-2.6.39.4/mm/highmem.c 2011-08-05 19:44:37.000000000 -0400
60893 @@ -125,9 +125,10 @@ static void flush_all_zero_pkmaps(void)
60894 * So no dangers, even with speculative execution.
60895 */
60896 page = pte_page(pkmap_page_table[i]);
60897 + pax_open_kernel();
60898 pte_clear(&init_mm, (unsigned long)page_address(page),
60899 &pkmap_page_table[i]);
60900 -
60901 + pax_close_kernel();
60902 set_page_address(page, NULL);
60903 need_flush = 1;
60904 }
60905 @@ -186,9 +187,11 @@ start:
60906 }
60907 }
60908 vaddr = PKMAP_ADDR(last_pkmap_nr);
60909 +
60910 + pax_open_kernel();
60911 set_pte_at(&init_mm, vaddr,
60912 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
60913 -
60914 + pax_close_kernel();
60915 pkmap_count[last_pkmap_nr] = 1;
60916 set_page_address(page, (void *)vaddr);
60917
60918 diff -urNp linux-2.6.39.4/mm/huge_memory.c linux-2.6.39.4/mm/huge_memory.c
60919 --- linux-2.6.39.4/mm/huge_memory.c 2011-05-19 00:06:34.000000000 -0400
60920 +++ linux-2.6.39.4/mm/huge_memory.c 2011-08-05 19:44:37.000000000 -0400
60921 @@ -702,7 +702,7 @@ out:
60922 * run pte_offset_map on the pmd, if an huge pmd could
60923 * materialize from under us from a different thread.
60924 */
60925 - if (unlikely(__pte_alloc(mm, vma, pmd, address)))
60926 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
60927 return VM_FAULT_OOM;
60928 /* if an huge pmd materialized from under us just retry later */
60929 if (unlikely(pmd_trans_huge(*pmd)))
60930 diff -urNp linux-2.6.39.4/mm/hugetlb.c linux-2.6.39.4/mm/hugetlb.c
60931 --- linux-2.6.39.4/mm/hugetlb.c 2011-07-09 09:18:51.000000000 -0400
60932 +++ linux-2.6.39.4/mm/hugetlb.c 2011-08-05 19:44:37.000000000 -0400
60933 @@ -2339,6 +2339,27 @@ static int unmap_ref_private(struct mm_s
60934 return 1;
60935 }
60936
60937 +#ifdef CONFIG_PAX_SEGMEXEC
60938 +static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
60939 +{
60940 + struct mm_struct *mm = vma->vm_mm;
60941 + struct vm_area_struct *vma_m;
60942 + unsigned long address_m;
60943 + pte_t *ptep_m;
60944 +
60945 + vma_m = pax_find_mirror_vma(vma);
60946 + if (!vma_m)
60947 + return;
60948 +
60949 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
60950 + address_m = address + SEGMEXEC_TASK_SIZE;
60951 + ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
60952 + get_page(page_m);
60953 + hugepage_add_anon_rmap(page_m, vma_m, address_m);
60954 + set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
60955 +}
60956 +#endif
60957 +
60958 /*
60959 * Hugetlb_cow() should be called with page lock of the original hugepage held.
60960 */
60961 @@ -2440,6 +2461,11 @@ retry_avoidcopy:
60962 make_huge_pte(vma, new_page, 1));
60963 page_remove_rmap(old_page);
60964 hugepage_add_new_anon_rmap(new_page, vma, address);
60965 +
60966 +#ifdef CONFIG_PAX_SEGMEXEC
60967 + pax_mirror_huge_pte(vma, address, new_page);
60968 +#endif
60969 +
60970 /* Make the old page be freed below */
60971 new_page = old_page;
60972 mmu_notifier_invalidate_range_end(mm,
60973 @@ -2591,6 +2617,10 @@ retry:
60974 && (vma->vm_flags & VM_SHARED)));
60975 set_huge_pte_at(mm, address, ptep, new_pte);
60976
60977 +#ifdef CONFIG_PAX_SEGMEXEC
60978 + pax_mirror_huge_pte(vma, address, page);
60979 +#endif
60980 +
60981 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
60982 /* Optimization, do the COW without a second fault */
60983 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
60984 @@ -2620,6 +2650,10 @@ int hugetlb_fault(struct mm_struct *mm,
60985 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
60986 struct hstate *h = hstate_vma(vma);
60987
60988 +#ifdef CONFIG_PAX_SEGMEXEC
60989 + struct vm_area_struct *vma_m;
60990 +#endif
60991 +
60992 ptep = huge_pte_offset(mm, address);
60993 if (ptep) {
60994 entry = huge_ptep_get(ptep);
60995 @@ -2631,6 +2665,26 @@ int hugetlb_fault(struct mm_struct *mm,
60996 VM_FAULT_SET_HINDEX(h - hstates);
60997 }
60998
60999 +#ifdef CONFIG_PAX_SEGMEXEC
61000 + vma_m = pax_find_mirror_vma(vma);
61001 + if (vma_m) {
61002 + unsigned long address_m;
61003 +
61004 + if (vma->vm_start > vma_m->vm_start) {
61005 + address_m = address;
61006 + address -= SEGMEXEC_TASK_SIZE;
61007 + vma = vma_m;
61008 + h = hstate_vma(vma);
61009 + } else
61010 + address_m = address + SEGMEXEC_TASK_SIZE;
61011 +
61012 + if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
61013 + return VM_FAULT_OOM;
61014 + address_m &= HPAGE_MASK;
61015 + unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
61016 + }
61017 +#endif
61018 +
61019 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
61020 if (!ptep)
61021 return VM_FAULT_OOM;
61022 diff -urNp linux-2.6.39.4/mm/internal.h linux-2.6.39.4/mm/internal.h
61023 --- linux-2.6.39.4/mm/internal.h 2011-05-19 00:06:34.000000000 -0400
61024 +++ linux-2.6.39.4/mm/internal.h 2011-08-05 19:44:37.000000000 -0400
61025 @@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
61026 * in mm/page_alloc.c
61027 */
61028 extern void __free_pages_bootmem(struct page *page, unsigned int order);
61029 +extern void free_compound_page(struct page *page);
61030 extern void prep_compound_page(struct page *page, unsigned long order);
61031 #ifdef CONFIG_MEMORY_FAILURE
61032 extern bool is_free_buddy_page(struct page *page);
61033 diff -urNp linux-2.6.39.4/mm/Kconfig linux-2.6.39.4/mm/Kconfig
61034 --- linux-2.6.39.4/mm/Kconfig 2011-05-19 00:06:34.000000000 -0400
61035 +++ linux-2.6.39.4/mm/Kconfig 2011-08-05 19:44:37.000000000 -0400
61036 @@ -240,7 +240,7 @@ config KSM
61037 config DEFAULT_MMAP_MIN_ADDR
61038 int "Low address space to protect from user allocation"
61039 depends on MMU
61040 - default 4096
61041 + default 65536
61042 help
61043 This is the portion of low virtual memory which should be protected
61044 from userspace allocation. Keeping a user from writing to low pages
61045 diff -urNp linux-2.6.39.4/mm/kmemleak.c linux-2.6.39.4/mm/kmemleak.c
61046 --- linux-2.6.39.4/mm/kmemleak.c 2011-06-03 00:04:14.000000000 -0400
61047 +++ linux-2.6.39.4/mm/kmemleak.c 2011-08-05 19:44:37.000000000 -0400
61048 @@ -357,7 +357,7 @@ static void print_unreferenced(struct se
61049
61050 for (i = 0; i < object->trace_len; i++) {
61051 void *ptr = (void *)object->trace[i];
61052 - seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
61053 + seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
61054 }
61055 }
61056
61057 diff -urNp linux-2.6.39.4/mm/maccess.c linux-2.6.39.4/mm/maccess.c
61058 --- linux-2.6.39.4/mm/maccess.c 2011-05-19 00:06:34.000000000 -0400
61059 +++ linux-2.6.39.4/mm/maccess.c 2011-08-05 19:44:37.000000000 -0400
61060 @@ -15,10 +15,10 @@
61061 * happens, handle that and return -EFAULT.
61062 */
61063
61064 -long __weak probe_kernel_read(void *dst, void *src, size_t size)
61065 +long __weak probe_kernel_read(void *dst, const void *src, size_t size)
61066 __attribute__((alias("__probe_kernel_read")));
61067
61068 -long __probe_kernel_read(void *dst, void *src, size_t size)
61069 +long __probe_kernel_read(void *dst, const void *src, size_t size)
61070 {
61071 long ret;
61072 mm_segment_t old_fs = get_fs();
61073 @@ -43,10 +43,10 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
61074 * Safely write to address @dst from the buffer at @src. If a kernel fault
61075 * happens, handle that and return -EFAULT.
61076 */
61077 -long __weak probe_kernel_write(void *dst, void *src, size_t size)
61078 +long __weak probe_kernel_write(void *dst, const void *src, size_t size)
61079 __attribute__((alias("__probe_kernel_write")));
61080
61081 -long __probe_kernel_write(void *dst, void *src, size_t size)
61082 +long __probe_kernel_write(void *dst, const void *src, size_t size)
61083 {
61084 long ret;
61085 mm_segment_t old_fs = get_fs();
61086 diff -urNp linux-2.6.39.4/mm/madvise.c linux-2.6.39.4/mm/madvise.c
61087 --- linux-2.6.39.4/mm/madvise.c 2011-05-19 00:06:34.000000000 -0400
61088 +++ linux-2.6.39.4/mm/madvise.c 2011-08-05 19:44:37.000000000 -0400
61089 @@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a
61090 pgoff_t pgoff;
61091 unsigned long new_flags = vma->vm_flags;
61092
61093 +#ifdef CONFIG_PAX_SEGMEXEC
61094 + struct vm_area_struct *vma_m;
61095 +#endif
61096 +
61097 switch (behavior) {
61098 case MADV_NORMAL:
61099 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
61100 @@ -110,6 +114,13 @@ success:
61101 /*
61102 * vm_flags is protected by the mmap_sem held in write mode.
61103 */
61104 +
61105 +#ifdef CONFIG_PAX_SEGMEXEC
61106 + vma_m = pax_find_mirror_vma(vma);
61107 + if (vma_m)
61108 + vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
61109 +#endif
61110 +
61111 vma->vm_flags = new_flags;
61112
61113 out:
61114 @@ -168,6 +179,11 @@ static long madvise_dontneed(struct vm_a
61115 struct vm_area_struct ** prev,
61116 unsigned long start, unsigned long end)
61117 {
61118 +
61119 +#ifdef CONFIG_PAX_SEGMEXEC
61120 + struct vm_area_struct *vma_m;
61121 +#endif
61122 +
61123 *prev = vma;
61124 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
61125 return -EINVAL;
61126 @@ -180,6 +196,21 @@ static long madvise_dontneed(struct vm_a
61127 zap_page_range(vma, start, end - start, &details);
61128 } else
61129 zap_page_range(vma, start, end - start, NULL);
61130 +
61131 +#ifdef CONFIG_PAX_SEGMEXEC
61132 + vma_m = pax_find_mirror_vma(vma);
61133 + if (vma_m) {
61134 + if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
61135 + struct zap_details details = {
61136 + .nonlinear_vma = vma_m,
61137 + .last_index = ULONG_MAX,
61138 + };
61139 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
61140 + } else
61141 + zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
61142 + }
61143 +#endif
61144 +
61145 return 0;
61146 }
61147
61148 @@ -376,6 +407,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
61149 if (end < start)
61150 goto out;
61151
61152 +#ifdef CONFIG_PAX_SEGMEXEC
61153 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
61154 + if (end > SEGMEXEC_TASK_SIZE)
61155 + goto out;
61156 + } else
61157 +#endif
61158 +
61159 + if (end > TASK_SIZE)
61160 + goto out;
61161 +
61162 error = 0;
61163 if (end == start)
61164 goto out;
61165 diff -urNp linux-2.6.39.4/mm/memory.c linux-2.6.39.4/mm/memory.c
61166 --- linux-2.6.39.4/mm/memory.c 2011-05-19 00:06:34.000000000 -0400
61167 +++ linux-2.6.39.4/mm/memory.c 2011-08-05 19:44:37.000000000 -0400
61168 @@ -259,8 +259,12 @@ static inline void free_pmd_range(struct
61169 return;
61170
61171 pmd = pmd_offset(pud, start);
61172 +
61173 +#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
61174 pud_clear(pud);
61175 pmd_free_tlb(tlb, pmd, start);
61176 +#endif
61177 +
61178 }
61179
61180 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
61181 @@ -291,9 +295,12 @@ static inline void free_pud_range(struct
61182 if (end - 1 > ceiling - 1)
61183 return;
61184
61185 +#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
61186 pud = pud_offset(pgd, start);
61187 pgd_clear(pgd);
61188 pud_free_tlb(tlb, pud, start);
61189 +#endif
61190 +
61191 }
61192
61193 /*
61194 @@ -1410,12 +1417,6 @@ no_page_table:
61195 return page;
61196 }
61197
61198 -static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
61199 -{
61200 - return stack_guard_page_start(vma, addr) ||
61201 - stack_guard_page_end(vma, addr+PAGE_SIZE);
61202 -}
61203 -
61204 /**
61205 * __get_user_pages() - pin user pages in memory
61206 * @tsk: task_struct of target task
61207 @@ -1488,10 +1489,10 @@ int __get_user_pages(struct task_struct
61208 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
61209 i = 0;
61210
61211 - do {
61212 + while (nr_pages) {
61213 struct vm_area_struct *vma;
61214
61215 - vma = find_extend_vma(mm, start);
61216 + vma = find_vma(mm, start);
61217 if (!vma && in_gate_area(mm, start)) {
61218 unsigned long pg = start & PAGE_MASK;
61219 pgd_t *pgd;
61220 @@ -1539,7 +1540,7 @@ int __get_user_pages(struct task_struct
61221 goto next_page;
61222 }
61223
61224 - if (!vma ||
61225 + if (!vma || start < vma->vm_start ||
61226 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
61227 !(vm_flags & vma->vm_flags))
61228 return i ? : -EFAULT;
61229 @@ -1566,11 +1567,6 @@ int __get_user_pages(struct task_struct
61230 int ret;
61231 unsigned int fault_flags = 0;
61232
61233 - /* For mlock, just skip the stack guard page. */
61234 - if (foll_flags & FOLL_MLOCK) {
61235 - if (stack_guard_page(vma, start))
61236 - goto next_page;
61237 - }
61238 if (foll_flags & FOLL_WRITE)
61239 fault_flags |= FAULT_FLAG_WRITE;
61240 if (nonblocking)
61241 @@ -1644,7 +1640,7 @@ next_page:
61242 start += PAGE_SIZE;
61243 nr_pages--;
61244 } while (nr_pages && start < vma->vm_end);
61245 - } while (nr_pages);
61246 + }
61247 return i;
61248 }
61249 EXPORT_SYMBOL(__get_user_pages);
61250 @@ -1795,6 +1791,10 @@ static int insert_page(struct vm_area_st
61251 page_add_file_rmap(page);
61252 set_pte_at(mm, addr, pte, mk_pte(page, prot));
61253
61254 +#ifdef CONFIG_PAX_SEGMEXEC
61255 + pax_mirror_file_pte(vma, addr, page, ptl);
61256 +#endif
61257 +
61258 retval = 0;
61259 pte_unmap_unlock(pte, ptl);
61260 return retval;
61261 @@ -1829,10 +1829,22 @@ out:
61262 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
61263 struct page *page)
61264 {
61265 +
61266 +#ifdef CONFIG_PAX_SEGMEXEC
61267 + struct vm_area_struct *vma_m;
61268 +#endif
61269 +
61270 if (addr < vma->vm_start || addr >= vma->vm_end)
61271 return -EFAULT;
61272 if (!page_count(page))
61273 return -EINVAL;
61274 +
61275 +#ifdef CONFIG_PAX_SEGMEXEC
61276 + vma_m = pax_find_mirror_vma(vma);
61277 + if (vma_m)
61278 + vma_m->vm_flags |= VM_INSERTPAGE;
61279 +#endif
61280 +
61281 vma->vm_flags |= VM_INSERTPAGE;
61282 return insert_page(vma, addr, page, vma->vm_page_prot);
61283 }
61284 @@ -1918,6 +1930,7 @@ int vm_insert_mixed(struct vm_area_struc
61285 unsigned long pfn)
61286 {
61287 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
61288 + BUG_ON(vma->vm_mirror);
61289
61290 if (addr < vma->vm_start || addr >= vma->vm_end)
61291 return -EFAULT;
61292 @@ -2233,6 +2246,186 @@ static inline void cow_user_page(struct
61293 copy_user_highpage(dst, src, va, vma);
61294 }
61295
61296 +#ifdef CONFIG_PAX_SEGMEXEC
61297 +static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
61298 +{
61299 + struct mm_struct *mm = vma->vm_mm;
61300 + spinlock_t *ptl;
61301 + pte_t *pte, entry;
61302 +
61303 + pte = pte_offset_map_lock(mm, pmd, address, &ptl);
61304 + entry = *pte;
61305 + if (!pte_present(entry)) {
61306 + if (!pte_none(entry)) {
61307 + BUG_ON(pte_file(entry));
61308 + free_swap_and_cache(pte_to_swp_entry(entry));
61309 + pte_clear_not_present_full(mm, address, pte, 0);
61310 + }
61311 + } else {
61312 + struct page *page;
61313 +
61314 + flush_cache_page(vma, address, pte_pfn(entry));
61315 + entry = ptep_clear_flush(vma, address, pte);
61316 + BUG_ON(pte_dirty(entry));
61317 + page = vm_normal_page(vma, address, entry);
61318 + if (page) {
61319 + update_hiwater_rss(mm);
61320 + if (PageAnon(page))
61321 + dec_mm_counter_fast(mm, MM_ANONPAGES);
61322 + else
61323 + dec_mm_counter_fast(mm, MM_FILEPAGES);
61324 + page_remove_rmap(page);
61325 + page_cache_release(page);
61326 + }
61327 + }
61328 + pte_unmap_unlock(pte, ptl);
61329 +}
61330 +
61331 +/* PaX: if vma is mirrored, synchronize the mirror's PTE
61332 + *
61333 + * the ptl of the lower mapped page is held on entry and is not released on exit
61334 + * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
61335 + */
61336 +static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
61337 +{
61338 + struct mm_struct *mm = vma->vm_mm;
61339 + unsigned long address_m;
61340 + spinlock_t *ptl_m;
61341 + struct vm_area_struct *vma_m;
61342 + pmd_t *pmd_m;
61343 + pte_t *pte_m, entry_m;
61344 +
61345 + BUG_ON(!page_m || !PageAnon(page_m));
61346 +
61347 + vma_m = pax_find_mirror_vma(vma);
61348 + if (!vma_m)
61349 + return;
61350 +
61351 + BUG_ON(!PageLocked(page_m));
61352 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61353 + address_m = address + SEGMEXEC_TASK_SIZE;
61354 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61355 + pte_m = pte_offset_map(pmd_m, address_m);
61356 + ptl_m = pte_lockptr(mm, pmd_m);
61357 + if (ptl != ptl_m) {
61358 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61359 + if (!pte_none(*pte_m))
61360 + goto out;
61361 + }
61362 +
61363 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
61364 + page_cache_get(page_m);
61365 + page_add_anon_rmap(page_m, vma_m, address_m);
61366 + inc_mm_counter_fast(mm, MM_ANONPAGES);
61367 + set_pte_at(mm, address_m, pte_m, entry_m);
61368 + update_mmu_cache(vma_m, address_m, entry_m);
61369 +out:
61370 + if (ptl != ptl_m)
61371 + spin_unlock(ptl_m);
61372 + pte_unmap(pte_m);
61373 + unlock_page(page_m);
61374 +}
61375 +
61376 +void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
61377 +{
61378 + struct mm_struct *mm = vma->vm_mm;
61379 + unsigned long address_m;
61380 + spinlock_t *ptl_m;
61381 + struct vm_area_struct *vma_m;
61382 + pmd_t *pmd_m;
61383 + pte_t *pte_m, entry_m;
61384 +
61385 + BUG_ON(!page_m || PageAnon(page_m));
61386 +
61387 + vma_m = pax_find_mirror_vma(vma);
61388 + if (!vma_m)
61389 + return;
61390 +
61391 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61392 + address_m = address + SEGMEXEC_TASK_SIZE;
61393 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61394 + pte_m = pte_offset_map(pmd_m, address_m);
61395 + ptl_m = pte_lockptr(mm, pmd_m);
61396 + if (ptl != ptl_m) {
61397 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61398 + if (!pte_none(*pte_m))
61399 + goto out;
61400 + }
61401 +
61402 + entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
61403 + page_cache_get(page_m);
61404 + page_add_file_rmap(page_m);
61405 + inc_mm_counter_fast(mm, MM_FILEPAGES);
61406 + set_pte_at(mm, address_m, pte_m, entry_m);
61407 + update_mmu_cache(vma_m, address_m, entry_m);
61408 +out:
61409 + if (ptl != ptl_m)
61410 + spin_unlock(ptl_m);
61411 + pte_unmap(pte_m);
61412 +}
61413 +
61414 +static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
61415 +{
61416 + struct mm_struct *mm = vma->vm_mm;
61417 + unsigned long address_m;
61418 + spinlock_t *ptl_m;
61419 + struct vm_area_struct *vma_m;
61420 + pmd_t *pmd_m;
61421 + pte_t *pte_m, entry_m;
61422 +
61423 + vma_m = pax_find_mirror_vma(vma);
61424 + if (!vma_m)
61425 + return;
61426 +
61427 + BUG_ON(address >= SEGMEXEC_TASK_SIZE);
61428 + address_m = address + SEGMEXEC_TASK_SIZE;
61429 + pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
61430 + pte_m = pte_offset_map(pmd_m, address_m);
61431 + ptl_m = pte_lockptr(mm, pmd_m);
61432 + if (ptl != ptl_m) {
61433 + spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
61434 + if (!pte_none(*pte_m))
61435 + goto out;
61436 + }
61437 +
61438 + entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
61439 + set_pte_at(mm, address_m, pte_m, entry_m);
61440 +out:
61441 + if (ptl != ptl_m)
61442 + spin_unlock(ptl_m);
61443 + pte_unmap(pte_m);
61444 +}
61445 +
61446 +static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
61447 +{
61448 + struct page *page_m;
61449 + pte_t entry;
61450 +
61451 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
61452 + goto out;
61453 +
61454 + entry = *pte;
61455 + page_m = vm_normal_page(vma, address, entry);
61456 + if (!page_m)
61457 + pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
61458 + else if (PageAnon(page_m)) {
61459 + if (pax_find_mirror_vma(vma)) {
61460 + pte_unmap_unlock(pte, ptl);
61461 + lock_page(page_m);
61462 + pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
61463 + if (pte_same(entry, *pte))
61464 + pax_mirror_anon_pte(vma, address, page_m, ptl);
61465 + else
61466 + unlock_page(page_m);
61467 + }
61468 + } else
61469 + pax_mirror_file_pte(vma, address, page_m, ptl);
61470 +
61471 +out:
61472 + pte_unmap_unlock(pte, ptl);
61473 +}
61474 +#endif
61475 +
61476 /*
61477 * This routine handles present pages, when users try to write
61478 * to a shared page. It is done by copying the page to a new address
61479 @@ -2444,6 +2637,12 @@ gotten:
61480 */
61481 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
61482 if (likely(pte_same(*page_table, orig_pte))) {
61483 +
61484 +#ifdef CONFIG_PAX_SEGMEXEC
61485 + if (pax_find_mirror_vma(vma))
61486 + BUG_ON(!trylock_page(new_page));
61487 +#endif
61488 +
61489 if (old_page) {
61490 if (!PageAnon(old_page)) {
61491 dec_mm_counter_fast(mm, MM_FILEPAGES);
61492 @@ -2495,6 +2694,10 @@ gotten:
61493 page_remove_rmap(old_page);
61494 }
61495
61496 +#ifdef CONFIG_PAX_SEGMEXEC
61497 + pax_mirror_anon_pte(vma, address, new_page, ptl);
61498 +#endif
61499 +
61500 /* Free the old page.. */
61501 new_page = old_page;
61502 ret |= VM_FAULT_WRITE;
61503 @@ -2905,6 +3108,11 @@ static int do_swap_page(struct mm_struct
61504 swap_free(entry);
61505 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
61506 try_to_free_swap(page);
61507 +
61508 +#ifdef CONFIG_PAX_SEGMEXEC
61509 + if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
61510 +#endif
61511 +
61512 unlock_page(page);
61513 if (swapcache) {
61514 /*
61515 @@ -2928,6 +3136,11 @@ static int do_swap_page(struct mm_struct
61516
61517 /* No need to invalidate - it was non-present before */
61518 update_mmu_cache(vma, address, page_table);
61519 +
61520 +#ifdef CONFIG_PAX_SEGMEXEC
61521 + pax_mirror_anon_pte(vma, address, page, ptl);
61522 +#endif
61523 +
61524 unlock:
61525 pte_unmap_unlock(page_table, ptl);
61526 out:
61527 @@ -2947,40 +3160,6 @@ out_release:
61528 }
61529
61530 /*
61531 - * This is like a special single-page "expand_{down|up}wards()",
61532 - * except we must first make sure that 'address{-|+}PAGE_SIZE'
61533 - * doesn't hit another vma.
61534 - */
61535 -static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
61536 -{
61537 - address &= PAGE_MASK;
61538 - if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
61539 - struct vm_area_struct *prev = vma->vm_prev;
61540 -
61541 - /*
61542 - * Is there a mapping abutting this one below?
61543 - *
61544 - * That's only ok if it's the same stack mapping
61545 - * that has gotten split..
61546 - */
61547 - if (prev && prev->vm_end == address)
61548 - return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
61549 -
61550 - expand_stack(vma, address - PAGE_SIZE);
61551 - }
61552 - if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
61553 - struct vm_area_struct *next = vma->vm_next;
61554 -
61555 - /* As VM_GROWSDOWN but s/below/above/ */
61556 - if (next && next->vm_start == address + PAGE_SIZE)
61557 - return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
61558 -
61559 - expand_upwards(vma, address + PAGE_SIZE);
61560 - }
61561 - return 0;
61562 -}
61563 -
61564 -/*
61565 * We enter with non-exclusive mmap_sem (to exclude vma changes,
61566 * but allow concurrent faults), and pte mapped but not yet locked.
61567 * We return with mmap_sem still held, but pte unmapped and unlocked.
61568 @@ -2989,27 +3168,23 @@ static int do_anonymous_page(struct mm_s
61569 unsigned long address, pte_t *page_table, pmd_t *pmd,
61570 unsigned int flags)
61571 {
61572 - struct page *page;
61573 + struct page *page = NULL;
61574 spinlock_t *ptl;
61575 pte_t entry;
61576
61577 - pte_unmap(page_table);
61578 -
61579 - /* Check if we need to add a guard page to the stack */
61580 - if (check_stack_guard_page(vma, address) < 0)
61581 - return VM_FAULT_SIGBUS;
61582 -
61583 - /* Use the zero-page for reads */
61584 if (!(flags & FAULT_FLAG_WRITE)) {
61585 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
61586 vma->vm_page_prot));
61587 - page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
61588 + ptl = pte_lockptr(mm, pmd);
61589 + spin_lock(ptl);
61590 if (!pte_none(*page_table))
61591 goto unlock;
61592 goto setpte;
61593 }
61594
61595 /* Allocate our own private page. */
61596 + pte_unmap(page_table);
61597 +
61598 if (unlikely(anon_vma_prepare(vma)))
61599 goto oom;
61600 page = alloc_zeroed_user_highpage_movable(vma, address);
61601 @@ -3028,6 +3203,11 @@ static int do_anonymous_page(struct mm_s
61602 if (!pte_none(*page_table))
61603 goto release;
61604
61605 +#ifdef CONFIG_PAX_SEGMEXEC
61606 + if (pax_find_mirror_vma(vma))
61607 + BUG_ON(!trylock_page(page));
61608 +#endif
61609 +
61610 inc_mm_counter_fast(mm, MM_ANONPAGES);
61611 page_add_new_anon_rmap(page, vma, address);
61612 setpte:
61613 @@ -3035,6 +3215,12 @@ setpte:
61614
61615 /* No need to invalidate - it was non-present before */
61616 update_mmu_cache(vma, address, page_table);
61617 +
61618 +#ifdef CONFIG_PAX_SEGMEXEC
61619 + if (page)
61620 + pax_mirror_anon_pte(vma, address, page, ptl);
61621 +#endif
61622 +
61623 unlock:
61624 pte_unmap_unlock(page_table, ptl);
61625 return 0;
61626 @@ -3172,6 +3358,12 @@ static int __do_fault(struct mm_struct *
61627 */
61628 /* Only go through if we didn't race with anybody else... */
61629 if (likely(pte_same(*page_table, orig_pte))) {
61630 +
61631 +#ifdef CONFIG_PAX_SEGMEXEC
61632 + if (anon && pax_find_mirror_vma(vma))
61633 + BUG_ON(!trylock_page(page));
61634 +#endif
61635 +
61636 flush_icache_page(vma, page);
61637 entry = mk_pte(page, vma->vm_page_prot);
61638 if (flags & FAULT_FLAG_WRITE)
61639 @@ -3191,6 +3383,14 @@ static int __do_fault(struct mm_struct *
61640
61641 /* no need to invalidate: a not-present page won't be cached */
61642 update_mmu_cache(vma, address, page_table);
61643 +
61644 +#ifdef CONFIG_PAX_SEGMEXEC
61645 + if (anon)
61646 + pax_mirror_anon_pte(vma, address, page, ptl);
61647 + else
61648 + pax_mirror_file_pte(vma, address, page, ptl);
61649 +#endif
61650 +
61651 } else {
61652 if (charged)
61653 mem_cgroup_uncharge_page(page);
61654 @@ -3338,6 +3538,12 @@ int handle_pte_fault(struct mm_struct *m
61655 if (flags & FAULT_FLAG_WRITE)
61656 flush_tlb_fix_spurious_fault(vma, address);
61657 }
61658 +
61659 +#ifdef CONFIG_PAX_SEGMEXEC
61660 + pax_mirror_pte(vma, address, pte, pmd, ptl);
61661 + return 0;
61662 +#endif
61663 +
61664 unlock:
61665 pte_unmap_unlock(pte, ptl);
61666 return 0;
61667 @@ -3354,6 +3560,10 @@ int handle_mm_fault(struct mm_struct *mm
61668 pmd_t *pmd;
61669 pte_t *pte;
61670
61671 +#ifdef CONFIG_PAX_SEGMEXEC
61672 + struct vm_area_struct *vma_m;
61673 +#endif
61674 +
61675 __set_current_state(TASK_RUNNING);
61676
61677 count_vm_event(PGFAULT);
61678 @@ -3364,6 +3574,34 @@ int handle_mm_fault(struct mm_struct *mm
61679 if (unlikely(is_vm_hugetlb_page(vma)))
61680 return hugetlb_fault(mm, vma, address, flags);
61681
61682 +#ifdef CONFIG_PAX_SEGMEXEC
61683 + vma_m = pax_find_mirror_vma(vma);
61684 + if (vma_m) {
61685 + unsigned long address_m;
61686 + pgd_t *pgd_m;
61687 + pud_t *pud_m;
61688 + pmd_t *pmd_m;
61689 +
61690 + if (vma->vm_start > vma_m->vm_start) {
61691 + address_m = address;
61692 + address -= SEGMEXEC_TASK_SIZE;
61693 + vma = vma_m;
61694 + } else
61695 + address_m = address + SEGMEXEC_TASK_SIZE;
61696 +
61697 + pgd_m = pgd_offset(mm, address_m);
61698 + pud_m = pud_alloc(mm, pgd_m, address_m);
61699 + if (!pud_m)
61700 + return VM_FAULT_OOM;
61701 + pmd_m = pmd_alloc(mm, pud_m, address_m);
61702 + if (!pmd_m)
61703 + return VM_FAULT_OOM;
61704 + if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
61705 + return VM_FAULT_OOM;
61706 + pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
61707 + }
61708 +#endif
61709 +
61710 pgd = pgd_offset(mm, address);
61711 pud = pud_alloc(mm, pgd, address);
61712 if (!pud)
61713 @@ -3393,7 +3631,7 @@ int handle_mm_fault(struct mm_struct *mm
61714 * run pte_offset_map on the pmd, if an huge pmd could
61715 * materialize from under us from a different thread.
61716 */
61717 - if (unlikely(pmd_none(*pmd)) && __pte_alloc(mm, vma, pmd, address))
61718 + if (unlikely(pmd_none(*pmd) && __pte_alloc(mm, vma, pmd, address)))
61719 return VM_FAULT_OOM;
61720 /* if an huge pmd materialized from under us just retry later */
61721 if (unlikely(pmd_trans_huge(*pmd)))
61722 @@ -3497,7 +3735,7 @@ static int __init gate_vma_init(void)
61723 gate_vma.vm_start = FIXADDR_USER_START;
61724 gate_vma.vm_end = FIXADDR_USER_END;
61725 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
61726 - gate_vma.vm_page_prot = __P101;
61727 + gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
61728 /*
61729 * Make sure the vDSO gets into every core dump.
61730 * Dumping its contents makes post-mortem fully interpretable later
61731 diff -urNp linux-2.6.39.4/mm/memory-failure.c linux-2.6.39.4/mm/memory-failure.c
61732 --- linux-2.6.39.4/mm/memory-failure.c 2011-07-09 09:18:51.000000000 -0400
61733 +++ linux-2.6.39.4/mm/memory-failure.c 2011-08-05 19:44:37.000000000 -0400
61734 @@ -59,7 +59,7 @@ int sysctl_memory_failure_early_kill __r
61735
61736 int sysctl_memory_failure_recovery __read_mostly = 1;
61737
61738 -atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61739 +atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
61740
61741 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
61742
61743 @@ -1013,7 +1013,7 @@ int __memory_failure(unsigned long pfn,
61744 }
61745
61746 nr_pages = 1 << compound_trans_order(hpage);
61747 - atomic_long_add(nr_pages, &mce_bad_pages);
61748 + atomic_long_add_unchecked(nr_pages, &mce_bad_pages);
61749
61750 /*
61751 * We need/can do nothing about count=0 pages.
61752 @@ -1043,7 +1043,7 @@ int __memory_failure(unsigned long pfn,
61753 if (!PageHWPoison(hpage)
61754 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
61755 || (p != hpage && TestSetPageHWPoison(hpage))) {
61756 - atomic_long_sub(nr_pages, &mce_bad_pages);
61757 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61758 return 0;
61759 }
61760 set_page_hwpoison_huge_page(hpage);
61761 @@ -1101,7 +1101,7 @@ int __memory_failure(unsigned long pfn,
61762 }
61763 if (hwpoison_filter(p)) {
61764 if (TestClearPageHWPoison(p))
61765 - atomic_long_sub(nr_pages, &mce_bad_pages);
61766 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61767 unlock_page(hpage);
61768 put_page(hpage);
61769 return 0;
61770 @@ -1227,7 +1227,7 @@ int unpoison_memory(unsigned long pfn)
61771 return 0;
61772 }
61773 if (TestClearPageHWPoison(p))
61774 - atomic_long_sub(nr_pages, &mce_bad_pages);
61775 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61776 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
61777 return 0;
61778 }
61779 @@ -1241,7 +1241,7 @@ int unpoison_memory(unsigned long pfn)
61780 */
61781 if (TestClearPageHWPoison(page)) {
61782 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
61783 - atomic_long_sub(nr_pages, &mce_bad_pages);
61784 + atomic_long_sub_unchecked(nr_pages, &mce_bad_pages);
61785 freeit = 1;
61786 if (PageHuge(page))
61787 clear_page_hwpoison_huge_page(page);
61788 @@ -1354,7 +1354,7 @@ static int soft_offline_huge_page(struct
61789 }
61790 done:
61791 if (!PageHWPoison(hpage))
61792 - atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages);
61793 + atomic_long_add_unchecked(1 << compound_trans_order(hpage), &mce_bad_pages);
61794 set_page_hwpoison_huge_page(hpage);
61795 dequeue_hwpoisoned_huge_page(hpage);
61796 /* keep elevated page count for bad page */
61797 @@ -1484,7 +1484,7 @@ int soft_offline_page(struct page *page,
61798 return ret;
61799
61800 done:
61801 - atomic_long_add(1, &mce_bad_pages);
61802 + atomic_long_add_unchecked(1, &mce_bad_pages);
61803 SetPageHWPoison(page);
61804 /* keep elevated page count for bad page */
61805 return ret;
61806 diff -urNp linux-2.6.39.4/mm/mempolicy.c linux-2.6.39.4/mm/mempolicy.c
61807 --- linux-2.6.39.4/mm/mempolicy.c 2011-05-19 00:06:34.000000000 -0400
61808 +++ linux-2.6.39.4/mm/mempolicy.c 2011-08-05 19:44:37.000000000 -0400
61809 @@ -643,6 +643,10 @@ static int mbind_range(struct mm_struct
61810 unsigned long vmstart;
61811 unsigned long vmend;
61812
61813 +#ifdef CONFIG_PAX_SEGMEXEC
61814 + struct vm_area_struct *vma_m;
61815 +#endif
61816 +
61817 vma = find_vma_prev(mm, start, &prev);
61818 if (!vma || vma->vm_start > start)
61819 return -EFAULT;
61820 @@ -673,6 +677,16 @@ static int mbind_range(struct mm_struct
61821 err = policy_vma(vma, new_pol);
61822 if (err)
61823 goto out;
61824 +
61825 +#ifdef CONFIG_PAX_SEGMEXEC
61826 + vma_m = pax_find_mirror_vma(vma);
61827 + if (vma_m) {
61828 + err = policy_vma(vma_m, new_pol);
61829 + if (err)
61830 + goto out;
61831 + }
61832 +#endif
61833 +
61834 }
61835
61836 out:
61837 @@ -1106,6 +1120,17 @@ static long do_mbind(unsigned long start
61838
61839 if (end < start)
61840 return -EINVAL;
61841 +
61842 +#ifdef CONFIG_PAX_SEGMEXEC
61843 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
61844 + if (end > SEGMEXEC_TASK_SIZE)
61845 + return -EINVAL;
61846 + } else
61847 +#endif
61848 +
61849 + if (end > TASK_SIZE)
61850 + return -EINVAL;
61851 +
61852 if (end == start)
61853 return 0;
61854
61855 @@ -1324,6 +1349,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61856 if (!mm)
61857 goto out;
61858
61859 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61860 + if (mm != current->mm &&
61861 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61862 + err = -EPERM;
61863 + goto out;
61864 + }
61865 +#endif
61866 +
61867 /*
61868 * Check if this process has the right to modify the specified
61869 * process. The right exists if the process has administrative
61870 @@ -1333,8 +1366,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
61871 rcu_read_lock();
61872 tcred = __task_cred(task);
61873 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61874 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
61875 - !capable(CAP_SYS_NICE)) {
61876 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61877 rcu_read_unlock();
61878 err = -EPERM;
61879 goto out;
61880 @@ -2634,7 +2666,7 @@ int show_numa_map(struct seq_file *m, vo
61881
61882 if (file) {
61883 seq_printf(m, " file=");
61884 - seq_path(m, &file->f_path, "\n\t= ");
61885 + seq_path(m, &file->f_path, "\n\t\\= ");
61886 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
61887 seq_printf(m, " heap");
61888 } else if (vma->vm_start <= mm->start_stack &&
61889 diff -urNp linux-2.6.39.4/mm/migrate.c linux-2.6.39.4/mm/migrate.c
61890 --- linux-2.6.39.4/mm/migrate.c 2011-07-09 09:18:51.000000000 -0400
61891 +++ linux-2.6.39.4/mm/migrate.c 2011-08-05 19:44:37.000000000 -0400
61892 @@ -1133,6 +1133,8 @@ static int do_pages_move(struct mm_struc
61893 unsigned long chunk_start;
61894 int err;
61895
61896 + pax_track_stack();
61897 +
61898 task_nodes = cpuset_mems_allowed(task);
61899
61900 err = -ENOMEM;
61901 @@ -1317,6 +1319,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61902 if (!mm)
61903 return -EINVAL;
61904
61905 +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
61906 + if (mm != current->mm &&
61907 + (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
61908 + err = -EPERM;
61909 + goto out;
61910 + }
61911 +#endif
61912 +
61913 /*
61914 * Check if this process has the right to modify the specified
61915 * process. The right exists if the process has administrative
61916 @@ -1326,8 +1336,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
61917 rcu_read_lock();
61918 tcred = __task_cred(task);
61919 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
61920 - cred->uid != tcred->suid && cred->uid != tcred->uid &&
61921 - !capable(CAP_SYS_NICE)) {
61922 + cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
61923 rcu_read_unlock();
61924 err = -EPERM;
61925 goto out;
61926 diff -urNp linux-2.6.39.4/mm/mlock.c linux-2.6.39.4/mm/mlock.c
61927 --- linux-2.6.39.4/mm/mlock.c 2011-05-19 00:06:34.000000000 -0400
61928 +++ linux-2.6.39.4/mm/mlock.c 2011-08-05 19:44:37.000000000 -0400
61929 @@ -13,6 +13,7 @@
61930 #include <linux/pagemap.h>
61931 #include <linux/mempolicy.h>
61932 #include <linux/syscalls.h>
61933 +#include <linux/security.h>
61934 #include <linux/sched.h>
61935 #include <linux/module.h>
61936 #include <linux/rmap.h>
61937 @@ -377,6 +378,9 @@ static int do_mlock(unsigned long start,
61938 return -EINVAL;
61939 if (end == start)
61940 return 0;
61941 + if (end > TASK_SIZE)
61942 + return -EINVAL;
61943 +
61944 vma = find_vma_prev(current->mm, start, &prev);
61945 if (!vma || vma->vm_start > start)
61946 return -ENOMEM;
61947 @@ -387,6 +391,11 @@ static int do_mlock(unsigned long start,
61948 for (nstart = start ; ; ) {
61949 unsigned int newflags;
61950
61951 +#ifdef CONFIG_PAX_SEGMEXEC
61952 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61953 + break;
61954 +#endif
61955 +
61956 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
61957
61958 newflags = vma->vm_flags | VM_LOCKED;
61959 @@ -492,6 +501,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
61960 lock_limit >>= PAGE_SHIFT;
61961
61962 /* check against resource limits */
61963 + gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
61964 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
61965 error = do_mlock(start, len, 1);
61966 up_write(&current->mm->mmap_sem);
61967 @@ -515,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
61968 static int do_mlockall(int flags)
61969 {
61970 struct vm_area_struct * vma, * prev = NULL;
61971 - unsigned int def_flags = 0;
61972
61973 if (flags & MCL_FUTURE)
61974 - def_flags = VM_LOCKED;
61975 - current->mm->def_flags = def_flags;
61976 + current->mm->def_flags |= VM_LOCKED;
61977 + else
61978 + current->mm->def_flags &= ~VM_LOCKED;
61979 if (flags == MCL_FUTURE)
61980 goto out;
61981
61982 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
61983 - unsigned int newflags;
61984 + unsigned long newflags;
61985 +
61986 +#ifdef CONFIG_PAX_SEGMEXEC
61987 + if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
61988 + break;
61989 +#endif
61990
61991 + BUG_ON(vma->vm_end > TASK_SIZE);
61992 newflags = vma->vm_flags | VM_LOCKED;
61993 if (!(flags & MCL_CURRENT))
61994 newflags &= ~VM_LOCKED;
61995 @@ -557,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
61996 lock_limit >>= PAGE_SHIFT;
61997
61998 ret = -ENOMEM;
61999 + gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
62000 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
62001 capable(CAP_IPC_LOCK))
62002 ret = do_mlockall(flags);
62003 diff -urNp linux-2.6.39.4/mm/mmap.c linux-2.6.39.4/mm/mmap.c
62004 --- linux-2.6.39.4/mm/mmap.c 2011-05-19 00:06:34.000000000 -0400
62005 +++ linux-2.6.39.4/mm/mmap.c 2011-08-05 20:34:06.000000000 -0400
62006 @@ -46,6 +46,16 @@
62007 #define arch_rebalance_pgtables(addr, len) (addr)
62008 #endif
62009
62010 +static inline void verify_mm_writelocked(struct mm_struct *mm)
62011 +{
62012 +#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
62013 + if (unlikely(down_read_trylock(&mm->mmap_sem))) {
62014 + up_read(&mm->mmap_sem);
62015 + BUG();
62016 + }
62017 +#endif
62018 +}
62019 +
62020 static void unmap_region(struct mm_struct *mm,
62021 struct vm_area_struct *vma, struct vm_area_struct *prev,
62022 unsigned long start, unsigned long end);
62023 @@ -71,22 +81,32 @@ static void unmap_region(struct mm_struc
62024 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
62025 *
62026 */
62027 -pgprot_t protection_map[16] = {
62028 +pgprot_t protection_map[16] __read_only = {
62029 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
62030 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
62031 };
62032
62033 pgprot_t vm_get_page_prot(unsigned long vm_flags)
62034 {
62035 - return __pgprot(pgprot_val(protection_map[vm_flags &
62036 + pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
62037 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
62038 pgprot_val(arch_vm_get_page_prot(vm_flags)));
62039 +
62040 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62041 + if (!(__supported_pte_mask & _PAGE_NX) &&
62042 + (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
62043 + (vm_flags & (VM_READ | VM_WRITE)))
62044 + prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
62045 +#endif
62046 +
62047 + return prot;
62048 }
62049 EXPORT_SYMBOL(vm_get_page_prot);
62050
62051 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
62052 int sysctl_overcommit_ratio = 50; /* default is 50% */
62053 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
62054 +unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
62055 struct percpu_counter vm_committed_as;
62056
62057 /*
62058 @@ -232,6 +252,7 @@ static struct vm_area_struct *remove_vma
62059 struct vm_area_struct *next = vma->vm_next;
62060
62061 might_sleep();
62062 + BUG_ON(vma->vm_mirror);
62063 if (vma->vm_ops && vma->vm_ops->close)
62064 vma->vm_ops->close(vma);
62065 if (vma->vm_file) {
62066 @@ -276,6 +297,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
62067 * not page aligned -Ram Gupta
62068 */
62069 rlim = rlimit(RLIMIT_DATA);
62070 + gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
62071 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
62072 (mm->end_data - mm->start_data) > rlim)
62073 goto out;
62074 @@ -719,6 +741,12 @@ static int
62075 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
62076 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
62077 {
62078 +
62079 +#ifdef CONFIG_PAX_SEGMEXEC
62080 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
62081 + return 0;
62082 +#endif
62083 +
62084 if (is_mergeable_vma(vma, file, vm_flags) &&
62085 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
62086 if (vma->vm_pgoff == vm_pgoff)
62087 @@ -738,6 +766,12 @@ static int
62088 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
62089 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
62090 {
62091 +
62092 +#ifdef CONFIG_PAX_SEGMEXEC
62093 + if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
62094 + return 0;
62095 +#endif
62096 +
62097 if (is_mergeable_vma(vma, file, vm_flags) &&
62098 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
62099 pgoff_t vm_pglen;
62100 @@ -780,13 +814,20 @@ can_vma_merge_after(struct vm_area_struc
62101 struct vm_area_struct *vma_merge(struct mm_struct *mm,
62102 struct vm_area_struct *prev, unsigned long addr,
62103 unsigned long end, unsigned long vm_flags,
62104 - struct anon_vma *anon_vma, struct file *file,
62105 + struct anon_vma *anon_vma, struct file *file,
62106 pgoff_t pgoff, struct mempolicy *policy)
62107 {
62108 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
62109 struct vm_area_struct *area, *next;
62110 int err;
62111
62112 +#ifdef CONFIG_PAX_SEGMEXEC
62113 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
62114 + struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
62115 +
62116 + BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
62117 +#endif
62118 +
62119 /*
62120 * We later require that vma->vm_flags == vm_flags,
62121 * so this tests vma->vm_flags & VM_SPECIAL, too.
62122 @@ -802,6 +843,15 @@ struct vm_area_struct *vma_merge(struct
62123 if (next && next->vm_end == end) /* cases 6, 7, 8 */
62124 next = next->vm_next;
62125
62126 +#ifdef CONFIG_PAX_SEGMEXEC
62127 + if (prev)
62128 + prev_m = pax_find_mirror_vma(prev);
62129 + if (area)
62130 + area_m = pax_find_mirror_vma(area);
62131 + if (next)
62132 + next_m = pax_find_mirror_vma(next);
62133 +#endif
62134 +
62135 /*
62136 * Can it merge with the predecessor?
62137 */
62138 @@ -821,9 +871,24 @@ struct vm_area_struct *vma_merge(struct
62139 /* cases 1, 6 */
62140 err = vma_adjust(prev, prev->vm_start,
62141 next->vm_end, prev->vm_pgoff, NULL);
62142 - } else /* cases 2, 5, 7 */
62143 +
62144 +#ifdef CONFIG_PAX_SEGMEXEC
62145 + if (!err && prev_m)
62146 + err = vma_adjust(prev_m, prev_m->vm_start,
62147 + next_m->vm_end, prev_m->vm_pgoff, NULL);
62148 +#endif
62149 +
62150 + } else { /* cases 2, 5, 7 */
62151 err = vma_adjust(prev, prev->vm_start,
62152 end, prev->vm_pgoff, NULL);
62153 +
62154 +#ifdef CONFIG_PAX_SEGMEXEC
62155 + if (!err && prev_m)
62156 + err = vma_adjust(prev_m, prev_m->vm_start,
62157 + end_m, prev_m->vm_pgoff, NULL);
62158 +#endif
62159 +
62160 + }
62161 if (err)
62162 return NULL;
62163 khugepaged_enter_vma_merge(prev);
62164 @@ -837,12 +902,27 @@ struct vm_area_struct *vma_merge(struct
62165 mpol_equal(policy, vma_policy(next)) &&
62166 can_vma_merge_before(next, vm_flags,
62167 anon_vma, file, pgoff+pglen)) {
62168 - if (prev && addr < prev->vm_end) /* case 4 */
62169 + if (prev && addr < prev->vm_end) { /* case 4 */
62170 err = vma_adjust(prev, prev->vm_start,
62171 addr, prev->vm_pgoff, NULL);
62172 - else /* cases 3, 8 */
62173 +
62174 +#ifdef CONFIG_PAX_SEGMEXEC
62175 + if (!err && prev_m)
62176 + err = vma_adjust(prev_m, prev_m->vm_start,
62177 + addr_m, prev_m->vm_pgoff, NULL);
62178 +#endif
62179 +
62180 + } else { /* cases 3, 8 */
62181 err = vma_adjust(area, addr, next->vm_end,
62182 next->vm_pgoff - pglen, NULL);
62183 +
62184 +#ifdef CONFIG_PAX_SEGMEXEC
62185 + if (!err && area_m)
62186 + err = vma_adjust(area_m, addr_m, next_m->vm_end,
62187 + next_m->vm_pgoff - pglen, NULL);
62188 +#endif
62189 +
62190 + }
62191 if (err)
62192 return NULL;
62193 khugepaged_enter_vma_merge(area);
62194 @@ -958,14 +1038,11 @@ none:
62195 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
62196 struct file *file, long pages)
62197 {
62198 - const unsigned long stack_flags
62199 - = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
62200 -
62201 if (file) {
62202 mm->shared_vm += pages;
62203 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
62204 mm->exec_vm += pages;
62205 - } else if (flags & stack_flags)
62206 + } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
62207 mm->stack_vm += pages;
62208 if (flags & (VM_RESERVED|VM_IO))
62209 mm->reserved_vm += pages;
62210 @@ -992,7 +1069,7 @@ unsigned long do_mmap_pgoff(struct file
62211 * (the exception is when the underlying filesystem is noexec
62212 * mounted, in which case we dont add PROT_EXEC.)
62213 */
62214 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
62215 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
62216 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
62217 prot |= PROT_EXEC;
62218
62219 @@ -1018,7 +1095,7 @@ unsigned long do_mmap_pgoff(struct file
62220 /* Obtain the address to map to. we verify (or select) it and ensure
62221 * that it represents a valid section of the address space.
62222 */
62223 - addr = get_unmapped_area(file, addr, len, pgoff, flags);
62224 + addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
62225 if (addr & ~PAGE_MASK)
62226 return addr;
62227
62228 @@ -1029,6 +1106,36 @@ unsigned long do_mmap_pgoff(struct file
62229 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
62230 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
62231
62232 +#ifdef CONFIG_PAX_MPROTECT
62233 + if (mm->pax_flags & MF_PAX_MPROTECT) {
62234 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
62235 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
62236 + gr_log_rwxmmap(file);
62237 +
62238 +#ifdef CONFIG_PAX_EMUPLT
62239 + vm_flags &= ~VM_EXEC;
62240 +#else
62241 + return -EPERM;
62242 +#endif
62243 +
62244 + }
62245 +
62246 + if (!(vm_flags & VM_EXEC))
62247 + vm_flags &= ~VM_MAYEXEC;
62248 +#else
62249 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
62250 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
62251 +#endif
62252 + else
62253 + vm_flags &= ~VM_MAYWRITE;
62254 + }
62255 +#endif
62256 +
62257 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62258 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
62259 + vm_flags &= ~VM_PAGEEXEC;
62260 +#endif
62261 +
62262 if (flags & MAP_LOCKED)
62263 if (!can_do_mlock())
62264 return -EPERM;
62265 @@ -1040,6 +1147,7 @@ unsigned long do_mmap_pgoff(struct file
62266 locked += mm->locked_vm;
62267 lock_limit = rlimit(RLIMIT_MEMLOCK);
62268 lock_limit >>= PAGE_SHIFT;
62269 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
62270 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
62271 return -EAGAIN;
62272 }
62273 @@ -1110,6 +1218,9 @@ unsigned long do_mmap_pgoff(struct file
62274 if (error)
62275 return error;
62276
62277 + if (!gr_acl_handle_mmap(file, prot))
62278 + return -EACCES;
62279 +
62280 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
62281 }
62282 EXPORT_SYMBOL(do_mmap_pgoff);
62283 @@ -1187,10 +1298,10 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_ar
62284 */
62285 int vma_wants_writenotify(struct vm_area_struct *vma)
62286 {
62287 - unsigned int vm_flags = vma->vm_flags;
62288 + unsigned long vm_flags = vma->vm_flags;
62289
62290 /* If it was private or non-writable, the write bit is already clear */
62291 - if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
62292 + if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
62293 return 0;
62294
62295 /* The backer wishes to know when pages are first written to? */
62296 @@ -1239,14 +1350,24 @@ unsigned long mmap_region(struct file *f
62297 unsigned long charged = 0;
62298 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
62299
62300 +#ifdef CONFIG_PAX_SEGMEXEC
62301 + struct vm_area_struct *vma_m = NULL;
62302 +#endif
62303 +
62304 + /*
62305 + * mm->mmap_sem is required to protect against another thread
62306 + * changing the mappings in case we sleep.
62307 + */
62308 + verify_mm_writelocked(mm);
62309 +
62310 /* Clear old maps */
62311 error = -ENOMEM;
62312 -munmap_back:
62313 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62314 if (vma && vma->vm_start < addr + len) {
62315 if (do_munmap(mm, addr, len))
62316 return -ENOMEM;
62317 - goto munmap_back;
62318 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
62319 + BUG_ON(vma && vma->vm_start < addr + len);
62320 }
62321
62322 /* Check against address space limit. */
62323 @@ -1295,6 +1416,16 @@ munmap_back:
62324 goto unacct_error;
62325 }
62326
62327 +#ifdef CONFIG_PAX_SEGMEXEC
62328 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
62329 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
62330 + if (!vma_m) {
62331 + error = -ENOMEM;
62332 + goto free_vma;
62333 + }
62334 + }
62335 +#endif
62336 +
62337 vma->vm_mm = mm;
62338 vma->vm_start = addr;
62339 vma->vm_end = addr + len;
62340 @@ -1318,6 +1449,19 @@ munmap_back:
62341 error = file->f_op->mmap(file, vma);
62342 if (error)
62343 goto unmap_and_free_vma;
62344 +
62345 +#ifdef CONFIG_PAX_SEGMEXEC
62346 + if (vma_m && (vm_flags & VM_EXECUTABLE))
62347 + added_exe_file_vma(mm);
62348 +#endif
62349 +
62350 +#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
62351 + if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
62352 + vma->vm_flags |= VM_PAGEEXEC;
62353 + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
62354 + }
62355 +#endif
62356 +
62357 if (vm_flags & VM_EXECUTABLE)
62358 added_exe_file_vma(mm);
62359
62360 @@ -1353,6 +1497,11 @@ munmap_back:
62361 vma_link(mm, vma, prev, rb_link, rb_parent);
62362 file = vma->vm_file;
62363
62364 +#ifdef CONFIG_PAX_SEGMEXEC
62365 + if (vma_m)
62366 + BUG_ON(pax_mirror_vma(vma_m, vma));
62367 +#endif
62368 +
62369 /* Once vma denies write, undo our temporary denial count */
62370 if (correct_wcount)
62371 atomic_inc(&inode->i_writecount);
62372 @@ -1361,6 +1510,7 @@ out:
62373
62374 mm->total_vm += len >> PAGE_SHIFT;
62375 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
62376 + track_exec_limit(mm, addr, addr + len, vm_flags);
62377 if (vm_flags & VM_LOCKED) {
62378 if (!mlock_vma_pages_range(vma, addr, addr + len))
62379 mm->locked_vm += (len >> PAGE_SHIFT);
62380 @@ -1378,6 +1528,12 @@ unmap_and_free_vma:
62381 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
62382 charged = 0;
62383 free_vma:
62384 +
62385 +#ifdef CONFIG_PAX_SEGMEXEC
62386 + if (vma_m)
62387 + kmem_cache_free(vm_area_cachep, vma_m);
62388 +#endif
62389 +
62390 kmem_cache_free(vm_area_cachep, vma);
62391 unacct_error:
62392 if (charged)
62393 @@ -1385,6 +1541,44 @@ unacct_error:
62394 return error;
62395 }
62396
62397 +bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
62398 +{
62399 + if (!vma) {
62400 +#ifdef CONFIG_STACK_GROWSUP
62401 + if (addr > sysctl_heap_stack_gap)
62402 + vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
62403 + else
62404 + vma = find_vma(current->mm, 0);
62405 + if (vma && (vma->vm_flags & VM_GROWSUP))
62406 + return false;
62407 +#endif
62408 + return true;
62409 + }
62410 +
62411 + if (addr + len > vma->vm_start)
62412 + return false;
62413 +
62414 + if (vma->vm_flags & VM_GROWSDOWN)
62415 + return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
62416 +#ifdef CONFIG_STACK_GROWSUP
62417 + else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
62418 + return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
62419 +#endif
62420 +
62421 + return true;
62422 +}
62423 +
62424 +unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
62425 +{
62426 + if (vma->vm_start < len)
62427 + return -ENOMEM;
62428 + if (!(vma->vm_flags & VM_GROWSDOWN))
62429 + return vma->vm_start - len;
62430 + if (sysctl_heap_stack_gap <= vma->vm_start - len)
62431 + return vma->vm_start - len - sysctl_heap_stack_gap;
62432 + return -ENOMEM;
62433 +}
62434 +
62435 /* Get an address range which is currently unmapped.
62436 * For shmat() with addr=0.
62437 *
62438 @@ -1411,18 +1605,23 @@ arch_get_unmapped_area(struct file *filp
62439 if (flags & MAP_FIXED)
62440 return addr;
62441
62442 +#ifdef CONFIG_PAX_RANDMMAP
62443 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
62444 +#endif
62445 +
62446 if (addr) {
62447 addr = PAGE_ALIGN(addr);
62448 - vma = find_vma(mm, addr);
62449 - if (TASK_SIZE - len >= addr &&
62450 - (!vma || addr + len <= vma->vm_start))
62451 - return addr;
62452 + if (TASK_SIZE - len >= addr) {
62453 + vma = find_vma(mm, addr);
62454 + if (check_heap_stack_gap(vma, addr, len))
62455 + return addr;
62456 + }
62457 }
62458 if (len > mm->cached_hole_size) {
62459 - start_addr = addr = mm->free_area_cache;
62460 + start_addr = addr = mm->free_area_cache;
62461 } else {
62462 - start_addr = addr = TASK_UNMAPPED_BASE;
62463 - mm->cached_hole_size = 0;
62464 + start_addr = addr = mm->mmap_base;
62465 + mm->cached_hole_size = 0;
62466 }
62467
62468 full_search:
62469 @@ -1433,34 +1632,40 @@ full_search:
62470 * Start a new search - just in case we missed
62471 * some holes.
62472 */
62473 - if (start_addr != TASK_UNMAPPED_BASE) {
62474 - addr = TASK_UNMAPPED_BASE;
62475 - start_addr = addr;
62476 + if (start_addr != mm->mmap_base) {
62477 + start_addr = addr = mm->mmap_base;
62478 mm->cached_hole_size = 0;
62479 goto full_search;
62480 }
62481 return -ENOMEM;
62482 }
62483 - if (!vma || addr + len <= vma->vm_start) {
62484 - /*
62485 - * Remember the place where we stopped the search:
62486 - */
62487 - mm->free_area_cache = addr + len;
62488 - return addr;
62489 - }
62490 + if (check_heap_stack_gap(vma, addr, len))
62491 + break;
62492 if (addr + mm->cached_hole_size < vma->vm_start)
62493 mm->cached_hole_size = vma->vm_start - addr;
62494 addr = vma->vm_end;
62495 }
62496 +
62497 + /*
62498 + * Remember the place where we stopped the search:
62499 + */
62500 + mm->free_area_cache = addr + len;
62501 + return addr;
62502 }
62503 #endif
62504
62505 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
62506 {
62507 +
62508 +#ifdef CONFIG_PAX_SEGMEXEC
62509 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
62510 + return;
62511 +#endif
62512 +
62513 /*
62514 * Is this a new hole at the lowest possible address?
62515 */
62516 - if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
62517 + if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
62518 mm->free_area_cache = addr;
62519 mm->cached_hole_size = ~0UL;
62520 }
62521 @@ -1478,7 +1683,7 @@ arch_get_unmapped_area_topdown(struct fi
62522 {
62523 struct vm_area_struct *vma;
62524 struct mm_struct *mm = current->mm;
62525 - unsigned long addr = addr0;
62526 + unsigned long base = mm->mmap_base, addr = addr0;
62527
62528 /* requested length too big for entire address space */
62529 if (len > TASK_SIZE)
62530 @@ -1487,13 +1692,18 @@ arch_get_unmapped_area_topdown(struct fi
62531 if (flags & MAP_FIXED)
62532 return addr;
62533
62534 +#ifdef CONFIG_PAX_RANDMMAP
62535 + if (!(mm->pax_flags & MF_PAX_RANDMMAP))
62536 +#endif
62537 +
62538 /* requesting a specific address */
62539 if (addr) {
62540 addr = PAGE_ALIGN(addr);
62541 - vma = find_vma(mm, addr);
62542 - if (TASK_SIZE - len >= addr &&
62543 - (!vma || addr + len <= vma->vm_start))
62544 - return addr;
62545 + if (TASK_SIZE - len >= addr) {
62546 + vma = find_vma(mm, addr);
62547 + if (check_heap_stack_gap(vma, addr, len))
62548 + return addr;
62549 + }
62550 }
62551
62552 /* check if free_area_cache is useful for us */
62553 @@ -1508,7 +1718,7 @@ arch_get_unmapped_area_topdown(struct fi
62554 /* make sure it can fit in the remaining address space */
62555 if (addr > len) {
62556 vma = find_vma(mm, addr-len);
62557 - if (!vma || addr <= vma->vm_start)
62558 + if (check_heap_stack_gap(vma, addr - len, len))
62559 /* remember the address as a hint for next time */
62560 return (mm->free_area_cache = addr-len);
62561 }
62562 @@ -1525,7 +1735,7 @@ arch_get_unmapped_area_topdown(struct fi
62563 * return with success:
62564 */
62565 vma = find_vma(mm, addr);
62566 - if (!vma || addr+len <= vma->vm_start)
62567 + if (check_heap_stack_gap(vma, addr, len))
62568 /* remember the address as a hint for next time */
62569 return (mm->free_area_cache = addr);
62570
62571 @@ -1534,8 +1744,8 @@ arch_get_unmapped_area_topdown(struct fi
62572 mm->cached_hole_size = vma->vm_start - addr;
62573
62574 /* try just below the current vma->vm_start */
62575 - addr = vma->vm_start-len;
62576 - } while (len < vma->vm_start);
62577 + addr = skip_heap_stack_gap(vma, len);
62578 + } while (!IS_ERR_VALUE(addr));
62579
62580 bottomup:
62581 /*
62582 @@ -1544,13 +1754,21 @@ bottomup:
62583 * can happen with large stack limits and large mmap()
62584 * allocations.
62585 */
62586 + mm->mmap_base = TASK_UNMAPPED_BASE;
62587 +
62588 +#ifdef CONFIG_PAX_RANDMMAP
62589 + if (mm->pax_flags & MF_PAX_RANDMMAP)
62590 + mm->mmap_base += mm->delta_mmap;
62591 +#endif
62592 +
62593 + mm->free_area_cache = mm->mmap_base;
62594 mm->cached_hole_size = ~0UL;
62595 - mm->free_area_cache = TASK_UNMAPPED_BASE;
62596 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
62597 /*
62598 * Restore the topdown base:
62599 */
62600 - mm->free_area_cache = mm->mmap_base;
62601 + mm->mmap_base = base;
62602 + mm->free_area_cache = base;
62603 mm->cached_hole_size = ~0UL;
62604
62605 return addr;
62606 @@ -1559,6 +1777,12 @@ bottomup:
62607
62608 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
62609 {
62610 +
62611 +#ifdef CONFIG_PAX_SEGMEXEC
62612 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
62613 + return;
62614 +#endif
62615 +
62616 /*
62617 * Is this a new hole at the highest possible address?
62618 */
62619 @@ -1566,8 +1790,10 @@ void arch_unmap_area_topdown(struct mm_s
62620 mm->free_area_cache = addr;
62621
62622 /* dont allow allocations above current base */
62623 - if (mm->free_area_cache > mm->mmap_base)
62624 + if (mm->free_area_cache > mm->mmap_base) {
62625 mm->free_area_cache = mm->mmap_base;
62626 + mm->cached_hole_size = ~0UL;
62627 + }
62628 }
62629
62630 unsigned long
62631 @@ -1675,6 +1901,28 @@ out:
62632 return prev ? prev->vm_next : vma;
62633 }
62634
62635 +#ifdef CONFIG_PAX_SEGMEXEC
62636 +struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
62637 +{
62638 + struct vm_area_struct *vma_m;
62639 +
62640 + BUG_ON(!vma || vma->vm_start >= vma->vm_end);
62641 + if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
62642 + BUG_ON(vma->vm_mirror);
62643 + return NULL;
62644 + }
62645 + BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
62646 + vma_m = vma->vm_mirror;
62647 + BUG_ON(!vma_m || vma_m->vm_mirror != vma);
62648 + BUG_ON(vma->vm_file != vma_m->vm_file);
62649 + BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
62650 + BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
62651 + BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
62652 + BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
62653 + return vma_m;
62654 +}
62655 +#endif
62656 +
62657 /*
62658 * Verify that the stack growth is acceptable and
62659 * update accounting. This is shared with both the
62660 @@ -1691,6 +1939,7 @@ static int acct_stack_growth(struct vm_a
62661 return -ENOMEM;
62662
62663 /* Stack limit test */
62664 + gr_learn_resource(current, RLIMIT_STACK, size, 1);
62665 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
62666 return -ENOMEM;
62667
62668 @@ -1701,6 +1950,7 @@ static int acct_stack_growth(struct vm_a
62669 locked = mm->locked_vm + grow;
62670 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
62671 limit >>= PAGE_SHIFT;
62672 + gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
62673 if (locked > limit && !capable(CAP_IPC_LOCK))
62674 return -ENOMEM;
62675 }
62676 @@ -1731,37 +1981,48 @@ static int acct_stack_growth(struct vm_a
62677 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
62678 * vma is the last one with address > vma->vm_end. Have to extend vma.
62679 */
62680 +#ifndef CONFIG_IA64
62681 +static
62682 +#endif
62683 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
62684 {
62685 int error;
62686 + bool locknext;
62687
62688 if (!(vma->vm_flags & VM_GROWSUP))
62689 return -EFAULT;
62690
62691 + /* Also guard against wrapping around to address 0. */
62692 + if (address < PAGE_ALIGN(address+1))
62693 + address = PAGE_ALIGN(address+1);
62694 + else
62695 + return -ENOMEM;
62696 +
62697 /*
62698 * We must make sure the anon_vma is allocated
62699 * so that the anon_vma locking is not a noop.
62700 */
62701 if (unlikely(anon_vma_prepare(vma)))
62702 return -ENOMEM;
62703 + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
62704 + if (locknext && anon_vma_prepare(vma->vm_next))
62705 + return -ENOMEM;
62706 vma_lock_anon_vma(vma);
62707 + if (locknext)
62708 + vma_lock_anon_vma(vma->vm_next);
62709
62710 /*
62711 * vma->vm_start/vm_end cannot change under us because the caller
62712 * is required to hold the mmap_sem in read mode. We need the
62713 - * anon_vma lock to serialize against concurrent expand_stacks.
62714 - * Also guard against wrapping around to address 0.
62715 + * anon_vma locks to serialize against concurrent expand_stacks
62716 + * and expand_upwards.
62717 */
62718 - if (address < PAGE_ALIGN(address+4))
62719 - address = PAGE_ALIGN(address+4);
62720 - else {
62721 - vma_unlock_anon_vma(vma);
62722 - return -ENOMEM;
62723 - }
62724 error = 0;
62725
62726 /* Somebody else might have raced and expanded it already */
62727 - if (address > vma->vm_end) {
62728 + if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
62729 + error = -ENOMEM;
62730 + else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
62731 unsigned long size, grow;
62732
62733 size = address - vma->vm_start;
62734 @@ -1776,6 +2037,8 @@ int expand_upwards(struct vm_area_struct
62735 }
62736 }
62737 }
62738 + if (locknext)
62739 + vma_unlock_anon_vma(vma->vm_next);
62740 vma_unlock_anon_vma(vma);
62741 khugepaged_enter_vma_merge(vma);
62742 return error;
62743 @@ -1789,6 +2052,8 @@ static int expand_downwards(struct vm_ar
62744 unsigned long address)
62745 {
62746 int error;
62747 + bool lockprev = false;
62748 + struct vm_area_struct *prev;
62749
62750 /*
62751 * We must make sure the anon_vma is allocated
62752 @@ -1802,6 +2067,15 @@ static int expand_downwards(struct vm_ar
62753 if (error)
62754 return error;
62755
62756 + prev = vma->vm_prev;
62757 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
62758 + lockprev = prev && (prev->vm_flags & VM_GROWSUP);
62759 +#endif
62760 + if (lockprev && anon_vma_prepare(prev))
62761 + return -ENOMEM;
62762 + if (lockprev)
62763 + vma_lock_anon_vma(prev);
62764 +
62765 vma_lock_anon_vma(vma);
62766
62767 /*
62768 @@ -1811,9 +2085,17 @@ static int expand_downwards(struct vm_ar
62769 */
62770
62771 /* Somebody else might have raced and expanded it already */
62772 - if (address < vma->vm_start) {
62773 + if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
62774 + error = -ENOMEM;
62775 + else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
62776 unsigned long size, grow;
62777
62778 +#ifdef CONFIG_PAX_SEGMEXEC
62779 + struct vm_area_struct *vma_m;
62780 +
62781 + vma_m = pax_find_mirror_vma(vma);
62782 +#endif
62783 +
62784 size = vma->vm_end - address;
62785 grow = (vma->vm_start - address) >> PAGE_SHIFT;
62786
62787 @@ -1823,11 +2105,22 @@ static int expand_downwards(struct vm_ar
62788 if (!error) {
62789 vma->vm_start = address;
62790 vma->vm_pgoff -= grow;
62791 + track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
62792 +
62793 +#ifdef CONFIG_PAX_SEGMEXEC
62794 + if (vma_m) {
62795 + vma_m->vm_start -= grow << PAGE_SHIFT;
62796 + vma_m->vm_pgoff -= grow;
62797 + }
62798 +#endif
62799 +
62800 perf_event_mmap(vma);
62801 }
62802 }
62803 }
62804 vma_unlock_anon_vma(vma);
62805 + if (lockprev)
62806 + vma_unlock_anon_vma(prev);
62807 khugepaged_enter_vma_merge(vma);
62808 return error;
62809 }
62810 @@ -1902,6 +2195,13 @@ static void remove_vma_list(struct mm_st
62811 do {
62812 long nrpages = vma_pages(vma);
62813
62814 +#ifdef CONFIG_PAX_SEGMEXEC
62815 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
62816 + vma = remove_vma(vma);
62817 + continue;
62818 + }
62819 +#endif
62820 +
62821 mm->total_vm -= nrpages;
62822 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
62823 vma = remove_vma(vma);
62824 @@ -1947,6 +2247,16 @@ detach_vmas_to_be_unmapped(struct mm_str
62825 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
62826 vma->vm_prev = NULL;
62827 do {
62828 +
62829 +#ifdef CONFIG_PAX_SEGMEXEC
62830 + if (vma->vm_mirror) {
62831 + BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
62832 + vma->vm_mirror->vm_mirror = NULL;
62833 + vma->vm_mirror->vm_flags &= ~VM_EXEC;
62834 + vma->vm_mirror = NULL;
62835 + }
62836 +#endif
62837 +
62838 rb_erase(&vma->vm_rb, &mm->mm_rb);
62839 mm->map_count--;
62840 tail_vma = vma;
62841 @@ -1975,14 +2285,33 @@ static int __split_vma(struct mm_struct
62842 struct vm_area_struct *new;
62843 int err = -ENOMEM;
62844
62845 +#ifdef CONFIG_PAX_SEGMEXEC
62846 + struct vm_area_struct *vma_m, *new_m = NULL;
62847 + unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
62848 +#endif
62849 +
62850 if (is_vm_hugetlb_page(vma) && (addr &
62851 ~(huge_page_mask(hstate_vma(vma)))))
62852 return -EINVAL;
62853
62854 +#ifdef CONFIG_PAX_SEGMEXEC
62855 + vma_m = pax_find_mirror_vma(vma);
62856 +#endif
62857 +
62858 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62859 if (!new)
62860 goto out_err;
62861
62862 +#ifdef CONFIG_PAX_SEGMEXEC
62863 + if (vma_m) {
62864 + new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
62865 + if (!new_m) {
62866 + kmem_cache_free(vm_area_cachep, new);
62867 + goto out_err;
62868 + }
62869 + }
62870 +#endif
62871 +
62872 /* most fields are the same, copy all, and then fixup */
62873 *new = *vma;
62874
62875 @@ -1995,6 +2324,22 @@ static int __split_vma(struct mm_struct
62876 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
62877 }
62878
62879 +#ifdef CONFIG_PAX_SEGMEXEC
62880 + if (vma_m) {
62881 + *new_m = *vma_m;
62882 + INIT_LIST_HEAD(&new_m->anon_vma_chain);
62883 + new_m->vm_mirror = new;
62884 + new->vm_mirror = new_m;
62885 +
62886 + if (new_below)
62887 + new_m->vm_end = addr_m;
62888 + else {
62889 + new_m->vm_start = addr_m;
62890 + new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
62891 + }
62892 + }
62893 +#endif
62894 +
62895 pol = mpol_dup(vma_policy(vma));
62896 if (IS_ERR(pol)) {
62897 err = PTR_ERR(pol);
62898 @@ -2020,6 +2365,42 @@ static int __split_vma(struct mm_struct
62899 else
62900 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
62901
62902 +#ifdef CONFIG_PAX_SEGMEXEC
62903 + if (!err && vma_m) {
62904 + if (anon_vma_clone(new_m, vma_m))
62905 + goto out_free_mpol;
62906 +
62907 + mpol_get(pol);
62908 + vma_set_policy(new_m, pol);
62909 +
62910 + if (new_m->vm_file) {
62911 + get_file(new_m->vm_file);
62912 + if (vma_m->vm_flags & VM_EXECUTABLE)
62913 + added_exe_file_vma(mm);
62914 + }
62915 +
62916 + if (new_m->vm_ops && new_m->vm_ops->open)
62917 + new_m->vm_ops->open(new_m);
62918 +
62919 + if (new_below)
62920 + err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
62921 + ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
62922 + else
62923 + err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
62924 +
62925 + if (err) {
62926 + if (new_m->vm_ops && new_m->vm_ops->close)
62927 + new_m->vm_ops->close(new_m);
62928 + if (new_m->vm_file) {
62929 + if (vma_m->vm_flags & VM_EXECUTABLE)
62930 + removed_exe_file_vma(mm);
62931 + fput(new_m->vm_file);
62932 + }
62933 + mpol_put(pol);
62934 + }
62935 + }
62936 +#endif
62937 +
62938 /* Success. */
62939 if (!err)
62940 return 0;
62941 @@ -2032,10 +2413,18 @@ static int __split_vma(struct mm_struct
62942 removed_exe_file_vma(mm);
62943 fput(new->vm_file);
62944 }
62945 - unlink_anon_vmas(new);
62946 out_free_mpol:
62947 mpol_put(pol);
62948 out_free_vma:
62949 +
62950 +#ifdef CONFIG_PAX_SEGMEXEC
62951 + if (new_m) {
62952 + unlink_anon_vmas(new_m);
62953 + kmem_cache_free(vm_area_cachep, new_m);
62954 + }
62955 +#endif
62956 +
62957 + unlink_anon_vmas(new);
62958 kmem_cache_free(vm_area_cachep, new);
62959 out_err:
62960 return err;
62961 @@ -2048,6 +2437,15 @@ static int __split_vma(struct mm_struct
62962 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
62963 unsigned long addr, int new_below)
62964 {
62965 +
62966 +#ifdef CONFIG_PAX_SEGMEXEC
62967 + if (mm->pax_flags & MF_PAX_SEGMEXEC) {
62968 + BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
62969 + if (mm->map_count >= sysctl_max_map_count-1)
62970 + return -ENOMEM;
62971 + } else
62972 +#endif
62973 +
62974 if (mm->map_count >= sysctl_max_map_count)
62975 return -ENOMEM;
62976
62977 @@ -2059,11 +2457,30 @@ int split_vma(struct mm_struct *mm, stru
62978 * work. This now handles partial unmappings.
62979 * Jeremy Fitzhardinge <jeremy@goop.org>
62980 */
62981 +#ifdef CONFIG_PAX_SEGMEXEC
62982 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62983 {
62984 + int ret = __do_munmap(mm, start, len);
62985 + if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
62986 + return ret;
62987 +
62988 + return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
62989 +}
62990 +
62991 +int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62992 +#else
62993 +int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
62994 +#endif
62995 +{
62996 unsigned long end;
62997 struct vm_area_struct *vma, *prev, *last;
62998
62999 + /*
63000 + * mm->mmap_sem is required to protect against another thread
63001 + * changing the mappings in case we sleep.
63002 + */
63003 + verify_mm_writelocked(mm);
63004 +
63005 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
63006 return -EINVAL;
63007
63008 @@ -2137,6 +2554,8 @@ int do_munmap(struct mm_struct *mm, unsi
63009 /* Fix up all other VM information */
63010 remove_vma_list(mm, vma);
63011
63012 + track_exec_limit(mm, start, end, 0UL);
63013 +
63014 return 0;
63015 }
63016
63017 @@ -2149,22 +2568,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
63018
63019 profile_munmap(addr);
63020
63021 +#ifdef CONFIG_PAX_SEGMEXEC
63022 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
63023 + (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
63024 + return -EINVAL;
63025 +#endif
63026 +
63027 down_write(&mm->mmap_sem);
63028 ret = do_munmap(mm, addr, len);
63029 up_write(&mm->mmap_sem);
63030 return ret;
63031 }
63032
63033 -static inline void verify_mm_writelocked(struct mm_struct *mm)
63034 -{
63035 -#ifdef CONFIG_DEBUG_VM
63036 - if (unlikely(down_read_trylock(&mm->mmap_sem))) {
63037 - WARN_ON(1);
63038 - up_read(&mm->mmap_sem);
63039 - }
63040 -#endif
63041 -}
63042 -
63043 /*
63044 * this is really a simplified "do_mmap". it only handles
63045 * anonymous maps. eventually we may be able to do some
63046 @@ -2178,6 +2593,7 @@ unsigned long do_brk(unsigned long addr,
63047 struct rb_node ** rb_link, * rb_parent;
63048 pgoff_t pgoff = addr >> PAGE_SHIFT;
63049 int error;
63050 + unsigned long charged;
63051
63052 len = PAGE_ALIGN(len);
63053 if (!len)
63054 @@ -2189,16 +2605,30 @@ unsigned long do_brk(unsigned long addr,
63055
63056 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
63057
63058 +#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
63059 + if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
63060 + flags &= ~VM_EXEC;
63061 +
63062 +#ifdef CONFIG_PAX_MPROTECT
63063 + if (mm->pax_flags & MF_PAX_MPROTECT)
63064 + flags &= ~VM_MAYEXEC;
63065 +#endif
63066 +
63067 + }
63068 +#endif
63069 +
63070 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
63071 if (error & ~PAGE_MASK)
63072 return error;
63073
63074 + charged = len >> PAGE_SHIFT;
63075 +
63076 /*
63077 * mlock MCL_FUTURE?
63078 */
63079 if (mm->def_flags & VM_LOCKED) {
63080 unsigned long locked, lock_limit;
63081 - locked = len >> PAGE_SHIFT;
63082 + locked = charged;
63083 locked += mm->locked_vm;
63084 lock_limit = rlimit(RLIMIT_MEMLOCK);
63085 lock_limit >>= PAGE_SHIFT;
63086 @@ -2215,22 +2645,22 @@ unsigned long do_brk(unsigned long addr,
63087 /*
63088 * Clear old maps. this also does some error checking for us
63089 */
63090 - munmap_back:
63091 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
63092 if (vma && vma->vm_start < addr + len) {
63093 if (do_munmap(mm, addr, len))
63094 return -ENOMEM;
63095 - goto munmap_back;
63096 + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
63097 + BUG_ON(vma && vma->vm_start < addr + len);
63098 }
63099
63100 /* Check against address space limits *after* clearing old maps... */
63101 - if (!may_expand_vm(mm, len >> PAGE_SHIFT))
63102 + if (!may_expand_vm(mm, charged))
63103 return -ENOMEM;
63104
63105 if (mm->map_count > sysctl_max_map_count)
63106 return -ENOMEM;
63107
63108 - if (security_vm_enough_memory(len >> PAGE_SHIFT))
63109 + if (security_vm_enough_memory(charged))
63110 return -ENOMEM;
63111
63112 /* Can we just expand an old private anonymous mapping? */
63113 @@ -2244,7 +2674,7 @@ unsigned long do_brk(unsigned long addr,
63114 */
63115 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
63116 if (!vma) {
63117 - vm_unacct_memory(len >> PAGE_SHIFT);
63118 + vm_unacct_memory(charged);
63119 return -ENOMEM;
63120 }
63121
63122 @@ -2258,11 +2688,12 @@ unsigned long do_brk(unsigned long addr,
63123 vma_link(mm, vma, prev, rb_link, rb_parent);
63124 out:
63125 perf_event_mmap(vma);
63126 - mm->total_vm += len >> PAGE_SHIFT;
63127 + mm->total_vm += charged;
63128 if (flags & VM_LOCKED) {
63129 if (!mlock_vma_pages_range(vma, addr, addr + len))
63130 - mm->locked_vm += (len >> PAGE_SHIFT);
63131 + mm->locked_vm += charged;
63132 }
63133 + track_exec_limit(mm, addr, addr + len, flags);
63134 return addr;
63135 }
63136
63137 @@ -2309,8 +2740,10 @@ void exit_mmap(struct mm_struct *mm)
63138 * Walk the list again, actually closing and freeing it,
63139 * with preemption enabled, without holding any MM locks.
63140 */
63141 - while (vma)
63142 + while (vma) {
63143 + vma->vm_mirror = NULL;
63144 vma = remove_vma(vma);
63145 + }
63146
63147 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
63148 }
63149 @@ -2324,6 +2757,13 @@ int insert_vm_struct(struct mm_struct *
63150 struct vm_area_struct * __vma, * prev;
63151 struct rb_node ** rb_link, * rb_parent;
63152
63153 +#ifdef CONFIG_PAX_SEGMEXEC
63154 + struct vm_area_struct *vma_m = NULL;
63155 +#endif
63156 +
63157 + if (security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1))
63158 + return -EPERM;
63159 +
63160 /*
63161 * The vm_pgoff of a purely anonymous vma should be irrelevant
63162 * until its first write fault, when page's anon_vma and index
63163 @@ -2346,7 +2786,22 @@ int insert_vm_struct(struct mm_struct *
63164 if ((vma->vm_flags & VM_ACCOUNT) &&
63165 security_vm_enough_memory_mm(mm, vma_pages(vma)))
63166 return -ENOMEM;
63167 +
63168 +#ifdef CONFIG_PAX_SEGMEXEC
63169 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
63170 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
63171 + if (!vma_m)
63172 + return -ENOMEM;
63173 + }
63174 +#endif
63175 +
63176 vma_link(mm, vma, prev, rb_link, rb_parent);
63177 +
63178 +#ifdef CONFIG_PAX_SEGMEXEC
63179 + if (vma_m)
63180 + BUG_ON(pax_mirror_vma(vma_m, vma));
63181 +#endif
63182 +
63183 return 0;
63184 }
63185
63186 @@ -2364,6 +2819,8 @@ struct vm_area_struct *copy_vma(struct v
63187 struct rb_node **rb_link, *rb_parent;
63188 struct mempolicy *pol;
63189
63190 + BUG_ON(vma->vm_mirror);
63191 +
63192 /*
63193 * If anonymous vma has not yet been faulted, update new pgoff
63194 * to match new location, to increase its chance of merging.
63195 @@ -2414,6 +2871,39 @@ struct vm_area_struct *copy_vma(struct v
63196 return NULL;
63197 }
63198
63199 +#ifdef CONFIG_PAX_SEGMEXEC
63200 +long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
63201 +{
63202 + struct vm_area_struct *prev_m;
63203 + struct rb_node **rb_link_m, *rb_parent_m;
63204 + struct mempolicy *pol_m;
63205 +
63206 + BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
63207 + BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
63208 + BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
63209 + *vma_m = *vma;
63210 + INIT_LIST_HEAD(&vma_m->anon_vma_chain);
63211 + if (anon_vma_clone(vma_m, vma))
63212 + return -ENOMEM;
63213 + pol_m = vma_policy(vma_m);
63214 + mpol_get(pol_m);
63215 + vma_set_policy(vma_m, pol_m);
63216 + vma_m->vm_start += SEGMEXEC_TASK_SIZE;
63217 + vma_m->vm_end += SEGMEXEC_TASK_SIZE;
63218 + vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
63219 + vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
63220 + if (vma_m->vm_file)
63221 + get_file(vma_m->vm_file);
63222 + if (vma_m->vm_ops && vma_m->vm_ops->open)
63223 + vma_m->vm_ops->open(vma_m);
63224 + find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
63225 + vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
63226 + vma_m->vm_mirror = vma;
63227 + vma->vm_mirror = vma_m;
63228 + return 0;
63229 +}
63230 +#endif
63231 +
63232 /*
63233 * Return true if the calling process may expand its vm space by the passed
63234 * number of pages
63235 @@ -2424,7 +2914,7 @@ int may_expand_vm(struct mm_struct *mm,
63236 unsigned long lim;
63237
63238 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
63239 -
63240 + gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
63241 if (cur + npages > lim)
63242 return 0;
63243 return 1;
63244 @@ -2495,6 +2985,22 @@ int install_special_mapping(struct mm_st
63245 vma->vm_start = addr;
63246 vma->vm_end = addr + len;
63247
63248 +#ifdef CONFIG_PAX_MPROTECT
63249 + if (mm->pax_flags & MF_PAX_MPROTECT) {
63250 +#ifndef CONFIG_PAX_MPROTECT_COMPAT
63251 + if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
63252 + return -EPERM;
63253 + if (!(vm_flags & VM_EXEC))
63254 + vm_flags &= ~VM_MAYEXEC;
63255 +#else
63256 + if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
63257 + vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
63258 +#endif
63259 + else
63260 + vm_flags &= ~VM_MAYWRITE;
63261 + }
63262 +#endif
63263 +
63264 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
63265 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
63266
63267 diff -urNp linux-2.6.39.4/mm/mprotect.c linux-2.6.39.4/mm/mprotect.c
63268 --- linux-2.6.39.4/mm/mprotect.c 2011-05-19 00:06:34.000000000 -0400
63269 +++ linux-2.6.39.4/mm/mprotect.c 2011-08-05 19:44:37.000000000 -0400
63270 @@ -23,10 +23,16 @@
63271 #include <linux/mmu_notifier.h>
63272 #include <linux/migrate.h>
63273 #include <linux/perf_event.h>
63274 +
63275 +#ifdef CONFIG_PAX_MPROTECT
63276 +#include <linux/elf.h>
63277 +#endif
63278 +
63279 #include <asm/uaccess.h>
63280 #include <asm/pgtable.h>
63281 #include <asm/cacheflush.h>
63282 #include <asm/tlbflush.h>
63283 +#include <asm/mmu_context.h>
63284
63285 #ifndef pgprot_modify
63286 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
63287 @@ -141,6 +147,48 @@ static void change_protection(struct vm_
63288 flush_tlb_range(vma, start, end);
63289 }
63290
63291 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63292 +/* called while holding the mmap semaphor for writing except stack expansion */
63293 +void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
63294 +{
63295 + unsigned long oldlimit, newlimit = 0UL;
63296 +
63297 + if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
63298 + return;
63299 +
63300 + spin_lock(&mm->page_table_lock);
63301 + oldlimit = mm->context.user_cs_limit;
63302 + if ((prot & VM_EXEC) && oldlimit < end)
63303 + /* USER_CS limit moved up */
63304 + newlimit = end;
63305 + else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
63306 + /* USER_CS limit moved down */
63307 + newlimit = start;
63308 +
63309 + if (newlimit) {
63310 + mm->context.user_cs_limit = newlimit;
63311 +
63312 +#ifdef CONFIG_SMP
63313 + wmb();
63314 + cpus_clear(mm->context.cpu_user_cs_mask);
63315 + cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
63316 +#endif
63317 +
63318 + set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
63319 + }
63320 + spin_unlock(&mm->page_table_lock);
63321 + if (newlimit == end) {
63322 + struct vm_area_struct *vma = find_vma(mm, oldlimit);
63323 +
63324 + for (; vma && vma->vm_start < end; vma = vma->vm_next)
63325 + if (is_vm_hugetlb_page(vma))
63326 + hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
63327 + else
63328 + change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
63329 + }
63330 +}
63331 +#endif
63332 +
63333 int
63334 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
63335 unsigned long start, unsigned long end, unsigned long newflags)
63336 @@ -153,11 +201,29 @@ mprotect_fixup(struct vm_area_struct *vm
63337 int error;
63338 int dirty_accountable = 0;
63339
63340 +#ifdef CONFIG_PAX_SEGMEXEC
63341 + struct vm_area_struct *vma_m = NULL;
63342 + unsigned long start_m, end_m;
63343 +
63344 + start_m = start + SEGMEXEC_TASK_SIZE;
63345 + end_m = end + SEGMEXEC_TASK_SIZE;
63346 +#endif
63347 +
63348 if (newflags == oldflags) {
63349 *pprev = vma;
63350 return 0;
63351 }
63352
63353 + if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
63354 + struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
63355 +
63356 + if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
63357 + return -ENOMEM;
63358 +
63359 + if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
63360 + return -ENOMEM;
63361 + }
63362 +
63363 /*
63364 * If we make a private mapping writable we increase our commit;
63365 * but (without finer accounting) cannot reduce our commit if we
63366 @@ -174,6 +240,42 @@ mprotect_fixup(struct vm_area_struct *vm
63367 }
63368 }
63369
63370 +#ifdef CONFIG_PAX_SEGMEXEC
63371 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
63372 + if (start != vma->vm_start) {
63373 + error = split_vma(mm, vma, start, 1);
63374 + if (error)
63375 + goto fail;
63376 + BUG_ON(!*pprev || (*pprev)->vm_next == vma);
63377 + *pprev = (*pprev)->vm_next;
63378 + }
63379 +
63380 + if (end != vma->vm_end) {
63381 + error = split_vma(mm, vma, end, 0);
63382 + if (error)
63383 + goto fail;
63384 + }
63385 +
63386 + if (pax_find_mirror_vma(vma)) {
63387 + error = __do_munmap(mm, start_m, end_m - start_m);
63388 + if (error)
63389 + goto fail;
63390 + } else {
63391 + vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
63392 + if (!vma_m) {
63393 + error = -ENOMEM;
63394 + goto fail;
63395 + }
63396 + vma->vm_flags = newflags;
63397 + error = pax_mirror_vma(vma_m, vma);
63398 + if (error) {
63399 + vma->vm_flags = oldflags;
63400 + goto fail;
63401 + }
63402 + }
63403 + }
63404 +#endif
63405 +
63406 /*
63407 * First try to merge with previous and/or next vma.
63408 */
63409 @@ -204,9 +306,21 @@ success:
63410 * vm_flags and vm_page_prot are protected by the mmap_sem
63411 * held in write mode.
63412 */
63413 +
63414 +#ifdef CONFIG_PAX_SEGMEXEC
63415 + if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
63416 + pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
63417 +#endif
63418 +
63419 vma->vm_flags = newflags;
63420 +
63421 +#ifdef CONFIG_PAX_MPROTECT
63422 + if (mm->binfmt && mm->binfmt->handle_mprotect)
63423 + mm->binfmt->handle_mprotect(vma, newflags);
63424 +#endif
63425 +
63426 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
63427 - vm_get_page_prot(newflags));
63428 + vm_get_page_prot(vma->vm_flags));
63429
63430 if (vma_wants_writenotify(vma)) {
63431 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
63432 @@ -248,6 +362,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63433 end = start + len;
63434 if (end <= start)
63435 return -ENOMEM;
63436 +
63437 +#ifdef CONFIG_PAX_SEGMEXEC
63438 + if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
63439 + if (end > SEGMEXEC_TASK_SIZE)
63440 + return -EINVAL;
63441 + } else
63442 +#endif
63443 +
63444 + if (end > TASK_SIZE)
63445 + return -EINVAL;
63446 +
63447 if (!arch_validate_prot(prot))
63448 return -EINVAL;
63449
63450 @@ -255,7 +380,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63451 /*
63452 * Does the application expect PROT_READ to imply PROT_EXEC:
63453 */
63454 - if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
63455 + if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
63456 prot |= PROT_EXEC;
63457
63458 vm_flags = calc_vm_prot_bits(prot);
63459 @@ -287,6 +412,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63460 if (start > vma->vm_start)
63461 prev = vma;
63462
63463 +#ifdef CONFIG_PAX_MPROTECT
63464 + if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
63465 + current->mm->binfmt->handle_mprotect(vma, vm_flags);
63466 +#endif
63467 +
63468 for (nstart = start ; ; ) {
63469 unsigned long newflags;
63470
63471 @@ -296,6 +426,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63472
63473 /* newflags >> 4 shift VM_MAY% in place of VM_% */
63474 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
63475 + if (prot & (PROT_WRITE | PROT_EXEC))
63476 + gr_log_rwxmprotect(vma->vm_file);
63477 +
63478 + error = -EACCES;
63479 + goto out;
63480 + }
63481 +
63482 + if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
63483 error = -EACCES;
63484 goto out;
63485 }
63486 @@ -310,6 +448,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
63487 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
63488 if (error)
63489 goto out;
63490 +
63491 + track_exec_limit(current->mm, nstart, tmp, vm_flags);
63492 +
63493 nstart = tmp;
63494
63495 if (nstart < prev->vm_end)
63496 diff -urNp linux-2.6.39.4/mm/mremap.c linux-2.6.39.4/mm/mremap.c
63497 --- linux-2.6.39.4/mm/mremap.c 2011-05-19 00:06:34.000000000 -0400
63498 +++ linux-2.6.39.4/mm/mremap.c 2011-08-05 19:44:37.000000000 -0400
63499 @@ -114,6 +114,12 @@ static void move_ptes(struct vm_area_str
63500 continue;
63501 pte = ptep_clear_flush(vma, old_addr, old_pte);
63502 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
63503 +
63504 +#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
63505 + if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
63506 + pte = pte_exprotect(pte);
63507 +#endif
63508 +
63509 set_pte_at(mm, new_addr, new_pte, pte);
63510 }
63511
63512 @@ -273,6 +279,11 @@ static struct vm_area_struct *vma_to_res
63513 if (is_vm_hugetlb_page(vma))
63514 goto Einval;
63515
63516 +#ifdef CONFIG_PAX_SEGMEXEC
63517 + if (pax_find_mirror_vma(vma))
63518 + goto Einval;
63519 +#endif
63520 +
63521 /* We can't remap across vm area boundaries */
63522 if (old_len > vma->vm_end - addr)
63523 goto Efault;
63524 @@ -329,20 +340,25 @@ static unsigned long mremap_to(unsigned
63525 unsigned long ret = -EINVAL;
63526 unsigned long charged = 0;
63527 unsigned long map_flags;
63528 + unsigned long pax_task_size = TASK_SIZE;
63529
63530 if (new_addr & ~PAGE_MASK)
63531 goto out;
63532
63533 - if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
63534 +#ifdef CONFIG_PAX_SEGMEXEC
63535 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
63536 + pax_task_size = SEGMEXEC_TASK_SIZE;
63537 +#endif
63538 +
63539 + pax_task_size -= PAGE_SIZE;
63540 +
63541 + if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
63542 goto out;
63543
63544 /* Check if the location we're moving into overlaps the
63545 * old location at all, and fail if it does.
63546 */
63547 - if ((new_addr <= addr) && (new_addr+new_len) > addr)
63548 - goto out;
63549 -
63550 - if ((addr <= new_addr) && (addr+old_len) > new_addr)
63551 + if (addr + old_len > new_addr && new_addr + new_len > addr)
63552 goto out;
63553
63554 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
63555 @@ -414,6 +430,7 @@ unsigned long do_mremap(unsigned long ad
63556 struct vm_area_struct *vma;
63557 unsigned long ret = -EINVAL;
63558 unsigned long charged = 0;
63559 + unsigned long pax_task_size = TASK_SIZE;
63560
63561 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
63562 goto out;
63563 @@ -432,6 +449,17 @@ unsigned long do_mremap(unsigned long ad
63564 if (!new_len)
63565 goto out;
63566
63567 +#ifdef CONFIG_PAX_SEGMEXEC
63568 + if (mm->pax_flags & MF_PAX_SEGMEXEC)
63569 + pax_task_size = SEGMEXEC_TASK_SIZE;
63570 +#endif
63571 +
63572 + pax_task_size -= PAGE_SIZE;
63573 +
63574 + if (new_len > pax_task_size || addr > pax_task_size-new_len ||
63575 + old_len > pax_task_size || addr > pax_task_size-old_len)
63576 + goto out;
63577 +
63578 if (flags & MREMAP_FIXED) {
63579 if (flags & MREMAP_MAYMOVE)
63580 ret = mremap_to(addr, old_len, new_addr, new_len);
63581 @@ -481,6 +509,7 @@ unsigned long do_mremap(unsigned long ad
63582 addr + new_len);
63583 }
63584 ret = addr;
63585 + track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
63586 goto out;
63587 }
63588 }
63589 @@ -507,7 +536,13 @@ unsigned long do_mremap(unsigned long ad
63590 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
63591 if (ret)
63592 goto out;
63593 +
63594 + map_flags = vma->vm_flags;
63595 ret = move_vma(vma, addr, old_len, new_len, new_addr);
63596 + if (!(ret & ~PAGE_MASK)) {
63597 + track_exec_limit(current->mm, addr, addr + old_len, 0UL);
63598 + track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
63599 + }
63600 }
63601 out:
63602 if (ret & ~PAGE_MASK)
63603 diff -urNp linux-2.6.39.4/mm/nobootmem.c linux-2.6.39.4/mm/nobootmem.c
63604 --- linux-2.6.39.4/mm/nobootmem.c 2011-05-19 00:06:34.000000000 -0400
63605 +++ linux-2.6.39.4/mm/nobootmem.c 2011-08-05 19:44:37.000000000 -0400
63606 @@ -110,19 +110,30 @@ static void __init __free_pages_memory(u
63607 unsigned long __init free_all_memory_core_early(int nodeid)
63608 {
63609 int i;
63610 - u64 start, end;
63611 + u64 start, end, startrange, endrange;
63612 unsigned long count = 0;
63613 - struct range *range = NULL;
63614 + struct range *range = NULL, rangerange = { 0, 0 };
63615 int nr_range;
63616
63617 nr_range = get_free_all_memory_range(&range, nodeid);
63618 + startrange = __pa(range) >> PAGE_SHIFT;
63619 + endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT;
63620
63621 for (i = 0; i < nr_range; i++) {
63622 start = range[i].start;
63623 end = range[i].end;
63624 + if (start <= endrange && startrange < end) {
63625 + BUG_ON(rangerange.start | rangerange.end);
63626 + rangerange = range[i];
63627 + continue;
63628 + }
63629 count += end - start;
63630 __free_pages_memory(start, end);
63631 }
63632 + start = rangerange.start;
63633 + end = rangerange.end;
63634 + count += end - start;
63635 + __free_pages_memory(start, end);
63636
63637 return count;
63638 }
63639 diff -urNp linux-2.6.39.4/mm/nommu.c linux-2.6.39.4/mm/nommu.c
63640 --- linux-2.6.39.4/mm/nommu.c 2011-08-05 21:11:51.000000000 -0400
63641 +++ linux-2.6.39.4/mm/nommu.c 2011-08-05 21:12:20.000000000 -0400
63642 @@ -63,7 +63,6 @@ int sysctl_overcommit_memory = OVERCOMMI
63643 int sysctl_overcommit_ratio = 50; /* default is 50% */
63644 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
63645 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
63646 -int heap_stack_gap = 0;
63647
63648 atomic_long_t mmap_pages_allocated;
63649
63650 @@ -833,15 +832,6 @@ struct vm_area_struct *find_vma(struct m
63651 EXPORT_SYMBOL(find_vma);
63652
63653 /*
63654 - * find a VMA
63655 - * - we don't extend stack VMAs under NOMMU conditions
63656 - */
63657 -struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
63658 -{
63659 - return find_vma(mm, addr);
63660 -}
63661 -
63662 -/*
63663 * expand a stack to a given address
63664 * - not supported under NOMMU conditions
63665 */
63666 @@ -1563,6 +1553,7 @@ int split_vma(struct mm_struct *mm, stru
63667
63668 /* most fields are the same, copy all, and then fixup */
63669 *new = *vma;
63670 + INIT_LIST_HEAD(&new->anon_vma_chain);
63671 *region = *vma->vm_region;
63672 new->vm_region = region;
63673
63674 diff -urNp linux-2.6.39.4/mm/page_alloc.c linux-2.6.39.4/mm/page_alloc.c
63675 --- linux-2.6.39.4/mm/page_alloc.c 2011-06-03 00:04:14.000000000 -0400
63676 +++ linux-2.6.39.4/mm/page_alloc.c 2011-08-05 19:44:37.000000000 -0400
63677 @@ -337,7 +337,7 @@ out:
63678 * This usage means that zero-order pages may not be compound.
63679 */
63680
63681 -static void free_compound_page(struct page *page)
63682 +void free_compound_page(struct page *page)
63683 {
63684 __free_pages_ok(page, compound_order(page));
63685 }
63686 @@ -650,6 +650,10 @@ static bool free_pages_prepare(struct pa
63687 int i;
63688 int bad = 0;
63689
63690 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
63691 + unsigned long index = 1UL << order;
63692 +#endif
63693 +
63694 trace_mm_page_free_direct(page, order);
63695 kmemcheck_free_shadow(page, order);
63696
63697 @@ -665,6 +669,12 @@ static bool free_pages_prepare(struct pa
63698 debug_check_no_obj_freed(page_address(page),
63699 PAGE_SIZE << order);
63700 }
63701 +
63702 +#ifdef CONFIG_PAX_MEMORY_SANITIZE
63703 + for (; index; --index)
63704 + sanitize_highpage(page + index - 1);
63705 +#endif
63706 +
63707 arch_free_page(page, order);
63708 kernel_map_pages(page, 1 << order, 0);
63709
63710 @@ -780,8 +790,10 @@ static int prep_new_page(struct page *pa
63711 arch_alloc_page(page, order);
63712 kernel_map_pages(page, 1 << order, 1);
63713
63714 +#ifndef CONFIG_PAX_MEMORY_SANITIZE
63715 if (gfp_flags & __GFP_ZERO)
63716 prep_zero_page(page, order, gfp_flags);
63717 +#endif
63718
63719 if (order && (gfp_flags & __GFP_COMP))
63720 prep_compound_page(page, order);
63721 @@ -2504,6 +2516,8 @@ void __show_free_areas(unsigned int filt
63722 int cpu;
63723 struct zone *zone;
63724
63725 + pax_track_stack();
63726 +
63727 for_each_populated_zone(zone) {
63728 if (skip_free_areas_zone(filter, zone))
63729 continue;
63730 diff -urNp linux-2.6.39.4/mm/percpu.c linux-2.6.39.4/mm/percpu.c
63731 --- linux-2.6.39.4/mm/percpu.c 2011-05-19 00:06:34.000000000 -0400
63732 +++ linux-2.6.39.4/mm/percpu.c 2011-08-05 19:44:37.000000000 -0400
63733 @@ -121,7 +121,7 @@ static unsigned int pcpu_first_unit_cpu
63734 static unsigned int pcpu_last_unit_cpu __read_mostly;
63735
63736 /* the address of the first chunk which starts with the kernel static area */
63737 -void *pcpu_base_addr __read_mostly;
63738 +void *pcpu_base_addr __read_only;
63739 EXPORT_SYMBOL_GPL(pcpu_base_addr);
63740
63741 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
63742 diff -urNp linux-2.6.39.4/mm/rmap.c linux-2.6.39.4/mm/rmap.c
63743 --- linux-2.6.39.4/mm/rmap.c 2011-05-19 00:06:34.000000000 -0400
63744 +++ linux-2.6.39.4/mm/rmap.c 2011-08-05 19:44:37.000000000 -0400
63745 @@ -131,6 +131,10 @@ int anon_vma_prepare(struct vm_area_stru
63746 struct anon_vma *anon_vma = vma->anon_vma;
63747 struct anon_vma_chain *avc;
63748
63749 +#ifdef CONFIG_PAX_SEGMEXEC
63750 + struct anon_vma_chain *avc_m = NULL;
63751 +#endif
63752 +
63753 might_sleep();
63754 if (unlikely(!anon_vma)) {
63755 struct mm_struct *mm = vma->vm_mm;
63756 @@ -140,6 +144,12 @@ int anon_vma_prepare(struct vm_area_stru
63757 if (!avc)
63758 goto out_enomem;
63759
63760 +#ifdef CONFIG_PAX_SEGMEXEC
63761 + avc_m = anon_vma_chain_alloc();
63762 + if (!avc_m)
63763 + goto out_enomem_free_avc;
63764 +#endif
63765 +
63766 anon_vma = find_mergeable_anon_vma(vma);
63767 allocated = NULL;
63768 if (!anon_vma) {
63769 @@ -153,6 +163,21 @@ int anon_vma_prepare(struct vm_area_stru
63770 /* page_table_lock to protect against threads */
63771 spin_lock(&mm->page_table_lock);
63772 if (likely(!vma->anon_vma)) {
63773 +
63774 +#ifdef CONFIG_PAX_SEGMEXEC
63775 + struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
63776 +
63777 + if (vma_m) {
63778 + BUG_ON(vma_m->anon_vma);
63779 + vma_m->anon_vma = anon_vma;
63780 + avc_m->anon_vma = anon_vma;
63781 + avc_m->vma = vma;
63782 + list_add(&avc_m->same_vma, &vma_m->anon_vma_chain);
63783 + list_add(&avc_m->same_anon_vma, &anon_vma->head);
63784 + avc_m = NULL;
63785 + }
63786 +#endif
63787 +
63788 vma->anon_vma = anon_vma;
63789 avc->anon_vma = anon_vma;
63790 avc->vma = vma;
63791 @@ -166,12 +191,24 @@ int anon_vma_prepare(struct vm_area_stru
63792
63793 if (unlikely(allocated))
63794 put_anon_vma(allocated);
63795 +
63796 +#ifdef CONFIG_PAX_SEGMEXEC
63797 + if (unlikely(avc_m))
63798 + anon_vma_chain_free(avc_m);
63799 +#endif
63800 +
63801 if (unlikely(avc))
63802 anon_vma_chain_free(avc);
63803 }
63804 return 0;
63805
63806 out_enomem_free_avc:
63807 +
63808 +#ifdef CONFIG_PAX_SEGMEXEC
63809 + if (avc_m)
63810 + anon_vma_chain_free(avc_m);
63811 +#endif
63812 +
63813 anon_vma_chain_free(avc);
63814 out_enomem:
63815 return -ENOMEM;
63816 @@ -198,7 +235,7 @@ static void anon_vma_chain_link(struct v
63817 * Attach the anon_vmas from src to dst.
63818 * Returns 0 on success, -ENOMEM on failure.
63819 */
63820 -int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
63821 +int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
63822 {
63823 struct anon_vma_chain *avc, *pavc;
63824
63825 @@ -220,7 +257,7 @@ int anon_vma_clone(struct vm_area_struct
63826 * the corresponding VMA in the parent process is attached to.
63827 * Returns 0 on success, non-zero on failure.
63828 */
63829 -int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
63830 +int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
63831 {
63832 struct anon_vma_chain *avc;
63833 struct anon_vma *anon_vma;
63834 diff -urNp linux-2.6.39.4/mm/shmem.c linux-2.6.39.4/mm/shmem.c
63835 --- linux-2.6.39.4/mm/shmem.c 2011-06-03 00:04:14.000000000 -0400
63836 +++ linux-2.6.39.4/mm/shmem.c 2011-08-05 19:44:37.000000000 -0400
63837 @@ -31,7 +31,7 @@
63838 #include <linux/percpu_counter.h>
63839 #include <linux/swap.h>
63840
63841 -static struct vfsmount *shm_mnt;
63842 +struct vfsmount *shm_mnt;
63843
63844 #ifdef CONFIG_SHMEM
63845 /*
63846 @@ -1087,6 +1087,8 @@ static int shmem_writepage(struct page *
63847 goto unlock;
63848 }
63849 entry = shmem_swp_entry(info, index, NULL);
63850 + if (!entry)
63851 + goto unlock;
63852 if (entry->val) {
63853 /*
63854 * The more uptodate page coming down from a stacked
63855 @@ -1158,6 +1160,8 @@ static struct page *shmem_swapin(swp_ent
63856 struct vm_area_struct pvma;
63857 struct page *page;
63858
63859 + pax_track_stack();
63860 +
63861 spol = mpol_cond_copy(&mpol,
63862 mpol_shared_policy_lookup(&info->policy, idx));
63863
63864 @@ -2014,7 +2018,7 @@ static int shmem_symlink(struct inode *d
63865
63866 info = SHMEM_I(inode);
63867 inode->i_size = len-1;
63868 - if (len <= (char *)inode - (char *)info) {
63869 + if (len <= (char *)inode - (char *)info && len <= 64) {
63870 /* do it inline */
63871 memcpy(info, symname, len);
63872 inode->i_op = &shmem_symlink_inline_operations;
63873 @@ -2362,8 +2366,7 @@ int shmem_fill_super(struct super_block
63874 int err = -ENOMEM;
63875
63876 /* Round up to L1_CACHE_BYTES to resist false sharing */
63877 - sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
63878 - L1_CACHE_BYTES), GFP_KERNEL);
63879 + sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
63880 if (!sbinfo)
63881 return -ENOMEM;
63882
63883 diff -urNp linux-2.6.39.4/mm/slab.c linux-2.6.39.4/mm/slab.c
63884 --- linux-2.6.39.4/mm/slab.c 2011-05-19 00:06:34.000000000 -0400
63885 +++ linux-2.6.39.4/mm/slab.c 2011-08-05 19:44:37.000000000 -0400
63886 @@ -150,7 +150,7 @@
63887
63888 /* Legal flag mask for kmem_cache_create(). */
63889 #if DEBUG
63890 -# define CREATE_MASK (SLAB_RED_ZONE | \
63891 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
63892 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
63893 SLAB_CACHE_DMA | \
63894 SLAB_STORE_USER | \
63895 @@ -158,7 +158,7 @@
63896 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63897 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
63898 #else
63899 -# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
63900 +# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
63901 SLAB_CACHE_DMA | \
63902 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
63903 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
63904 @@ -287,7 +287,7 @@ struct kmem_list3 {
63905 * Need this for bootstrapping a per node allocator.
63906 */
63907 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
63908 -static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
63909 +static struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
63910 #define CACHE_CACHE 0
63911 #define SIZE_AC MAX_NUMNODES
63912 #define SIZE_L3 (2 * MAX_NUMNODES)
63913 @@ -388,10 +388,10 @@ static void kmem_list3_init(struct kmem_
63914 if ((x)->max_freeable < i) \
63915 (x)->max_freeable = i; \
63916 } while (0)
63917 -#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
63918 -#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
63919 -#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
63920 -#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
63921 +#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
63922 +#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
63923 +#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
63924 +#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
63925 #else
63926 #define STATS_INC_ACTIVE(x) do { } while (0)
63927 #define STATS_DEC_ACTIVE(x) do { } while (0)
63928 @@ -537,7 +537,7 @@ static inline void *index_to_obj(struct
63929 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
63930 */
63931 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
63932 - const struct slab *slab, void *obj)
63933 + const struct slab *slab, const void *obj)
63934 {
63935 u32 offset = (obj - slab->s_mem);
63936 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
63937 @@ -563,7 +563,7 @@ struct cache_names {
63938 static struct cache_names __initdata cache_names[] = {
63939 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
63940 #include <linux/kmalloc_sizes.h>
63941 - {NULL,}
63942 + {NULL}
63943 #undef CACHE
63944 };
63945
63946 @@ -1529,7 +1529,7 @@ void __init kmem_cache_init(void)
63947 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
63948 sizes[INDEX_AC].cs_size,
63949 ARCH_KMALLOC_MINALIGN,
63950 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63951 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63952 NULL);
63953
63954 if (INDEX_AC != INDEX_L3) {
63955 @@ -1537,7 +1537,7 @@ void __init kmem_cache_init(void)
63956 kmem_cache_create(names[INDEX_L3].name,
63957 sizes[INDEX_L3].cs_size,
63958 ARCH_KMALLOC_MINALIGN,
63959 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63960 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63961 NULL);
63962 }
63963
63964 @@ -1555,7 +1555,7 @@ void __init kmem_cache_init(void)
63965 sizes->cs_cachep = kmem_cache_create(names->name,
63966 sizes->cs_size,
63967 ARCH_KMALLOC_MINALIGN,
63968 - ARCH_KMALLOC_FLAGS|SLAB_PANIC,
63969 + ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
63970 NULL);
63971 }
63972 #ifdef CONFIG_ZONE_DMA
63973 @@ -4270,10 +4270,10 @@ static int s_show(struct seq_file *m, vo
63974 }
63975 /* cpu stats */
63976 {
63977 - unsigned long allochit = atomic_read(&cachep->allochit);
63978 - unsigned long allocmiss = atomic_read(&cachep->allocmiss);
63979 - unsigned long freehit = atomic_read(&cachep->freehit);
63980 - unsigned long freemiss = atomic_read(&cachep->freemiss);
63981 + unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
63982 + unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
63983 + unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
63984 + unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
63985
63986 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
63987 allochit, allocmiss, freehit, freemiss);
63988 @@ -4530,15 +4530,66 @@ static const struct file_operations proc
63989
63990 static int __init slab_proc_init(void)
63991 {
63992 - proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
63993 + mode_t gr_mode = S_IRUGO;
63994 +
63995 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
63996 + gr_mode = S_IRUSR;
63997 +#endif
63998 +
63999 + proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
64000 #ifdef CONFIG_DEBUG_SLAB_LEAK
64001 - proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
64002 + proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
64003 #endif
64004 return 0;
64005 }
64006 module_init(slab_proc_init);
64007 #endif
64008
64009 +void check_object_size(const void *ptr, unsigned long n, bool to)
64010 +{
64011 +
64012 +#ifdef CONFIG_PAX_USERCOPY
64013 + struct page *page;
64014 + struct kmem_cache *cachep = NULL;
64015 + struct slab *slabp;
64016 + unsigned int objnr;
64017 + unsigned long offset;
64018 +
64019 + if (!n)
64020 + return;
64021 +
64022 + if (ZERO_OR_NULL_PTR(ptr))
64023 + goto report;
64024 +
64025 + if (!virt_addr_valid(ptr))
64026 + return;
64027 +
64028 + page = virt_to_head_page(ptr);
64029 +
64030 + if (!PageSlab(page)) {
64031 + if (object_is_on_stack(ptr, n) == -1)
64032 + goto report;
64033 + return;
64034 + }
64035 +
64036 + cachep = page_get_cache(page);
64037 + if (!(cachep->flags & SLAB_USERCOPY))
64038 + goto report;
64039 +
64040 + slabp = page_get_slab(page);
64041 + objnr = obj_to_index(cachep, slabp, ptr);
64042 + BUG_ON(objnr >= cachep->num);
64043 + offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
64044 + if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
64045 + return;
64046 +
64047 +report:
64048 + pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
64049 +#endif
64050 +
64051 +}
64052 +EXPORT_SYMBOL(check_object_size);
64053 +
64054 /**
64055 * ksize - get the actual amount of memory allocated for a given object
64056 * @objp: Pointer to the object
64057 diff -urNp linux-2.6.39.4/mm/slob.c linux-2.6.39.4/mm/slob.c
64058 --- linux-2.6.39.4/mm/slob.c 2011-05-19 00:06:34.000000000 -0400
64059 +++ linux-2.6.39.4/mm/slob.c 2011-08-05 19:44:37.000000000 -0400
64060 @@ -29,7 +29,7 @@
64061 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
64062 * alloc_pages() directly, allocating compound pages so the page order
64063 * does not have to be separately tracked, and also stores the exact
64064 - * allocation size in page->private so that it can be used to accurately
64065 + * allocation size in slob_page->size so that it can be used to accurately
64066 * provide ksize(). These objects are detected in kfree() because slob_page()
64067 * is false for them.
64068 *
64069 @@ -58,6 +58,7 @@
64070 */
64071
64072 #include <linux/kernel.h>
64073 +#include <linux/sched.h>
64074 #include <linux/slab.h>
64075 #include <linux/mm.h>
64076 #include <linux/swap.h> /* struct reclaim_state */
64077 @@ -102,7 +103,8 @@ struct slob_page {
64078 unsigned long flags; /* mandatory */
64079 atomic_t _count; /* mandatory */
64080 slobidx_t units; /* free units left in page */
64081 - unsigned long pad[2];
64082 + unsigned long pad[1];
64083 + unsigned long size; /* size when >=PAGE_SIZE */
64084 slob_t *free; /* first free slob_t in page */
64085 struct list_head list; /* linked list of free pages */
64086 };
64087 @@ -135,7 +137,7 @@ static LIST_HEAD(free_slob_large);
64088 */
64089 static inline int is_slob_page(struct slob_page *sp)
64090 {
64091 - return PageSlab((struct page *)sp);
64092 + return PageSlab((struct page *)sp) && !sp->size;
64093 }
64094
64095 static inline void set_slob_page(struct slob_page *sp)
64096 @@ -150,7 +152,7 @@ static inline void clear_slob_page(struc
64097
64098 static inline struct slob_page *slob_page(const void *addr)
64099 {
64100 - return (struct slob_page *)virt_to_page(addr);
64101 + return (struct slob_page *)virt_to_head_page(addr);
64102 }
64103
64104 /*
64105 @@ -210,7 +212,7 @@ static void set_slob(slob_t *s, slobidx_
64106 /*
64107 * Return the size of a slob block.
64108 */
64109 -static slobidx_t slob_units(slob_t *s)
64110 +static slobidx_t slob_units(const slob_t *s)
64111 {
64112 if (s->units > 0)
64113 return s->units;
64114 @@ -220,7 +222,7 @@ static slobidx_t slob_units(slob_t *s)
64115 /*
64116 * Return the next free slob block pointer after this one.
64117 */
64118 -static slob_t *slob_next(slob_t *s)
64119 +static slob_t *slob_next(const slob_t *s)
64120 {
64121 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
64122 slobidx_t next;
64123 @@ -235,7 +237,7 @@ static slob_t *slob_next(slob_t *s)
64124 /*
64125 * Returns true if s is the last free block in its page.
64126 */
64127 -static int slob_last(slob_t *s)
64128 +static int slob_last(const slob_t *s)
64129 {
64130 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
64131 }
64132 @@ -254,6 +256,7 @@ static void *slob_new_pages(gfp_t gfp, i
64133 if (!page)
64134 return NULL;
64135
64136 + set_slob_page(page);
64137 return page_address(page);
64138 }
64139
64140 @@ -370,11 +373,11 @@ static void *slob_alloc(size_t size, gfp
64141 if (!b)
64142 return NULL;
64143 sp = slob_page(b);
64144 - set_slob_page(sp);
64145
64146 spin_lock_irqsave(&slob_lock, flags);
64147 sp->units = SLOB_UNITS(PAGE_SIZE);
64148 sp->free = b;
64149 + sp->size = 0;
64150 INIT_LIST_HEAD(&sp->list);
64151 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
64152 set_slob_page_free(sp, slob_list);
64153 @@ -476,10 +479,9 @@ out:
64154 * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend.
64155 */
64156
64157 -void *__kmalloc_node(size_t size, gfp_t gfp, int node)
64158 +static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
64159 {
64160 - unsigned int *m;
64161 - int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64162 + slob_t *m;
64163 void *ret;
64164
64165 lockdep_trace_alloc(gfp);
64166 @@ -492,7 +494,10 @@ void *__kmalloc_node(size_t size, gfp_t
64167
64168 if (!m)
64169 return NULL;
64170 - *m = size;
64171 + BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
64172 + BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
64173 + m[0].units = size;
64174 + m[1].units = align;
64175 ret = (void *)m + align;
64176
64177 trace_kmalloc_node(_RET_IP_, ret,
64178 @@ -504,16 +509,25 @@ void *__kmalloc_node(size_t size, gfp_t
64179 gfp |= __GFP_COMP;
64180 ret = slob_new_pages(gfp, order, node);
64181 if (ret) {
64182 - struct page *page;
64183 - page = virt_to_page(ret);
64184 - page->private = size;
64185 + struct slob_page *sp;
64186 + sp = slob_page(ret);
64187 + sp->size = size;
64188 }
64189
64190 trace_kmalloc_node(_RET_IP_, ret,
64191 size, PAGE_SIZE << order, gfp, node);
64192 }
64193
64194 - kmemleak_alloc(ret, size, 1, gfp);
64195 + return ret;
64196 +}
64197 +
64198 +void *__kmalloc_node(size_t size, gfp_t gfp, int node)
64199 +{
64200 + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64201 + void *ret = __kmalloc_node_align(size, gfp, node, align);
64202 +
64203 + if (!ZERO_OR_NULL_PTR(ret))
64204 + kmemleak_alloc(ret, size, 1, gfp);
64205 return ret;
64206 }
64207 EXPORT_SYMBOL(__kmalloc_node);
64208 @@ -531,13 +545,88 @@ void kfree(const void *block)
64209 sp = slob_page(block);
64210 if (is_slob_page(sp)) {
64211 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64212 - unsigned int *m = (unsigned int *)(block - align);
64213 - slob_free(m, *m + align);
64214 - } else
64215 + slob_t *m = (slob_t *)(block - align);
64216 + slob_free(m, m[0].units + align);
64217 + } else {
64218 + clear_slob_page(sp);
64219 + free_slob_page(sp);
64220 + sp->size = 0;
64221 put_page(&sp->page);
64222 + }
64223 }
64224 EXPORT_SYMBOL(kfree);
64225
64226 +void check_object_size(const void *ptr, unsigned long n, bool to)
64227 +{
64228 +
64229 +#ifdef CONFIG_PAX_USERCOPY
64230 + struct slob_page *sp;
64231 + const slob_t *free;
64232 + const void *base;
64233 + unsigned long flags;
64234 +
64235 + if (!n)
64236 + return;
64237 +
64238 + if (ZERO_OR_NULL_PTR(ptr))
64239 + goto report;
64240 +
64241 + if (!virt_addr_valid(ptr))
64242 + return;
64243 +
64244 + sp = slob_page(ptr);
64245 + if (!PageSlab((struct page*)sp)) {
64246 + if (object_is_on_stack(ptr, n) == -1)
64247 + goto report;
64248 + return;
64249 + }
64250 +
64251 + if (sp->size) {
64252 + base = page_address(&sp->page);
64253 + if (base <= ptr && n <= sp->size - (ptr - base))
64254 + return;
64255 + goto report;
64256 + }
64257 +
64258 + /* some tricky double walking to find the chunk */
64259 + spin_lock_irqsave(&slob_lock, flags);
64260 + base = (void *)((unsigned long)ptr & PAGE_MASK);
64261 + free = sp->free;
64262 +
64263 + while (!slob_last(free) && (void *)free <= ptr) {
64264 + base = free + slob_units(free);
64265 + free = slob_next(free);
64266 + }
64267 +
64268 + while (base < (void *)free) {
64269 + slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
64270 + int size = SLOB_UNIT * SLOB_UNITS(m + align);
64271 + int offset;
64272 +
64273 + if (ptr < base + align)
64274 + break;
64275 +
64276 + offset = ptr - base - align;
64277 + if (offset >= m) {
64278 + base += size;
64279 + continue;
64280 + }
64281 +
64282 + if (n > m - offset)
64283 + break;
64284 +
64285 + spin_unlock_irqrestore(&slob_lock, flags);
64286 + return;
64287 + }
64288 +
64289 + spin_unlock_irqrestore(&slob_lock, flags);
64290 +report:
64291 + pax_report_usercopy(ptr, n, to, NULL);
64292 +#endif
64293 +
64294 +}
64295 +EXPORT_SYMBOL(check_object_size);
64296 +
64297 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
64298 size_t ksize(const void *block)
64299 {
64300 @@ -550,10 +639,10 @@ size_t ksize(const void *block)
64301 sp = slob_page(block);
64302 if (is_slob_page(sp)) {
64303 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
64304 - unsigned int *m = (unsigned int *)(block - align);
64305 - return SLOB_UNITS(*m) * SLOB_UNIT;
64306 + slob_t *m = (slob_t *)(block - align);
64307 + return SLOB_UNITS(m[0].units) * SLOB_UNIT;
64308 } else
64309 - return sp->page.private;
64310 + return sp->size;
64311 }
64312 EXPORT_SYMBOL(ksize);
64313
64314 @@ -569,8 +658,13 @@ struct kmem_cache *kmem_cache_create(con
64315 {
64316 struct kmem_cache *c;
64317
64318 +#ifdef CONFIG_PAX_USERCOPY
64319 + c = __kmalloc_node_align(sizeof(struct kmem_cache),
64320 + GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
64321 +#else
64322 c = slob_alloc(sizeof(struct kmem_cache),
64323 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
64324 +#endif
64325
64326 if (c) {
64327 c->name = name;
64328 @@ -608,17 +702,25 @@ void *kmem_cache_alloc_node(struct kmem_
64329 {
64330 void *b;
64331
64332 +#ifdef CONFIG_PAX_USERCOPY
64333 + b = __kmalloc_node_align(c->size, flags, node, c->align);
64334 +#else
64335 if (c->size < PAGE_SIZE) {
64336 b = slob_alloc(c->size, flags, c->align, node);
64337 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
64338 SLOB_UNITS(c->size) * SLOB_UNIT,
64339 flags, node);
64340 } else {
64341 + struct slob_page *sp;
64342 +
64343 b = slob_new_pages(flags, get_order(c->size), node);
64344 + sp = slob_page(b);
64345 + sp->size = c->size;
64346 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
64347 PAGE_SIZE << get_order(c->size),
64348 flags, node);
64349 }
64350 +#endif
64351
64352 if (c->ctor)
64353 c->ctor(b);
64354 @@ -630,10 +732,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
64355
64356 static void __kmem_cache_free(void *b, int size)
64357 {
64358 - if (size < PAGE_SIZE)
64359 + struct slob_page *sp = slob_page(b);
64360 +
64361 + if (is_slob_page(sp))
64362 slob_free(b, size);
64363 - else
64364 + else {
64365 + clear_slob_page(sp);
64366 + free_slob_page(sp);
64367 + sp->size = 0;
64368 slob_free_pages(b, get_order(size));
64369 + }
64370 }
64371
64372 static void kmem_rcu_free(struct rcu_head *head)
64373 @@ -646,17 +754,31 @@ static void kmem_rcu_free(struct rcu_hea
64374
64375 void kmem_cache_free(struct kmem_cache *c, void *b)
64376 {
64377 + int size = c->size;
64378 +
64379 +#ifdef CONFIG_PAX_USERCOPY
64380 + if (size + c->align < PAGE_SIZE) {
64381 + size += c->align;
64382 + b -= c->align;
64383 + }
64384 +#endif
64385 +
64386 kmemleak_free_recursive(b, c->flags);
64387 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
64388 struct slob_rcu *slob_rcu;
64389 - slob_rcu = b + (c->size - sizeof(struct slob_rcu));
64390 - slob_rcu->size = c->size;
64391 + slob_rcu = b + (size - sizeof(struct slob_rcu));
64392 + slob_rcu->size = size;
64393 call_rcu(&slob_rcu->head, kmem_rcu_free);
64394 } else {
64395 - __kmem_cache_free(b, c->size);
64396 + __kmem_cache_free(b, size);
64397 }
64398
64399 +#ifdef CONFIG_PAX_USERCOPY
64400 + trace_kfree(_RET_IP_, b);
64401 +#else
64402 trace_kmem_cache_free(_RET_IP_, b);
64403 +#endif
64404 +
64405 }
64406 EXPORT_SYMBOL(kmem_cache_free);
64407
64408 diff -urNp linux-2.6.39.4/mm/slub.c linux-2.6.39.4/mm/slub.c
64409 --- linux-2.6.39.4/mm/slub.c 2011-06-03 00:04:14.000000000 -0400
64410 +++ linux-2.6.39.4/mm/slub.c 2011-08-05 19:44:37.000000000 -0400
64411 @@ -431,7 +431,7 @@ static void print_track(const char *s, s
64412 if (!t->addr)
64413 return;
64414
64415 - printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
64416 + printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
64417 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
64418 }
64419
64420 @@ -2183,6 +2183,8 @@ void kmem_cache_free(struct kmem_cache *
64421
64422 page = virt_to_head_page(x);
64423
64424 + BUG_ON(!PageSlab(page));
64425 +
64426 slab_free(s, page, x, _RET_IP_);
64427
64428 trace_kmem_cache_free(_RET_IP_, x);
64429 @@ -2216,7 +2218,7 @@ static int slub_min_objects;
64430 * Merge control. If this is set then no merging of slab caches will occur.
64431 * (Could be removed. This was introduced to pacify the merge skeptics.)
64432 */
64433 -static int slub_nomerge;
64434 +static int slub_nomerge = 1;
64435
64436 /*
64437 * Calculate the order of allocation given an slab object size.
64438 @@ -2644,7 +2646,7 @@ static int kmem_cache_open(struct kmem_c
64439 * list to avoid pounding the page allocator excessively.
64440 */
64441 set_min_partial(s, ilog2(s->size));
64442 - s->refcount = 1;
64443 + atomic_set(&s->refcount, 1);
64444 #ifdef CONFIG_NUMA
64445 s->remote_node_defrag_ratio = 1000;
64446 #endif
64447 @@ -2750,8 +2752,7 @@ static inline int kmem_cache_close(struc
64448 void kmem_cache_destroy(struct kmem_cache *s)
64449 {
64450 down_write(&slub_lock);
64451 - s->refcount--;
64452 - if (!s->refcount) {
64453 + if (atomic_dec_and_test(&s->refcount)) {
64454 list_del(&s->list);
64455 if (kmem_cache_close(s)) {
64456 printk(KERN_ERR "SLUB %s: %s called for cache that "
64457 @@ -2961,6 +2962,46 @@ void *__kmalloc_node(size_t size, gfp_t
64458 EXPORT_SYMBOL(__kmalloc_node);
64459 #endif
64460
64461 +void check_object_size(const void *ptr, unsigned long n, bool to)
64462 +{
64463 +
64464 +#ifdef CONFIG_PAX_USERCOPY
64465 + struct page *page;
64466 + struct kmem_cache *s = NULL;
64467 + unsigned long offset;
64468 +
64469 + if (!n)
64470 + return;
64471 +
64472 + if (ZERO_OR_NULL_PTR(ptr))
64473 + goto report;
64474 +
64475 + if (!virt_addr_valid(ptr))
64476 + return;
64477 +
64478 + page = virt_to_head_page(ptr);
64479 +
64480 + if (!PageSlab(page)) {
64481 + if (object_is_on_stack(ptr, n) == -1)
64482 + goto report;
64483 + return;
64484 + }
64485 +
64486 + s = page->slab;
64487 + if (!(s->flags & SLAB_USERCOPY))
64488 + goto report;
64489 +
64490 + offset = (ptr - page_address(page)) % s->size;
64491 + if (offset <= s->objsize && n <= s->objsize - offset)
64492 + return;
64493 +
64494 +report:
64495 + pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
64496 +#endif
64497 +
64498 +}
64499 +EXPORT_SYMBOL(check_object_size);
64500 +
64501 size_t ksize(const void *object)
64502 {
64503 struct page *page;
64504 @@ -3205,7 +3246,7 @@ static void __init kmem_cache_bootstrap_
64505 int node;
64506
64507 list_add(&s->list, &slab_caches);
64508 - s->refcount = -1;
64509 + atomic_set(&s->refcount, -1);
64510
64511 for_each_node_state(node, N_NORMAL_MEMORY) {
64512 struct kmem_cache_node *n = get_node(s, node);
64513 @@ -3322,17 +3363,17 @@ void __init kmem_cache_init(void)
64514
64515 /* Caches that are not of the two-to-the-power-of size */
64516 if (KMALLOC_MIN_SIZE <= 32) {
64517 - kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, 0);
64518 + kmalloc_caches[1] = create_kmalloc_cache("kmalloc-96", 96, SLAB_USERCOPY);
64519 caches++;
64520 }
64521
64522 if (KMALLOC_MIN_SIZE <= 64) {
64523 - kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, 0);
64524 + kmalloc_caches[2] = create_kmalloc_cache("kmalloc-192", 192, SLAB_USERCOPY);
64525 caches++;
64526 }
64527
64528 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
64529 - kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, 0);
64530 + kmalloc_caches[i] = create_kmalloc_cache("kmalloc", 1 << i, SLAB_USERCOPY);
64531 caches++;
64532 }
64533
64534 @@ -3400,7 +3441,7 @@ static int slab_unmergeable(struct kmem_
64535 /*
64536 * We may have set a slab to be unmergeable during bootstrap.
64537 */
64538 - if (s->refcount < 0)
64539 + if (atomic_read(&s->refcount) < 0)
64540 return 1;
64541
64542 return 0;
64543 @@ -3459,7 +3500,7 @@ struct kmem_cache *kmem_cache_create(con
64544 down_write(&slub_lock);
64545 s = find_mergeable(size, align, flags, name, ctor);
64546 if (s) {
64547 - s->refcount++;
64548 + atomic_inc(&s->refcount);
64549 /*
64550 * Adjust the object sizes so that we clear
64551 * the complete object on kzalloc.
64552 @@ -3468,7 +3509,7 @@ struct kmem_cache *kmem_cache_create(con
64553 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
64554
64555 if (sysfs_slab_alias(s, name)) {
64556 - s->refcount--;
64557 + atomic_dec(&s->refcount);
64558 goto err;
64559 }
64560 up_write(&slub_lock);
64561 @@ -4201,7 +4242,7 @@ SLAB_ATTR_RO(ctor);
64562
64563 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
64564 {
64565 - return sprintf(buf, "%d\n", s->refcount - 1);
64566 + return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
64567 }
64568 SLAB_ATTR_RO(aliases);
64569
64570 @@ -4945,7 +4986,13 @@ static const struct file_operations proc
64571
64572 static int __init slab_proc_init(void)
64573 {
64574 - proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
64575 + mode_t gr_mode = S_IRUGO;
64576 +
64577 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
64578 + gr_mode = S_IRUSR;
64579 +#endif
64580 +
64581 + proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
64582 return 0;
64583 }
64584 module_init(slab_proc_init);
64585 diff -urNp linux-2.6.39.4/mm/swap.c linux-2.6.39.4/mm/swap.c
64586 --- linux-2.6.39.4/mm/swap.c 2011-05-19 00:06:34.000000000 -0400
64587 +++ linux-2.6.39.4/mm/swap.c 2011-08-05 19:44:37.000000000 -0400
64588 @@ -31,6 +31,7 @@
64589 #include <linux/backing-dev.h>
64590 #include <linux/memcontrol.h>
64591 #include <linux/gfp.h>
64592 +#include <linux/hugetlb.h>
64593
64594 #include "internal.h"
64595
64596 @@ -71,6 +72,8 @@ static void __put_compound_page(struct p
64597
64598 __page_cache_release(page);
64599 dtor = get_compound_page_dtor(page);
64600 + if (!PageHuge(page))
64601 + BUG_ON(dtor != free_compound_page);
64602 (*dtor)(page);
64603 }
64604
64605 diff -urNp linux-2.6.39.4/mm/swapfile.c linux-2.6.39.4/mm/swapfile.c
64606 --- linux-2.6.39.4/mm/swapfile.c 2011-05-19 00:06:34.000000000 -0400
64607 +++ linux-2.6.39.4/mm/swapfile.c 2011-08-05 19:44:37.000000000 -0400
64608 @@ -61,7 +61,7 @@ static DEFINE_MUTEX(swapon_mutex);
64609
64610 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
64611 /* Activity counter to indicate that a swapon or swapoff has occurred */
64612 -static atomic_t proc_poll_event = ATOMIC_INIT(0);
64613 +static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
64614
64615 static inline unsigned char swap_count(unsigned char ent)
64616 {
64617 @@ -1669,7 +1669,7 @@ SYSCALL_DEFINE1(swapoff, const char __us
64618 }
64619 filp_close(swap_file, NULL);
64620 err = 0;
64621 - atomic_inc(&proc_poll_event);
64622 + atomic_inc_unchecked(&proc_poll_event);
64623 wake_up_interruptible(&proc_poll_wait);
64624
64625 out_dput:
64626 @@ -1690,8 +1690,8 @@ static unsigned swaps_poll(struct file *
64627
64628 poll_wait(file, &proc_poll_wait, wait);
64629
64630 - if (s->event != atomic_read(&proc_poll_event)) {
64631 - s->event = atomic_read(&proc_poll_event);
64632 + if (s->event != atomic_read_unchecked(&proc_poll_event)) {
64633 + s->event = atomic_read_unchecked(&proc_poll_event);
64634 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
64635 }
64636
64637 @@ -1797,7 +1797,7 @@ static int swaps_open(struct inode *inod
64638 }
64639
64640 s->seq.private = s;
64641 - s->event = atomic_read(&proc_poll_event);
64642 + s->event = atomic_read_unchecked(&proc_poll_event);
64643 return ret;
64644 }
64645
64646 @@ -2131,7 +2131,7 @@ SYSCALL_DEFINE2(swapon, const char __use
64647 (p->flags & SWP_DISCARDABLE) ? "D" : "");
64648
64649 mutex_unlock(&swapon_mutex);
64650 - atomic_inc(&proc_poll_event);
64651 + atomic_inc_unchecked(&proc_poll_event);
64652 wake_up_interruptible(&proc_poll_wait);
64653
64654 if (S_ISREG(inode->i_mode))
64655 diff -urNp linux-2.6.39.4/mm/util.c linux-2.6.39.4/mm/util.c
64656 --- linux-2.6.39.4/mm/util.c 2011-05-19 00:06:34.000000000 -0400
64657 +++ linux-2.6.39.4/mm/util.c 2011-08-05 19:44:37.000000000 -0400
64658 @@ -112,6 +112,7 @@ EXPORT_SYMBOL(memdup_user);
64659 * allocated buffer. Use this if you don't want to free the buffer immediately
64660 * like, for example, with RCU.
64661 */
64662 +#undef __krealloc
64663 void *__krealloc(const void *p, size_t new_size, gfp_t flags)
64664 {
64665 void *ret;
64666 @@ -145,6 +146,7 @@ EXPORT_SYMBOL(__krealloc);
64667 * behaves exactly like kmalloc(). If @size is 0 and @p is not a
64668 * %NULL pointer, the object pointed to is freed.
64669 */
64670 +#undef krealloc
64671 void *krealloc(const void *p, size_t new_size, gfp_t flags)
64672 {
64673 void *ret;
64674 @@ -219,6 +221,12 @@ EXPORT_SYMBOL(strndup_user);
64675 void arch_pick_mmap_layout(struct mm_struct *mm)
64676 {
64677 mm->mmap_base = TASK_UNMAPPED_BASE;
64678 +
64679 +#ifdef CONFIG_PAX_RANDMMAP
64680 + if (mm->pax_flags & MF_PAX_RANDMMAP)
64681 + mm->mmap_base += mm->delta_mmap;
64682 +#endif
64683 +
64684 mm->get_unmapped_area = arch_get_unmapped_area;
64685 mm->unmap_area = arch_unmap_area;
64686 }
64687 diff -urNp linux-2.6.39.4/mm/vmalloc.c linux-2.6.39.4/mm/vmalloc.c
64688 --- linux-2.6.39.4/mm/vmalloc.c 2011-05-19 00:06:34.000000000 -0400
64689 +++ linux-2.6.39.4/mm/vmalloc.c 2011-08-05 19:44:37.000000000 -0400
64690 @@ -39,8 +39,19 @@ static void vunmap_pte_range(pmd_t *pmd,
64691
64692 pte = pte_offset_kernel(pmd, addr);
64693 do {
64694 - pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64695 - WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64696 +
64697 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64698 + if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
64699 + BUG_ON(!pte_exec(*pte));
64700 + set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
64701 + continue;
64702 + }
64703 +#endif
64704 +
64705 + {
64706 + pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
64707 + WARN_ON(!pte_none(ptent) && !pte_present(ptent));
64708 + }
64709 } while (pte++, addr += PAGE_SIZE, addr != end);
64710 }
64711
64712 @@ -91,6 +102,7 @@ static int vmap_pte_range(pmd_t *pmd, un
64713 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
64714 {
64715 pte_t *pte;
64716 + int ret = -ENOMEM;
64717
64718 /*
64719 * nr is a running index into the array which helps higher level
64720 @@ -100,17 +112,30 @@ static int vmap_pte_range(pmd_t *pmd, un
64721 pte = pte_alloc_kernel(pmd, addr);
64722 if (!pte)
64723 return -ENOMEM;
64724 +
64725 + pax_open_kernel();
64726 do {
64727 struct page *page = pages[*nr];
64728
64729 - if (WARN_ON(!pte_none(*pte)))
64730 - return -EBUSY;
64731 - if (WARN_ON(!page))
64732 - return -ENOMEM;
64733 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64734 + if (pgprot_val(prot) & _PAGE_NX)
64735 +#endif
64736 +
64737 + if (WARN_ON(!pte_none(*pte))) {
64738 + ret = -EBUSY;
64739 + goto out;
64740 + }
64741 + if (WARN_ON(!page)) {
64742 + ret = -ENOMEM;
64743 + goto out;
64744 + }
64745 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
64746 (*nr)++;
64747 } while (pte++, addr += PAGE_SIZE, addr != end);
64748 - return 0;
64749 + ret = 0;
64750 +out:
64751 + pax_close_kernel();
64752 + return ret;
64753 }
64754
64755 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
64756 @@ -191,11 +216,20 @@ int is_vmalloc_or_module_addr(const void
64757 * and fall back on vmalloc() if that fails. Others
64758 * just put it in the vmalloc space.
64759 */
64760 -#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
64761 +#ifdef CONFIG_MODULES
64762 +#ifdef MODULES_VADDR
64763 unsigned long addr = (unsigned long)x;
64764 if (addr >= MODULES_VADDR && addr < MODULES_END)
64765 return 1;
64766 #endif
64767 +
64768 +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
64769 + if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
64770 + return 1;
64771 +#endif
64772 +
64773 +#endif
64774 +
64775 return is_vmalloc_addr(x);
64776 }
64777
64778 @@ -216,8 +250,14 @@ struct page *vmalloc_to_page(const void
64779
64780 if (!pgd_none(*pgd)) {
64781 pud_t *pud = pud_offset(pgd, addr);
64782 +#ifdef CONFIG_X86
64783 + if (!pud_large(*pud))
64784 +#endif
64785 if (!pud_none(*pud)) {
64786 pmd_t *pmd = pmd_offset(pud, addr);
64787 +#ifdef CONFIG_X86
64788 + if (!pmd_large(*pmd))
64789 +#endif
64790 if (!pmd_none(*pmd)) {
64791 pte_t *ptep, pte;
64792
64793 @@ -1296,6 +1336,16 @@ static struct vm_struct *__get_vm_area_n
64794 struct vm_struct *area;
64795
64796 BUG_ON(in_interrupt());
64797 +
64798 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64799 + if (flags & VM_KERNEXEC) {
64800 + if (start != VMALLOC_START || end != VMALLOC_END)
64801 + return NULL;
64802 + start = (unsigned long)MODULES_EXEC_VADDR;
64803 + end = (unsigned long)MODULES_EXEC_END;
64804 + }
64805 +#endif
64806 +
64807 if (flags & VM_IOREMAP) {
64808 int bit = fls(size);
64809
64810 @@ -1514,6 +1564,11 @@ void *vmap(struct page **pages, unsigned
64811 if (count > totalram_pages)
64812 return NULL;
64813
64814 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64815 + if (!(pgprot_val(prot) & _PAGE_NX))
64816 + flags |= VM_KERNEXEC;
64817 +#endif
64818 +
64819 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
64820 __builtin_return_address(0));
64821 if (!area)
64822 @@ -1610,6 +1665,13 @@ void *__vmalloc_node_range(unsigned long
64823 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
64824 return NULL;
64825
64826 +#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
64827 + if (!(pgprot_val(prot) & _PAGE_NX))
64828 + area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
64829 + node, gfp_mask, caller);
64830 + else
64831 +#endif
64832 +
64833 area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,
64834 gfp_mask, caller);
64835
64836 @@ -1649,6 +1711,7 @@ static void *__vmalloc_node(unsigned lon
64837 gfp_mask, prot, node, caller);
64838 }
64839
64840 +#undef __vmalloc
64841 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
64842 {
64843 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
64844 @@ -1672,6 +1735,7 @@ static inline void *__vmalloc_node_flags
64845 * For tight control over page level allocator and protection flags
64846 * use __vmalloc() instead.
64847 */
64848 +#undef vmalloc
64849 void *vmalloc(unsigned long size)
64850 {
64851 return __vmalloc_node_flags(size, -1, GFP_KERNEL | __GFP_HIGHMEM);
64852 @@ -1688,6 +1752,7 @@ EXPORT_SYMBOL(vmalloc);
64853 * For tight control over page level allocator and protection flags
64854 * use __vmalloc() instead.
64855 */
64856 +#undef vzalloc
64857 void *vzalloc(unsigned long size)
64858 {
64859 return __vmalloc_node_flags(size, -1,
64860 @@ -1702,6 +1767,7 @@ EXPORT_SYMBOL(vzalloc);
64861 * The resulting memory area is zeroed so it can be mapped to userspace
64862 * without leaking data.
64863 */
64864 +#undef vmalloc_user
64865 void *vmalloc_user(unsigned long size)
64866 {
64867 struct vm_struct *area;
64868 @@ -1729,6 +1795,7 @@ EXPORT_SYMBOL(vmalloc_user);
64869 * For tight control over page level allocator and protection flags
64870 * use __vmalloc() instead.
64871 */
64872 +#undef vmalloc_node
64873 void *vmalloc_node(unsigned long size, int node)
64874 {
64875 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
64876 @@ -1748,6 +1815,7 @@ EXPORT_SYMBOL(vmalloc_node);
64877 * For tight control over page level allocator and protection flags
64878 * use __vmalloc_node() instead.
64879 */
64880 +#undef vzalloc_node
64881 void *vzalloc_node(unsigned long size, int node)
64882 {
64883 return __vmalloc_node_flags(size, node,
64884 @@ -1770,10 +1838,10 @@ EXPORT_SYMBOL(vzalloc_node);
64885 * For tight control over page level allocator and protection flags
64886 * use __vmalloc() instead.
64887 */
64888 -
64889 +#undef vmalloc_exec
64890 void *vmalloc_exec(unsigned long size)
64891 {
64892 - return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
64893 + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
64894 -1, __builtin_return_address(0));
64895 }
64896
64897 @@ -1792,6 +1860,7 @@ void *vmalloc_exec(unsigned long size)
64898 * Allocate enough 32bit PA addressable pages to cover @size from the
64899 * page level allocator and map them into contiguous kernel virtual space.
64900 */
64901 +#undef vmalloc_32
64902 void *vmalloc_32(unsigned long size)
64903 {
64904 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
64905 @@ -1806,6 +1875,7 @@ EXPORT_SYMBOL(vmalloc_32);
64906 * The resulting memory area is 32bit addressable and zeroed so it can be
64907 * mapped to userspace without leaking data.
64908 */
64909 +#undef vmalloc_32_user
64910 void *vmalloc_32_user(unsigned long size)
64911 {
64912 struct vm_struct *area;
64913 @@ -2068,6 +2138,8 @@ int remap_vmalloc_range(struct vm_area_s
64914 unsigned long uaddr = vma->vm_start;
64915 unsigned long usize = vma->vm_end - vma->vm_start;
64916
64917 + BUG_ON(vma->vm_mirror);
64918 +
64919 if ((PAGE_SIZE-1) & (unsigned long)addr)
64920 return -EINVAL;
64921
64922 diff -urNp linux-2.6.39.4/mm/vmstat.c linux-2.6.39.4/mm/vmstat.c
64923 --- linux-2.6.39.4/mm/vmstat.c 2011-05-19 00:06:34.000000000 -0400
64924 +++ linux-2.6.39.4/mm/vmstat.c 2011-08-05 19:44:37.000000000 -0400
64925 @@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
64926 *
64927 * vm_stat contains the global counters
64928 */
64929 -atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64930 +atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
64931 EXPORT_SYMBOL(vm_stat);
64932
64933 #ifdef CONFIG_SMP
64934 @@ -454,7 +454,7 @@ void refresh_cpu_vm_stats(int cpu)
64935 v = p->vm_stat_diff[i];
64936 p->vm_stat_diff[i] = 0;
64937 local_irq_restore(flags);
64938 - atomic_long_add(v, &zone->vm_stat[i]);
64939 + atomic_long_add_unchecked(v, &zone->vm_stat[i]);
64940 global_diff[i] += v;
64941 #ifdef CONFIG_NUMA
64942 /* 3 seconds idle till flush */
64943 @@ -492,7 +492,7 @@ void refresh_cpu_vm_stats(int cpu)
64944
64945 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
64946 if (global_diff[i])
64947 - atomic_long_add(global_diff[i], &vm_stat[i]);
64948 + atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
64949 }
64950
64951 #endif
64952 @@ -1205,10 +1205,20 @@ static int __init setup_vmstat(void)
64953 start_cpu_timer(cpu);
64954 #endif
64955 #ifdef CONFIG_PROC_FS
64956 - proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
64957 - proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
64958 - proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
64959 - proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
64960 + {
64961 + mode_t gr_mode = S_IRUGO;
64962 +#ifdef CONFIG_GRKERNSEC_PROC_ADD
64963 + gr_mode = S_IRUSR;
64964 +#endif
64965 + proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
64966 + proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
64967 +#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
64968 + proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
64969 +#else
64970 + proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
64971 +#endif
64972 + proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
64973 + }
64974 #endif
64975 return 0;
64976 }
64977 diff -urNp linux-2.6.39.4/net/8021q/vlan.c linux-2.6.39.4/net/8021q/vlan.c
64978 --- linux-2.6.39.4/net/8021q/vlan.c 2011-05-19 00:06:34.000000000 -0400
64979 +++ linux-2.6.39.4/net/8021q/vlan.c 2011-08-05 19:44:37.000000000 -0400
64980 @@ -592,8 +592,7 @@ static int vlan_ioctl_handler(struct net
64981 err = -EPERM;
64982 if (!capable(CAP_NET_ADMIN))
64983 break;
64984 - if ((args.u.name_type >= 0) &&
64985 - (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
64986 + if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
64987 struct vlan_net *vn;
64988
64989 vn = net_generic(net, vlan_net_id);
64990 diff -urNp linux-2.6.39.4/net/atm/atm_misc.c linux-2.6.39.4/net/atm/atm_misc.c
64991 --- linux-2.6.39.4/net/atm/atm_misc.c 2011-05-19 00:06:34.000000000 -0400
64992 +++ linux-2.6.39.4/net/atm/atm_misc.c 2011-08-05 19:44:37.000000000 -0400
64993 @@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int
64994 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
64995 return 1;
64996 atm_return(vcc, truesize);
64997 - atomic_inc(&vcc->stats->rx_drop);
64998 + atomic_inc_unchecked(&vcc->stats->rx_drop);
64999 return 0;
65000 }
65001 EXPORT_SYMBOL(atm_charge);
65002 @@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct
65003 }
65004 }
65005 atm_return(vcc, guess);
65006 - atomic_inc(&vcc->stats->rx_drop);
65007 + atomic_inc_unchecked(&vcc->stats->rx_drop);
65008 return NULL;
65009 }
65010 EXPORT_SYMBOL(atm_alloc_charge);
65011 @@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
65012
65013 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
65014 {
65015 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
65016 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
65017 __SONET_ITEMS
65018 #undef __HANDLE_ITEM
65019 }
65020 @@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
65021
65022 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
65023 {
65024 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
65025 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
65026 __SONET_ITEMS
65027 #undef __HANDLE_ITEM
65028 }
65029 diff -urNp linux-2.6.39.4/net/atm/lec.h linux-2.6.39.4/net/atm/lec.h
65030 --- linux-2.6.39.4/net/atm/lec.h 2011-05-19 00:06:34.000000000 -0400
65031 +++ linux-2.6.39.4/net/atm/lec.h 2011-08-05 20:34:06.000000000 -0400
65032 @@ -48,7 +48,7 @@ struct lane2_ops {
65033 const u8 *tlvs, u32 sizeoftlvs);
65034 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
65035 const u8 *tlvs, u32 sizeoftlvs);
65036 -};
65037 +} __no_const;
65038
65039 /*
65040 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
65041 diff -urNp linux-2.6.39.4/net/atm/mpc.h linux-2.6.39.4/net/atm/mpc.h
65042 --- linux-2.6.39.4/net/atm/mpc.h 2011-05-19 00:06:34.000000000 -0400
65043 +++ linux-2.6.39.4/net/atm/mpc.h 2011-08-05 20:34:06.000000000 -0400
65044 @@ -33,7 +33,7 @@ struct mpoa_client {
65045 struct mpc_parameters parameters; /* parameters for this client */
65046
65047 const struct net_device_ops *old_ops;
65048 - struct net_device_ops new_ops;
65049 + net_device_ops_no_const new_ops;
65050 };
65051
65052
65053 diff -urNp linux-2.6.39.4/net/atm/mpoa_caches.c linux-2.6.39.4/net/atm/mpoa_caches.c
65054 --- linux-2.6.39.4/net/atm/mpoa_caches.c 2011-05-19 00:06:34.000000000 -0400
65055 +++ linux-2.6.39.4/net/atm/mpoa_caches.c 2011-08-05 19:44:37.000000000 -0400
65056 @@ -255,6 +255,8 @@ static void check_resolving_entries(stru
65057 struct timeval now;
65058 struct k_message msg;
65059
65060 + pax_track_stack();
65061 +
65062 do_gettimeofday(&now);
65063
65064 read_lock_bh(&client->ingress_lock);
65065 diff -urNp linux-2.6.39.4/net/atm/proc.c linux-2.6.39.4/net/atm/proc.c
65066 --- linux-2.6.39.4/net/atm/proc.c 2011-05-19 00:06:34.000000000 -0400
65067 +++ linux-2.6.39.4/net/atm/proc.c 2011-08-05 19:44:37.000000000 -0400
65068 @@ -45,9 +45,9 @@ static void add_stats(struct seq_file *s
65069 const struct k_atm_aal_stats *stats)
65070 {
65071 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
65072 - atomic_read(&stats->tx), atomic_read(&stats->tx_err),
65073 - atomic_read(&stats->rx), atomic_read(&stats->rx_err),
65074 - atomic_read(&stats->rx_drop));
65075 + atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
65076 + atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
65077 + atomic_read_unchecked(&stats->rx_drop));
65078 }
65079
65080 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
65081 @@ -191,7 +191,12 @@ static void vcc_info(struct seq_file *se
65082 {
65083 struct sock *sk = sk_atm(vcc);
65084
65085 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65086 + seq_printf(seq, "%p ", NULL);
65087 +#else
65088 seq_printf(seq, "%p ", vcc);
65089 +#endif
65090 +
65091 if (!vcc->dev)
65092 seq_printf(seq, "Unassigned ");
65093 else
65094 @@ -218,7 +223,11 @@ static void svc_info(struct seq_file *se
65095 {
65096 if (!vcc->dev)
65097 seq_printf(seq, sizeof(void *) == 4 ?
65098 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65099 + "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
65100 +#else
65101 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
65102 +#endif
65103 else
65104 seq_printf(seq, "%3d %3d %5d ",
65105 vcc->dev->number, vcc->vpi, vcc->vci);
65106 diff -urNp linux-2.6.39.4/net/atm/resources.c linux-2.6.39.4/net/atm/resources.c
65107 --- linux-2.6.39.4/net/atm/resources.c 2011-05-19 00:06:34.000000000 -0400
65108 +++ linux-2.6.39.4/net/atm/resources.c 2011-08-05 19:44:37.000000000 -0400
65109 @@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
65110 static void copy_aal_stats(struct k_atm_aal_stats *from,
65111 struct atm_aal_stats *to)
65112 {
65113 -#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
65114 +#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
65115 __AAL_STAT_ITEMS
65116 #undef __HANDLE_ITEM
65117 }
65118 @@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_
65119 static void subtract_aal_stats(struct k_atm_aal_stats *from,
65120 struct atm_aal_stats *to)
65121 {
65122 -#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
65123 +#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
65124 __AAL_STAT_ITEMS
65125 #undef __HANDLE_ITEM
65126 }
65127 diff -urNp linux-2.6.39.4/net/batman-adv/hard-interface.c linux-2.6.39.4/net/batman-adv/hard-interface.c
65128 --- linux-2.6.39.4/net/batman-adv/hard-interface.c 2011-05-19 00:06:34.000000000 -0400
65129 +++ linux-2.6.39.4/net/batman-adv/hard-interface.c 2011-08-05 19:44:37.000000000 -0400
65130 @@ -339,8 +339,8 @@ int hardif_enable_interface(struct hard_
65131 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
65132 dev_add_pack(&hard_iface->batman_adv_ptype);
65133
65134 - atomic_set(&hard_iface->seqno, 1);
65135 - atomic_set(&hard_iface->frag_seqno, 1);
65136 + atomic_set_unchecked(&hard_iface->seqno, 1);
65137 + atomic_set_unchecked(&hard_iface->frag_seqno, 1);
65138 bat_info(hard_iface->soft_iface, "Adding interface: %s\n",
65139 hard_iface->net_dev->name);
65140
65141 diff -urNp linux-2.6.39.4/net/batman-adv/routing.c linux-2.6.39.4/net/batman-adv/routing.c
65142 --- linux-2.6.39.4/net/batman-adv/routing.c 2011-05-19 00:06:34.000000000 -0400
65143 +++ linux-2.6.39.4/net/batman-adv/routing.c 2011-08-05 19:44:37.000000000 -0400
65144 @@ -625,7 +625,7 @@ void receive_bat_packet(struct ethhdr *e
65145 return;
65146
65147 /* could be changed by schedule_own_packet() */
65148 - if_incoming_seqno = atomic_read(&if_incoming->seqno);
65149 + if_incoming_seqno = atomic_read_unchecked(&if_incoming->seqno);
65150
65151 has_directlink_flag = (batman_packet->flags & DIRECTLINK ? 1 : 0);
65152
65153 diff -urNp linux-2.6.39.4/net/batman-adv/send.c linux-2.6.39.4/net/batman-adv/send.c
65154 --- linux-2.6.39.4/net/batman-adv/send.c 2011-05-19 00:06:34.000000000 -0400
65155 +++ linux-2.6.39.4/net/batman-adv/send.c 2011-08-05 19:44:37.000000000 -0400
65156 @@ -277,7 +277,7 @@ void schedule_own_packet(struct hard_ifa
65157
65158 /* change sequence number to network order */
65159 batman_packet->seqno =
65160 - htonl((uint32_t)atomic_read(&hard_iface->seqno));
65161 + htonl((uint32_t)atomic_read_unchecked(&hard_iface->seqno));
65162
65163 if (vis_server == VIS_TYPE_SERVER_SYNC)
65164 batman_packet->flags |= VIS_SERVER;
65165 @@ -291,7 +291,7 @@ void schedule_own_packet(struct hard_ifa
65166 else
65167 batman_packet->gw_flags = 0;
65168
65169 - atomic_inc(&hard_iface->seqno);
65170 + atomic_inc_unchecked(&hard_iface->seqno);
65171
65172 slide_own_bcast_window(hard_iface);
65173 send_time = own_send_time(bat_priv);
65174 diff -urNp linux-2.6.39.4/net/batman-adv/soft-interface.c linux-2.6.39.4/net/batman-adv/soft-interface.c
65175 --- linux-2.6.39.4/net/batman-adv/soft-interface.c 2011-05-19 00:06:34.000000000 -0400
65176 +++ linux-2.6.39.4/net/batman-adv/soft-interface.c 2011-08-05 19:44:37.000000000 -0400
65177 @@ -386,7 +386,7 @@ int interface_tx(struct sk_buff *skb, st
65178
65179 /* set broadcast sequence number */
65180 bcast_packet->seqno =
65181 - htonl(atomic_inc_return(&bat_priv->bcast_seqno));
65182 + htonl(atomic_inc_return_unchecked(&bat_priv->bcast_seqno));
65183
65184 add_bcast_packet_to_list(bat_priv, skb);
65185
65186 @@ -579,7 +579,7 @@ struct net_device *softif_create(char *n
65187 atomic_set(&bat_priv->batman_queue_left, BATMAN_QUEUE_LEN);
65188
65189 atomic_set(&bat_priv->mesh_state, MESH_INACTIVE);
65190 - atomic_set(&bat_priv->bcast_seqno, 1);
65191 + atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
65192 atomic_set(&bat_priv->hna_local_changed, 0);
65193
65194 bat_priv->primary_if = NULL;
65195 diff -urNp linux-2.6.39.4/net/batman-adv/types.h linux-2.6.39.4/net/batman-adv/types.h
65196 --- linux-2.6.39.4/net/batman-adv/types.h 2011-05-19 00:06:34.000000000 -0400
65197 +++ linux-2.6.39.4/net/batman-adv/types.h 2011-08-05 19:44:37.000000000 -0400
65198 @@ -38,8 +38,8 @@ struct hard_iface {
65199 int16_t if_num;
65200 char if_status;
65201 struct net_device *net_dev;
65202 - atomic_t seqno;
65203 - atomic_t frag_seqno;
65204 + atomic_unchecked_t seqno;
65205 + atomic_unchecked_t frag_seqno;
65206 unsigned char *packet_buff;
65207 int packet_len;
65208 struct kobject *hardif_obj;
65209 @@ -141,7 +141,7 @@ struct bat_priv {
65210 atomic_t orig_interval; /* uint */
65211 atomic_t hop_penalty; /* uint */
65212 atomic_t log_level; /* uint */
65213 - atomic_t bcast_seqno;
65214 + atomic_unchecked_t bcast_seqno;
65215 atomic_t bcast_queue_left;
65216 atomic_t batman_queue_left;
65217 char num_ifaces;
65218 diff -urNp linux-2.6.39.4/net/batman-adv/unicast.c linux-2.6.39.4/net/batman-adv/unicast.c
65219 --- linux-2.6.39.4/net/batman-adv/unicast.c 2011-05-19 00:06:34.000000000 -0400
65220 +++ linux-2.6.39.4/net/batman-adv/unicast.c 2011-08-05 19:44:37.000000000 -0400
65221 @@ -263,7 +263,7 @@ int frag_send_skb(struct sk_buff *skb, s
65222 frag1->flags = UNI_FRAG_HEAD | large_tail;
65223 frag2->flags = large_tail;
65224
65225 - seqno = atomic_add_return(2, &hard_iface->frag_seqno);
65226 + seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
65227 frag1->seqno = htons(seqno - 1);
65228 frag2->seqno = htons(seqno);
65229
65230 diff -urNp linux-2.6.39.4/net/bluetooth/l2cap_core.c linux-2.6.39.4/net/bluetooth/l2cap_core.c
65231 --- linux-2.6.39.4/net/bluetooth/l2cap_core.c 2011-05-19 00:06:34.000000000 -0400
65232 +++ linux-2.6.39.4/net/bluetooth/l2cap_core.c 2011-08-05 19:44:37.000000000 -0400
65233 @@ -2202,7 +2202,7 @@ static inline int l2cap_config_req(struc
65234
65235 /* Reject if config buffer is too small. */
65236 len = cmd_len - sizeof(*req);
65237 - if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
65238 + if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
65239 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
65240 l2cap_build_conf_rsp(sk, rsp,
65241 L2CAP_CONF_REJECT, flags), rsp);
65242 diff -urNp linux-2.6.39.4/net/bluetooth/l2cap_sock.c linux-2.6.39.4/net/bluetooth/l2cap_sock.c
65243 --- linux-2.6.39.4/net/bluetooth/l2cap_sock.c 2011-05-19 00:06:34.000000000 -0400
65244 +++ linux-2.6.39.4/net/bluetooth/l2cap_sock.c 2011-08-05 19:44:37.000000000 -0400
65245 @@ -446,6 +446,7 @@ static int l2cap_sock_getsockopt_old(str
65246 break;
65247 }
65248
65249 + memset(&cinfo, 0, sizeof(cinfo));
65250 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
65251 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
65252
65253 diff -urNp linux-2.6.39.4/net/bluetooth/rfcomm/sock.c linux-2.6.39.4/net/bluetooth/rfcomm/sock.c
65254 --- linux-2.6.39.4/net/bluetooth/rfcomm/sock.c 2011-05-19 00:06:34.000000000 -0400
65255 +++ linux-2.6.39.4/net/bluetooth/rfcomm/sock.c 2011-08-05 19:44:37.000000000 -0400
65256 @@ -787,6 +787,7 @@ static int rfcomm_sock_getsockopt_old(st
65257
65258 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
65259
65260 + memset(&cinfo, 0, sizeof(cinfo));
65261 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
65262 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
65263
65264 diff -urNp linux-2.6.39.4/net/bridge/br_multicast.c linux-2.6.39.4/net/bridge/br_multicast.c
65265 --- linux-2.6.39.4/net/bridge/br_multicast.c 2011-05-19 00:06:34.000000000 -0400
65266 +++ linux-2.6.39.4/net/bridge/br_multicast.c 2011-08-05 19:44:37.000000000 -0400
65267 @@ -1482,7 +1482,7 @@ static int br_multicast_ipv6_rcv(struct
65268 nexthdr = ip6h->nexthdr;
65269 offset = ipv6_skip_exthdr(skb, sizeof(*ip6h), &nexthdr);
65270
65271 - if (offset < 0 || nexthdr != IPPROTO_ICMPV6)
65272 + if (nexthdr != IPPROTO_ICMPV6)
65273 return 0;
65274
65275 /* Okay, we found ICMPv6 header */
65276 diff -urNp linux-2.6.39.4/net/bridge/netfilter/ebtables.c linux-2.6.39.4/net/bridge/netfilter/ebtables.c
65277 --- linux-2.6.39.4/net/bridge/netfilter/ebtables.c 2011-05-19 00:06:34.000000000 -0400
65278 +++ linux-2.6.39.4/net/bridge/netfilter/ebtables.c 2011-08-05 19:44:37.000000000 -0400
65279 @@ -1512,7 +1512,7 @@ static int do_ebt_get_ctl(struct sock *s
65280 tmp.valid_hooks = t->table->valid_hooks;
65281 }
65282 mutex_unlock(&ebt_mutex);
65283 - if (copy_to_user(user, &tmp, *len) != 0){
65284 + if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
65285 BUGPRINT("c2u Didn't work\n");
65286 ret = -EFAULT;
65287 break;
65288 @@ -1780,6 +1780,8 @@ static int compat_copy_everything_to_use
65289 int ret;
65290 void __user *pos;
65291
65292 + pax_track_stack();
65293 +
65294 memset(&tinfo, 0, sizeof(tinfo));
65295
65296 if (cmd == EBT_SO_GET_ENTRIES) {
65297 diff -urNp linux-2.6.39.4/net/caif/caif_socket.c linux-2.6.39.4/net/caif/caif_socket.c
65298 --- linux-2.6.39.4/net/caif/caif_socket.c 2011-05-19 00:06:34.000000000 -0400
65299 +++ linux-2.6.39.4/net/caif/caif_socket.c 2011-08-05 19:44:37.000000000 -0400
65300 @@ -48,18 +48,19 @@ static struct dentry *debugfsdir;
65301 #ifdef CONFIG_DEBUG_FS
65302 struct debug_fs_counter {
65303 atomic_t caif_nr_socks;
65304 - atomic_t num_connect_req;
65305 - atomic_t num_connect_resp;
65306 - atomic_t num_connect_fail_resp;
65307 - atomic_t num_disconnect;
65308 - atomic_t num_remote_shutdown_ind;
65309 - atomic_t num_tx_flow_off_ind;
65310 - atomic_t num_tx_flow_on_ind;
65311 - atomic_t num_rx_flow_off;
65312 - atomic_t num_rx_flow_on;
65313 + atomic_unchecked_t num_connect_req;
65314 + atomic_unchecked_t num_connect_resp;
65315 + atomic_unchecked_t num_connect_fail_resp;
65316 + atomic_unchecked_t num_disconnect;
65317 + atomic_unchecked_t num_remote_shutdown_ind;
65318 + atomic_unchecked_t num_tx_flow_off_ind;
65319 + atomic_unchecked_t num_tx_flow_on_ind;
65320 + atomic_unchecked_t num_rx_flow_off;
65321 + atomic_unchecked_t num_rx_flow_on;
65322 };
65323 static struct debug_fs_counter cnt;
65324 #define dbfs_atomic_inc(v) atomic_inc(v)
65325 +#define dbfs_atomic_inc_unchecked(v) atomic_inc_unchecked(v)
65326 #define dbfs_atomic_dec(v) atomic_dec(v)
65327 #else
65328 #define dbfs_atomic_inc(v)
65329 @@ -159,7 +160,7 @@ static int caif_queue_rcv_skb(struct soc
65330 atomic_read(&cf_sk->sk.sk_rmem_alloc),
65331 sk_rcvbuf_lowwater(cf_sk));
65332 set_rx_flow_off(cf_sk);
65333 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
65334 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
65335 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
65336 }
65337
65338 @@ -169,7 +170,7 @@ static int caif_queue_rcv_skb(struct soc
65339 if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
65340 set_rx_flow_off(cf_sk);
65341 pr_debug("sending flow OFF due to rmem_schedule\n");
65342 - dbfs_atomic_inc(&cnt.num_rx_flow_off);
65343 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_off);
65344 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
65345 }
65346 skb->dev = NULL;
65347 @@ -218,21 +219,21 @@ static void caif_ctrl_cb(struct cflayer
65348 switch (flow) {
65349 case CAIF_CTRLCMD_FLOW_ON_IND:
65350 /* OK from modem to start sending again */
65351 - dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
65352 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_on_ind);
65353 set_tx_flow_on(cf_sk);
65354 cf_sk->sk.sk_state_change(&cf_sk->sk);
65355 break;
65356
65357 case CAIF_CTRLCMD_FLOW_OFF_IND:
65358 /* Modem asks us to shut up */
65359 - dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
65360 + dbfs_atomic_inc_unchecked(&cnt.num_tx_flow_off_ind);
65361 set_tx_flow_off(cf_sk);
65362 cf_sk->sk.sk_state_change(&cf_sk->sk);
65363 break;
65364
65365 case CAIF_CTRLCMD_INIT_RSP:
65366 /* We're now connected */
65367 - dbfs_atomic_inc(&cnt.num_connect_resp);
65368 + dbfs_atomic_inc_unchecked(&cnt.num_connect_resp);
65369 cf_sk->sk.sk_state = CAIF_CONNECTED;
65370 set_tx_flow_on(cf_sk);
65371 cf_sk->sk.sk_state_change(&cf_sk->sk);
65372 @@ -247,7 +248,7 @@ static void caif_ctrl_cb(struct cflayer
65373
65374 case CAIF_CTRLCMD_INIT_FAIL_RSP:
65375 /* Connect request failed */
65376 - dbfs_atomic_inc(&cnt.num_connect_fail_resp);
65377 + dbfs_atomic_inc_unchecked(&cnt.num_connect_fail_resp);
65378 cf_sk->sk.sk_err = ECONNREFUSED;
65379 cf_sk->sk.sk_state = CAIF_DISCONNECTED;
65380 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
65381 @@ -261,7 +262,7 @@ static void caif_ctrl_cb(struct cflayer
65382
65383 case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
65384 /* Modem has closed this connection, or device is down. */
65385 - dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
65386 + dbfs_atomic_inc_unchecked(&cnt.num_remote_shutdown_ind);
65387 cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
65388 cf_sk->sk.sk_err = ECONNRESET;
65389 set_rx_flow_on(cf_sk);
65390 @@ -281,7 +282,7 @@ static void caif_check_flow_release(stru
65391 return;
65392
65393 if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
65394 - dbfs_atomic_inc(&cnt.num_rx_flow_on);
65395 + dbfs_atomic_inc_unchecked(&cnt.num_rx_flow_on);
65396 set_rx_flow_on(cf_sk);
65397 caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
65398 }
65399 @@ -864,7 +865,7 @@ static int caif_connect(struct socket *s
65400 /*ifindex = id of the interface.*/
65401 cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
65402
65403 - dbfs_atomic_inc(&cnt.num_connect_req);
65404 + dbfs_atomic_inc_unchecked(&cnt.num_connect_req);
65405 cf_sk->layer.receive = caif_sktrecv_cb;
65406 err = caif_connect_client(&cf_sk->conn_req,
65407 &cf_sk->layer, &ifindex, &headroom, &tailroom);
65408 @@ -952,7 +953,7 @@ static int caif_release(struct socket *s
65409 spin_unlock(&sk->sk_receive_queue.lock);
65410 sock->sk = NULL;
65411
65412 - dbfs_atomic_inc(&cnt.num_disconnect);
65413 + dbfs_atomic_inc_unchecked(&cnt.num_disconnect);
65414
65415 if (cf_sk->debugfs_socket_dir != NULL)
65416 debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
65417 diff -urNp linux-2.6.39.4/net/caif/cfctrl.c linux-2.6.39.4/net/caif/cfctrl.c
65418 --- linux-2.6.39.4/net/caif/cfctrl.c 2011-05-19 00:06:34.000000000 -0400
65419 +++ linux-2.6.39.4/net/caif/cfctrl.c 2011-08-05 19:44:37.000000000 -0400
65420 @@ -9,6 +9,7 @@
65421 #include <linux/stddef.h>
65422 #include <linux/spinlock.h>
65423 #include <linux/slab.h>
65424 +#include <linux/sched.h>
65425 #include <net/caif/caif_layer.h>
65426 #include <net/caif/cfpkt.h>
65427 #include <net/caif/cfctrl.h>
65428 @@ -46,8 +47,8 @@ struct cflayer *cfctrl_create(void)
65429 dev_info.id = 0xff;
65430 memset(this, 0, sizeof(*this));
65431 cfsrvl_init(&this->serv, 0, &dev_info, false);
65432 - atomic_set(&this->req_seq_no, 1);
65433 - atomic_set(&this->rsp_seq_no, 1);
65434 + atomic_set_unchecked(&this->req_seq_no, 1);
65435 + atomic_set_unchecked(&this->rsp_seq_no, 1);
65436 this->serv.layer.receive = cfctrl_recv;
65437 sprintf(this->serv.layer.name, "ctrl");
65438 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
65439 @@ -116,8 +117,8 @@ void cfctrl_insert_req(struct cfctrl *ct
65440 struct cfctrl_request_info *req)
65441 {
65442 spin_lock(&ctrl->info_list_lock);
65443 - atomic_inc(&ctrl->req_seq_no);
65444 - req->sequence_no = atomic_read(&ctrl->req_seq_no);
65445 + atomic_inc_unchecked(&ctrl->req_seq_no);
65446 + req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
65447 list_add_tail(&req->list, &ctrl->list);
65448 spin_unlock(&ctrl->info_list_lock);
65449 }
65450 @@ -136,7 +137,7 @@ struct cfctrl_request_info *cfctrl_remov
65451 if (p != first)
65452 pr_warn("Requests are not received in order\n");
65453
65454 - atomic_set(&ctrl->rsp_seq_no,
65455 + atomic_set_unchecked(&ctrl->rsp_seq_no,
65456 p->sequence_no);
65457 list_del(&p->list);
65458 goto out;
65459 @@ -385,6 +386,7 @@ static int cfctrl_recv(struct cflayer *l
65460 struct cfctrl *cfctrl = container_obj(layer);
65461 struct cfctrl_request_info rsp, *req;
65462
65463 + pax_track_stack();
65464
65465 cfpkt_extr_head(pkt, &cmdrsp, 1);
65466 cmd = cmdrsp & CFCTRL_CMD_MASK;
65467 diff -urNp linux-2.6.39.4/net/can/bcm.c linux-2.6.39.4/net/can/bcm.c
65468 --- linux-2.6.39.4/net/can/bcm.c 2011-05-19 00:06:34.000000000 -0400
65469 +++ linux-2.6.39.4/net/can/bcm.c 2011-08-05 19:44:37.000000000 -0400
65470 @@ -165,9 +165,15 @@ static int bcm_proc_show(struct seq_file
65471 struct bcm_sock *bo = bcm_sk(sk);
65472 struct bcm_op *op;
65473
65474 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65475 + seq_printf(m, ">>> socket %p", NULL);
65476 + seq_printf(m, " / sk %p", NULL);
65477 + seq_printf(m, " / bo %p", NULL);
65478 +#else
65479 seq_printf(m, ">>> socket %p", sk->sk_socket);
65480 seq_printf(m, " / sk %p", sk);
65481 seq_printf(m, " / bo %p", bo);
65482 +#endif
65483 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
65484 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
65485 seq_printf(m, " <<<\n");
65486 diff -urNp linux-2.6.39.4/net/core/datagram.c linux-2.6.39.4/net/core/datagram.c
65487 --- linux-2.6.39.4/net/core/datagram.c 2011-05-19 00:06:34.000000000 -0400
65488 +++ linux-2.6.39.4/net/core/datagram.c 2011-08-05 19:44:37.000000000 -0400
65489 @@ -285,7 +285,7 @@ int skb_kill_datagram(struct sock *sk, s
65490 }
65491
65492 kfree_skb(skb);
65493 - atomic_inc(&sk->sk_drops);
65494 + atomic_inc_unchecked(&sk->sk_drops);
65495 sk_mem_reclaim_partial(sk);
65496
65497 return err;
65498 diff -urNp linux-2.6.39.4/net/core/dev.c linux-2.6.39.4/net/core/dev.c
65499 --- linux-2.6.39.4/net/core/dev.c 2011-06-03 00:04:14.000000000 -0400
65500 +++ linux-2.6.39.4/net/core/dev.c 2011-08-05 20:34:06.000000000 -0400
65501 @@ -1125,10 +1125,14 @@ void dev_load(struct net *net, const cha
65502 if (no_module && capable(CAP_NET_ADMIN))
65503 no_module = request_module("netdev-%s", name);
65504 if (no_module && capable(CAP_SYS_MODULE)) {
65505 +#ifdef CONFIG_GRKERNSEC_MODHARDEN
65506 + ___request_module(true, "grsec_modharden_netdev", "%s", name);
65507 +#else
65508 if (!request_module("%s", name))
65509 pr_err("Loading kernel module for a network device "
65510 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
65511 "instead\n", name);
65512 +#endif
65513 }
65514 }
65515 EXPORT_SYMBOL(dev_load);
65516 @@ -1951,7 +1955,7 @@ static int illegal_highdma(struct net_de
65517
65518 struct dev_gso_cb {
65519 void (*destructor)(struct sk_buff *skb);
65520 -};
65521 +} __no_const;
65522
65523 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
65524
65525 @@ -2901,7 +2905,7 @@ int netif_rx_ni(struct sk_buff *skb)
65526 }
65527 EXPORT_SYMBOL(netif_rx_ni);
65528
65529 -static void net_tx_action(struct softirq_action *h)
65530 +static void net_tx_action(void)
65531 {
65532 struct softnet_data *sd = &__get_cpu_var(softnet_data);
65533
65534 @@ -3765,7 +3769,7 @@ void netif_napi_del(struct napi_struct *
65535 }
65536 EXPORT_SYMBOL(netif_napi_del);
65537
65538 -static void net_rx_action(struct softirq_action *h)
65539 +static void net_rx_action(void)
65540 {
65541 struct softnet_data *sd = &__get_cpu_var(softnet_data);
65542 unsigned long time_limit = jiffies + 2;
65543 diff -urNp linux-2.6.39.4/net/core/flow.c linux-2.6.39.4/net/core/flow.c
65544 --- linux-2.6.39.4/net/core/flow.c 2011-05-19 00:06:34.000000000 -0400
65545 +++ linux-2.6.39.4/net/core/flow.c 2011-08-05 19:44:37.000000000 -0400
65546 @@ -60,7 +60,7 @@ struct flow_cache {
65547 struct timer_list rnd_timer;
65548 };
65549
65550 -atomic_t flow_cache_genid = ATOMIC_INIT(0);
65551 +atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
65552 EXPORT_SYMBOL(flow_cache_genid);
65553 static struct flow_cache flow_cache_global;
65554 static struct kmem_cache *flow_cachep __read_mostly;
65555 @@ -85,7 +85,7 @@ static void flow_cache_new_hashrnd(unsig
65556
65557 static int flow_entry_valid(struct flow_cache_entry *fle)
65558 {
65559 - if (atomic_read(&flow_cache_genid) != fle->genid)
65560 + if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
65561 return 0;
65562 if (fle->object && !fle->object->ops->check(fle->object))
65563 return 0;
65564 @@ -253,7 +253,7 @@ flow_cache_lookup(struct net *net, const
65565 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
65566 fcp->hash_count++;
65567 }
65568 - } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
65569 + } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
65570 flo = fle->object;
65571 if (!flo)
65572 goto ret_object;
65573 @@ -274,7 +274,7 @@ nocache:
65574 }
65575 flo = resolver(net, key, family, dir, flo, ctx);
65576 if (fle) {
65577 - fle->genid = atomic_read(&flow_cache_genid);
65578 + fle->genid = atomic_read_unchecked(&flow_cache_genid);
65579 if (!IS_ERR(flo))
65580 fle->object = flo;
65581 else
65582 diff -urNp linux-2.6.39.4/net/core/rtnetlink.c linux-2.6.39.4/net/core/rtnetlink.c
65583 --- linux-2.6.39.4/net/core/rtnetlink.c 2011-05-19 00:06:34.000000000 -0400
65584 +++ linux-2.6.39.4/net/core/rtnetlink.c 2011-08-05 20:34:06.000000000 -0400
65585 @@ -56,7 +56,7 @@
65586 struct rtnl_link {
65587 rtnl_doit_func doit;
65588 rtnl_dumpit_func dumpit;
65589 -};
65590 +} __no_const;
65591
65592 static DEFINE_MUTEX(rtnl_mutex);
65593
65594 diff -urNp linux-2.6.39.4/net/core/skbuff.c linux-2.6.39.4/net/core/skbuff.c
65595 --- linux-2.6.39.4/net/core/skbuff.c 2011-06-03 00:04:14.000000000 -0400
65596 +++ linux-2.6.39.4/net/core/skbuff.c 2011-08-05 19:44:37.000000000 -0400
65597 @@ -1542,6 +1542,8 @@ int skb_splice_bits(struct sk_buff *skb,
65598 struct sock *sk = skb->sk;
65599 int ret = 0;
65600
65601 + pax_track_stack();
65602 +
65603 if (splice_grow_spd(pipe, &spd))
65604 return -ENOMEM;
65605
65606 diff -urNp linux-2.6.39.4/net/core/sock.c linux-2.6.39.4/net/core/sock.c
65607 --- linux-2.6.39.4/net/core/sock.c 2011-05-19 00:06:34.000000000 -0400
65608 +++ linux-2.6.39.4/net/core/sock.c 2011-08-05 19:44:37.000000000 -0400
65609 @@ -291,7 +291,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65610 */
65611 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
65612 (unsigned)sk->sk_rcvbuf) {
65613 - atomic_inc(&sk->sk_drops);
65614 + atomic_inc_unchecked(&sk->sk_drops);
65615 return -ENOMEM;
65616 }
65617
65618 @@ -300,7 +300,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65619 return err;
65620
65621 if (!sk_rmem_schedule(sk, skb->truesize)) {
65622 - atomic_inc(&sk->sk_drops);
65623 + atomic_inc_unchecked(&sk->sk_drops);
65624 return -ENOBUFS;
65625 }
65626
65627 @@ -320,7 +320,7 @@ int sock_queue_rcv_skb(struct sock *sk,
65628 skb_dst_force(skb);
65629
65630 spin_lock_irqsave(&list->lock, flags);
65631 - skb->dropcount = atomic_read(&sk->sk_drops);
65632 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
65633 __skb_queue_tail(list, skb);
65634 spin_unlock_irqrestore(&list->lock, flags);
65635
65636 @@ -340,7 +340,7 @@ int sk_receive_skb(struct sock *sk, stru
65637 skb->dev = NULL;
65638
65639 if (sk_rcvqueues_full(sk, skb)) {
65640 - atomic_inc(&sk->sk_drops);
65641 + atomic_inc_unchecked(&sk->sk_drops);
65642 goto discard_and_relse;
65643 }
65644 if (nested)
65645 @@ -358,7 +358,7 @@ int sk_receive_skb(struct sock *sk, stru
65646 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
65647 } else if (sk_add_backlog(sk, skb)) {
65648 bh_unlock_sock(sk);
65649 - atomic_inc(&sk->sk_drops);
65650 + atomic_inc_unchecked(&sk->sk_drops);
65651 goto discard_and_relse;
65652 }
65653
65654 @@ -934,7 +934,7 @@ int sock_getsockopt(struct socket *sock,
65655 return -ENOTCONN;
65656 if (lv < len)
65657 return -EINVAL;
65658 - if (copy_to_user(optval, address, len))
65659 + if (len > sizeof(address) || copy_to_user(optval, address, len))
65660 return -EFAULT;
65661 goto lenout;
65662 }
65663 @@ -967,7 +967,7 @@ int sock_getsockopt(struct socket *sock,
65664
65665 if (len > lv)
65666 len = lv;
65667 - if (copy_to_user(optval, &v, len))
65668 + if (len > sizeof(v) || copy_to_user(optval, &v, len))
65669 return -EFAULT;
65670 lenout:
65671 if (put_user(len, optlen))
65672 @@ -2023,7 +2023,7 @@ void sock_init_data(struct socket *sock,
65673 */
65674 smp_wmb();
65675 atomic_set(&sk->sk_refcnt, 1);
65676 - atomic_set(&sk->sk_drops, 0);
65677 + atomic_set_unchecked(&sk->sk_drops, 0);
65678 }
65679 EXPORT_SYMBOL(sock_init_data);
65680
65681 diff -urNp linux-2.6.39.4/net/decnet/sysctl_net_decnet.c linux-2.6.39.4/net/decnet/sysctl_net_decnet.c
65682 --- linux-2.6.39.4/net/decnet/sysctl_net_decnet.c 2011-05-19 00:06:34.000000000 -0400
65683 +++ linux-2.6.39.4/net/decnet/sysctl_net_decnet.c 2011-08-05 19:44:37.000000000 -0400
65684 @@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t
65685
65686 if (len > *lenp) len = *lenp;
65687
65688 - if (copy_to_user(buffer, addr, len))
65689 + if (len > sizeof addr || copy_to_user(buffer, addr, len))
65690 return -EFAULT;
65691
65692 *lenp = len;
65693 @@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table
65694
65695 if (len > *lenp) len = *lenp;
65696
65697 - if (copy_to_user(buffer, devname, len))
65698 + if (len > sizeof devname || copy_to_user(buffer, devname, len))
65699 return -EFAULT;
65700
65701 *lenp = len;
65702 diff -urNp linux-2.6.39.4/net/econet/Kconfig linux-2.6.39.4/net/econet/Kconfig
65703 --- linux-2.6.39.4/net/econet/Kconfig 2011-05-19 00:06:34.000000000 -0400
65704 +++ linux-2.6.39.4/net/econet/Kconfig 2011-08-05 19:44:37.000000000 -0400
65705 @@ -4,7 +4,7 @@
65706
65707 config ECONET
65708 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
65709 - depends on EXPERIMENTAL && INET
65710 + depends on EXPERIMENTAL && INET && BROKEN
65711 ---help---
65712 Econet is a fairly old and slow networking protocol mainly used by
65713 Acorn computers to access file and print servers. It uses native
65714 diff -urNp linux-2.6.39.4/net/ipv4/fib_frontend.c linux-2.6.39.4/net/ipv4/fib_frontend.c
65715 --- linux-2.6.39.4/net/ipv4/fib_frontend.c 2011-05-19 00:06:34.000000000 -0400
65716 +++ linux-2.6.39.4/net/ipv4/fib_frontend.c 2011-08-05 19:44:37.000000000 -0400
65717 @@ -968,12 +968,12 @@ static int fib_inetaddr_event(struct not
65718 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65719 fib_sync_up(dev);
65720 #endif
65721 - atomic_inc(&net->ipv4.dev_addr_genid);
65722 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65723 rt_cache_flush(dev_net(dev), -1);
65724 break;
65725 case NETDEV_DOWN:
65726 fib_del_ifaddr(ifa, NULL);
65727 - atomic_inc(&net->ipv4.dev_addr_genid);
65728 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65729 if (ifa->ifa_dev->ifa_list == NULL) {
65730 /* Last address was deleted from this interface.
65731 * Disable IP.
65732 @@ -1009,7 +1009,7 @@ static int fib_netdev_event(struct notif
65733 #ifdef CONFIG_IP_ROUTE_MULTIPATH
65734 fib_sync_up(dev);
65735 #endif
65736 - atomic_inc(&net->ipv4.dev_addr_genid);
65737 + atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
65738 rt_cache_flush(dev_net(dev), -1);
65739 break;
65740 case NETDEV_DOWN:
65741 diff -urNp linux-2.6.39.4/net/ipv4/fib_semantics.c linux-2.6.39.4/net/ipv4/fib_semantics.c
65742 --- linux-2.6.39.4/net/ipv4/fib_semantics.c 2011-05-19 00:06:34.000000000 -0400
65743 +++ linux-2.6.39.4/net/ipv4/fib_semantics.c 2011-08-05 19:44:37.000000000 -0400
65744 @@ -701,7 +701,7 @@ __be32 fib_info_update_nh_saddr(struct n
65745 nh->nh_saddr = inet_select_addr(nh->nh_dev,
65746 nh->nh_gw,
65747 nh->nh_parent->fib_scope);
65748 - nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
65749 + nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
65750
65751 return nh->nh_saddr;
65752 }
65753 diff -urNp linux-2.6.39.4/net/ipv4/inet_diag.c linux-2.6.39.4/net/ipv4/inet_diag.c
65754 --- linux-2.6.39.4/net/ipv4/inet_diag.c 2011-07-09 09:18:51.000000000 -0400
65755 +++ linux-2.6.39.4/net/ipv4/inet_diag.c 2011-08-05 19:44:37.000000000 -0400
65756 @@ -114,8 +114,14 @@ static int inet_csk_diag_fill(struct soc
65757 r->idiag_retrans = 0;
65758
65759 r->id.idiag_if = sk->sk_bound_dev_if;
65760 +
65761 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65762 + r->id.idiag_cookie[0] = 0;
65763 + r->id.idiag_cookie[1] = 0;
65764 +#else
65765 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
65766 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
65767 +#endif
65768
65769 r->id.idiag_sport = inet->inet_sport;
65770 r->id.idiag_dport = inet->inet_dport;
65771 @@ -201,8 +207,15 @@ static int inet_twsk_diag_fill(struct in
65772 r->idiag_family = tw->tw_family;
65773 r->idiag_retrans = 0;
65774 r->id.idiag_if = tw->tw_bound_dev_if;
65775 +
65776 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65777 + r->id.idiag_cookie[0] = 0;
65778 + r->id.idiag_cookie[1] = 0;
65779 +#else
65780 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
65781 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
65782 +#endif
65783 +
65784 r->id.idiag_sport = tw->tw_sport;
65785 r->id.idiag_dport = tw->tw_dport;
65786 r->id.idiag_src[0] = tw->tw_rcv_saddr;
65787 @@ -285,12 +298,14 @@ static int inet_diag_get_exact(struct sk
65788 if (sk == NULL)
65789 goto unlock;
65790
65791 +#ifndef CONFIG_GRKERNSEC_HIDESYM
65792 err = -ESTALE;
65793 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
65794 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
65795 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
65796 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
65797 goto out;
65798 +#endif
65799
65800 err = -ENOMEM;
65801 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
65802 @@ -580,8 +595,14 @@ static int inet_diag_fill_req(struct sk_
65803 r->idiag_retrans = req->retrans;
65804
65805 r->id.idiag_if = sk->sk_bound_dev_if;
65806 +
65807 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65808 + r->id.idiag_cookie[0] = 0;
65809 + r->id.idiag_cookie[1] = 0;
65810 +#else
65811 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
65812 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
65813 +#endif
65814
65815 tmo = req->expires - jiffies;
65816 if (tmo < 0)
65817 diff -urNp linux-2.6.39.4/net/ipv4/inet_hashtables.c linux-2.6.39.4/net/ipv4/inet_hashtables.c
65818 --- linux-2.6.39.4/net/ipv4/inet_hashtables.c 2011-05-19 00:06:34.000000000 -0400
65819 +++ linux-2.6.39.4/net/ipv4/inet_hashtables.c 2011-08-05 19:44:37.000000000 -0400
65820 @@ -18,11 +18,14 @@
65821 #include <linux/sched.h>
65822 #include <linux/slab.h>
65823 #include <linux/wait.h>
65824 +#include <linux/security.h>
65825
65826 #include <net/inet_connection_sock.h>
65827 #include <net/inet_hashtables.h>
65828 #include <net/ip.h>
65829
65830 +extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
65831 +
65832 /*
65833 * Allocate and initialize a new local port bind bucket.
65834 * The bindhash mutex for snum's hash chain must be held here.
65835 @@ -529,6 +532,8 @@ ok:
65836 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
65837 spin_unlock(&head->lock);
65838
65839 + gr_update_task_in_ip_table(current, inet_sk(sk));
65840 +
65841 if (tw) {
65842 inet_twsk_deschedule(tw, death_row);
65843 while (twrefcnt) {
65844 diff -urNp linux-2.6.39.4/net/ipv4/inetpeer.c linux-2.6.39.4/net/ipv4/inetpeer.c
65845 --- linux-2.6.39.4/net/ipv4/inetpeer.c 2011-07-09 09:18:51.000000000 -0400
65846 +++ linux-2.6.39.4/net/ipv4/inetpeer.c 2011-08-05 19:44:37.000000000 -0400
65847 @@ -480,6 +480,8 @@ struct inet_peer *inet_getpeer(struct in
65848 unsigned int sequence;
65849 int invalidated, newrefcnt = 0;
65850
65851 + pax_track_stack();
65852 +
65853 /* Look up for the address quickly, lockless.
65854 * Because of a concurrent writer, we might not find an existing entry.
65855 */
65856 @@ -516,8 +518,8 @@ found: /* The existing node has been fo
65857 if (p) {
65858 p->daddr = *daddr;
65859 atomic_set(&p->refcnt, 1);
65860 - atomic_set(&p->rid, 0);
65861 - atomic_set(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65862 + atomic_set_unchecked(&p->rid, 0);
65863 + atomic_set_unchecked(&p->ip_id_count, secure_ip_id(daddr->addr.a4));
65864 p->tcp_ts_stamp = 0;
65865 p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
65866 p->rate_tokens = 0;
65867 diff -urNp linux-2.6.39.4/net/ipv4/ip_fragment.c linux-2.6.39.4/net/ipv4/ip_fragment.c
65868 --- linux-2.6.39.4/net/ipv4/ip_fragment.c 2011-05-19 00:06:34.000000000 -0400
65869 +++ linux-2.6.39.4/net/ipv4/ip_fragment.c 2011-08-05 19:44:37.000000000 -0400
65870 @@ -297,7 +297,7 @@ static inline int ip_frag_too_far(struct
65871 return 0;
65872
65873 start = qp->rid;
65874 - end = atomic_inc_return(&peer->rid);
65875 + end = atomic_inc_return_unchecked(&peer->rid);
65876 qp->rid = end;
65877
65878 rc = qp->q.fragments && (end - start) > max;
65879 diff -urNp linux-2.6.39.4/net/ipv4/ip_sockglue.c linux-2.6.39.4/net/ipv4/ip_sockglue.c
65880 --- linux-2.6.39.4/net/ipv4/ip_sockglue.c 2011-05-19 00:06:34.000000000 -0400
65881 +++ linux-2.6.39.4/net/ipv4/ip_sockglue.c 2011-08-05 19:44:37.000000000 -0400
65882 @@ -1064,6 +1064,8 @@ static int do_ip_getsockopt(struct sock
65883 int val;
65884 int len;
65885
65886 + pax_track_stack();
65887 +
65888 if (level != SOL_IP)
65889 return -EOPNOTSUPP;
65890
65891 diff -urNp linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c
65892 --- linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-05-19 00:06:34.000000000 -0400
65893 +++ linux-2.6.39.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-08-05 19:44:37.000000000 -0400
65894 @@ -399,7 +399,7 @@ static unsigned char asn1_octets_decode(
65895
65896 *len = 0;
65897
65898 - *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
65899 + *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
65900 if (*octets == NULL) {
65901 if (net_ratelimit())
65902 pr_notice("OOM in bsalg (%d)\n", __LINE__);
65903 diff -urNp linux-2.6.39.4/net/ipv4/raw.c linux-2.6.39.4/net/ipv4/raw.c
65904 --- linux-2.6.39.4/net/ipv4/raw.c 2011-05-19 00:06:34.000000000 -0400
65905 +++ linux-2.6.39.4/net/ipv4/raw.c 2011-08-14 11:22:59.000000000 -0400
65906 @@ -302,7 +302,7 @@ static int raw_rcv_skb(struct sock * sk,
65907 int raw_rcv(struct sock *sk, struct sk_buff *skb)
65908 {
65909 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
65910 - atomic_inc(&sk->sk_drops);
65911 + atomic_inc_unchecked(&sk->sk_drops);
65912 kfree_skb(skb);
65913 return NET_RX_DROP;
65914 }
65915 @@ -730,16 +730,20 @@ static int raw_init(struct sock *sk)
65916
65917 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
65918 {
65919 + struct icmp_filter filter;
65920 +
65921 if (optlen > sizeof(struct icmp_filter))
65922 optlen = sizeof(struct icmp_filter);
65923 - if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
65924 + if (copy_from_user(&filter, optval, optlen))
65925 return -EFAULT;
65926 + raw_sk(sk)->filter = filter;
65927 return 0;
65928 }
65929
65930 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
65931 {
65932 int len, ret = -EFAULT;
65933 + struct icmp_filter filter;
65934
65935 if (get_user(len, optlen))
65936 goto out;
65937 @@ -749,8 +753,9 @@ static int raw_geticmpfilter(struct sock
65938 if (len > sizeof(struct icmp_filter))
65939 len = sizeof(struct icmp_filter);
65940 ret = -EFAULT;
65941 - if (put_user(len, optlen) ||
65942 - copy_to_user(optval, &raw_sk(sk)->filter, len))
65943 + filter = raw_sk(sk)->filter;
65944 + if (put_user(len, optlen) || len > sizeof filter ||
65945 + copy_to_user(optval, &filter, len))
65946 goto out;
65947 ret = 0;
65948 out: return ret;
65949 @@ -978,7 +983,13 @@ static void raw_sock_seq_show(struct seq
65950 sk_wmem_alloc_get(sp),
65951 sk_rmem_alloc_get(sp),
65952 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
65953 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
65954 + atomic_read(&sp->sk_refcnt),
65955 +#ifdef CONFIG_GRKERNSEC_HIDESYM
65956 + NULL,
65957 +#else
65958 + sp,
65959 +#endif
65960 + atomic_read_unchecked(&sp->sk_drops));
65961 }
65962
65963 static int raw_seq_show(struct seq_file *seq, void *v)
65964 diff -urNp linux-2.6.39.4/net/ipv4/route.c linux-2.6.39.4/net/ipv4/route.c
65965 --- linux-2.6.39.4/net/ipv4/route.c 2011-07-09 09:18:51.000000000 -0400
65966 +++ linux-2.6.39.4/net/ipv4/route.c 2011-08-05 19:44:37.000000000 -0400
65967 @@ -303,7 +303,7 @@ static inline unsigned int rt_hash(__be3
65968
65969 static inline int rt_genid(struct net *net)
65970 {
65971 - return atomic_read(&net->ipv4.rt_genid);
65972 + return atomic_read_unchecked(&net->ipv4.rt_genid);
65973 }
65974
65975 #ifdef CONFIG_PROC_FS
65976 @@ -831,7 +831,7 @@ static void rt_cache_invalidate(struct n
65977 unsigned char shuffle;
65978
65979 get_random_bytes(&shuffle, sizeof(shuffle));
65980 - atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
65981 + atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
65982 }
65983
65984 /*
65985 @@ -2833,7 +2833,7 @@ static int rt_fill_info(struct net *net,
65986 rt->peer->pmtu_expires - jiffies : 0;
65987 if (rt->peer) {
65988 inet_peer_refcheck(rt->peer);
65989 - id = atomic_read(&rt->peer->ip_id_count) & 0xffff;
65990 + id = atomic_read_unchecked(&rt->peer->ip_id_count) & 0xffff;
65991 if (rt->peer->tcp_ts_stamp) {
65992 ts = rt->peer->tcp_ts;
65993 tsage = get_seconds() - rt->peer->tcp_ts_stamp;
65994 diff -urNp linux-2.6.39.4/net/ipv4/tcp.c linux-2.6.39.4/net/ipv4/tcp.c
65995 --- linux-2.6.39.4/net/ipv4/tcp.c 2011-05-19 00:06:34.000000000 -0400
65996 +++ linux-2.6.39.4/net/ipv4/tcp.c 2011-08-05 19:44:37.000000000 -0400
65997 @@ -2121,6 +2121,8 @@ static int do_tcp_setsockopt(struct sock
65998 int val;
65999 int err = 0;
66000
66001 + pax_track_stack();
66002 +
66003 /* These are data/string values, all the others are ints */
66004 switch (optname) {
66005 case TCP_CONGESTION: {
66006 @@ -2500,6 +2502,8 @@ static int do_tcp_getsockopt(struct sock
66007 struct tcp_sock *tp = tcp_sk(sk);
66008 int val, len;
66009
66010 + pax_track_stack();
66011 +
66012 if (get_user(len, optlen))
66013 return -EFAULT;
66014
66015 diff -urNp linux-2.6.39.4/net/ipv4/tcp_ipv4.c linux-2.6.39.4/net/ipv4/tcp_ipv4.c
66016 --- linux-2.6.39.4/net/ipv4/tcp_ipv4.c 2011-05-19 00:06:34.000000000 -0400
66017 +++ linux-2.6.39.4/net/ipv4/tcp_ipv4.c 2011-08-05 19:44:37.000000000 -0400
66018 @@ -86,6 +86,9 @@ int sysctl_tcp_tw_reuse __read_mostly;
66019 int sysctl_tcp_low_latency __read_mostly;
66020 EXPORT_SYMBOL(sysctl_tcp_low_latency);
66021
66022 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66023 +extern int grsec_enable_blackhole;
66024 +#endif
66025
66026 #ifdef CONFIG_TCP_MD5SIG
66027 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
66028 @@ -1594,6 +1597,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
66029 return 0;
66030
66031 reset:
66032 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66033 + if (!grsec_enable_blackhole)
66034 +#endif
66035 tcp_v4_send_reset(rsk, skb);
66036 discard:
66037 kfree_skb(skb);
66038 @@ -1656,12 +1662,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
66039 TCP_SKB_CB(skb)->sacked = 0;
66040
66041 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
66042 - if (!sk)
66043 + if (!sk) {
66044 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66045 + ret = 1;
66046 +#endif
66047 goto no_tcp_socket;
66048 -
66049 + }
66050 process:
66051 - if (sk->sk_state == TCP_TIME_WAIT)
66052 + if (sk->sk_state == TCP_TIME_WAIT) {
66053 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66054 + ret = 2;
66055 +#endif
66056 goto do_time_wait;
66057 + }
66058
66059 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
66060 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
66061 @@ -1711,6 +1724,10 @@ no_tcp_socket:
66062 bad_packet:
66063 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
66064 } else {
66065 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66066 + if (!grsec_enable_blackhole || (ret == 1 &&
66067 + (skb->dev->flags & IFF_LOOPBACK)))
66068 +#endif
66069 tcp_v4_send_reset(NULL, skb);
66070 }
66071
66072 @@ -2374,7 +2391,11 @@ static void get_openreq4(struct sock *sk
66073 0, /* non standard timer */
66074 0, /* open_requests have no inode */
66075 atomic_read(&sk->sk_refcnt),
66076 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66077 + NULL,
66078 +#else
66079 req,
66080 +#endif
66081 len);
66082 }
66083
66084 @@ -2424,7 +2445,12 @@ static void get_tcp4_sock(struct sock *s
66085 sock_i_uid(sk),
66086 icsk->icsk_probes_out,
66087 sock_i_ino(sk),
66088 - atomic_read(&sk->sk_refcnt), sk,
66089 + atomic_read(&sk->sk_refcnt),
66090 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66091 + NULL,
66092 +#else
66093 + sk,
66094 +#endif
66095 jiffies_to_clock_t(icsk->icsk_rto),
66096 jiffies_to_clock_t(icsk->icsk_ack.ato),
66097 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
66098 @@ -2452,7 +2478,13 @@ static void get_timewait4_sock(struct in
66099 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
66100 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
66101 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
66102 - atomic_read(&tw->tw_refcnt), tw, len);
66103 + atomic_read(&tw->tw_refcnt),
66104 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66105 + NULL,
66106 +#else
66107 + tw,
66108 +#endif
66109 + len);
66110 }
66111
66112 #define TMPSZ 150
66113 diff -urNp linux-2.6.39.4/net/ipv4/tcp_minisocks.c linux-2.6.39.4/net/ipv4/tcp_minisocks.c
66114 --- linux-2.6.39.4/net/ipv4/tcp_minisocks.c 2011-05-19 00:06:34.000000000 -0400
66115 +++ linux-2.6.39.4/net/ipv4/tcp_minisocks.c 2011-08-05 19:44:37.000000000 -0400
66116 @@ -27,6 +27,10 @@
66117 #include <net/inet_common.h>
66118 #include <net/xfrm.h>
66119
66120 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66121 +extern int grsec_enable_blackhole;
66122 +#endif
66123 +
66124 int sysctl_tcp_syncookies __read_mostly = 1;
66125 EXPORT_SYMBOL(sysctl_tcp_syncookies);
66126
66127 @@ -745,6 +749,10 @@ listen_overflow:
66128
66129 embryonic_reset:
66130 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
66131 +
66132 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66133 + if (!grsec_enable_blackhole)
66134 +#endif
66135 if (!(flg & TCP_FLAG_RST))
66136 req->rsk_ops->send_reset(sk, skb);
66137
66138 diff -urNp linux-2.6.39.4/net/ipv4/tcp_output.c linux-2.6.39.4/net/ipv4/tcp_output.c
66139 --- linux-2.6.39.4/net/ipv4/tcp_output.c 2011-05-19 00:06:34.000000000 -0400
66140 +++ linux-2.6.39.4/net/ipv4/tcp_output.c 2011-08-05 19:44:37.000000000 -0400
66141 @@ -2421,6 +2421,8 @@ struct sk_buff *tcp_make_synack(struct s
66142 int mss;
66143 int s_data_desired = 0;
66144
66145 + pax_track_stack();
66146 +
66147 if (cvp != NULL && cvp->s_data_constant && cvp->s_data_desired)
66148 s_data_desired = cvp->s_data_desired;
66149 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15 + s_data_desired, 1, GFP_ATOMIC);
66150 diff -urNp linux-2.6.39.4/net/ipv4/tcp_probe.c linux-2.6.39.4/net/ipv4/tcp_probe.c
66151 --- linux-2.6.39.4/net/ipv4/tcp_probe.c 2011-05-19 00:06:34.000000000 -0400
66152 +++ linux-2.6.39.4/net/ipv4/tcp_probe.c 2011-08-05 19:44:37.000000000 -0400
66153 @@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file
66154 if (cnt + width >= len)
66155 break;
66156
66157 - if (copy_to_user(buf + cnt, tbuf, width))
66158 + if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
66159 return -EFAULT;
66160 cnt += width;
66161 }
66162 diff -urNp linux-2.6.39.4/net/ipv4/tcp_timer.c linux-2.6.39.4/net/ipv4/tcp_timer.c
66163 --- linux-2.6.39.4/net/ipv4/tcp_timer.c 2011-05-19 00:06:34.000000000 -0400
66164 +++ linux-2.6.39.4/net/ipv4/tcp_timer.c 2011-08-05 19:44:37.000000000 -0400
66165 @@ -22,6 +22,10 @@
66166 #include <linux/gfp.h>
66167 #include <net/tcp.h>
66168
66169 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66170 +extern int grsec_lastack_retries;
66171 +#endif
66172 +
66173 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
66174 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
66175 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
66176 @@ -199,6 +203,13 @@ static int tcp_write_timeout(struct sock
66177 }
66178 }
66179
66180 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66181 + if ((sk->sk_state == TCP_LAST_ACK) &&
66182 + (grsec_lastack_retries > 0) &&
66183 + (grsec_lastack_retries < retry_until))
66184 + retry_until = grsec_lastack_retries;
66185 +#endif
66186 +
66187 if (retransmits_timed_out(sk, retry_until,
66188 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
66189 /* Has it gone just too far? */
66190 diff -urNp linux-2.6.39.4/net/ipv4/udp.c linux-2.6.39.4/net/ipv4/udp.c
66191 --- linux-2.6.39.4/net/ipv4/udp.c 2011-07-09 09:18:51.000000000 -0400
66192 +++ linux-2.6.39.4/net/ipv4/udp.c 2011-08-05 19:44:37.000000000 -0400
66193 @@ -86,6 +86,7 @@
66194 #include <linux/types.h>
66195 #include <linux/fcntl.h>
66196 #include <linux/module.h>
66197 +#include <linux/security.h>
66198 #include <linux/socket.h>
66199 #include <linux/sockios.h>
66200 #include <linux/igmp.h>
66201 @@ -107,6 +108,10 @@
66202 #include <net/xfrm.h>
66203 #include "udp_impl.h"
66204
66205 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66206 +extern int grsec_enable_blackhole;
66207 +#endif
66208 +
66209 struct udp_table udp_table __read_mostly;
66210 EXPORT_SYMBOL(udp_table);
66211
66212 @@ -564,6 +569,9 @@ found:
66213 return s;
66214 }
66215
66216 +extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
66217 +extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
66218 +
66219 /*
66220 * This routine is called by the ICMP module when it gets some
66221 * sort of error condition. If err < 0 then the socket should
66222 @@ -853,9 +861,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
66223 dport = usin->sin_port;
66224 if (dport == 0)
66225 return -EINVAL;
66226 +
66227 + err = gr_search_udp_sendmsg(sk, usin);
66228 + if (err)
66229 + return err;
66230 } else {
66231 if (sk->sk_state != TCP_ESTABLISHED)
66232 return -EDESTADDRREQ;
66233 +
66234 + err = gr_search_udp_sendmsg(sk, NULL);
66235 + if (err)
66236 + return err;
66237 +
66238 daddr = inet->inet_daddr;
66239 dport = inet->inet_dport;
66240 /* Open fast path for connected socket.
66241 @@ -1090,7 +1107,7 @@ static unsigned int first_packet_length(
66242 udp_lib_checksum_complete(skb)) {
66243 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
66244 IS_UDPLITE(sk));
66245 - atomic_inc(&sk->sk_drops);
66246 + atomic_inc_unchecked(&sk->sk_drops);
66247 __skb_unlink(skb, rcvq);
66248 __skb_queue_tail(&list_kill, skb);
66249 }
66250 @@ -1176,6 +1193,10 @@ try_again:
66251 if (!skb)
66252 goto out;
66253
66254 + err = gr_search_udp_recvmsg(sk, skb);
66255 + if (err)
66256 + goto out_free;
66257 +
66258 ulen = skb->len - sizeof(struct udphdr);
66259 if (len > ulen)
66260 len = ulen;
66261 @@ -1475,7 +1496,7 @@ int udp_queue_rcv_skb(struct sock *sk, s
66262
66263 drop:
66264 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
66265 - atomic_inc(&sk->sk_drops);
66266 + atomic_inc_unchecked(&sk->sk_drops);
66267 kfree_skb(skb);
66268 return -1;
66269 }
66270 @@ -1494,7 +1515,7 @@ static void flush_stack(struct sock **st
66271 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
66272
66273 if (!skb1) {
66274 - atomic_inc(&sk->sk_drops);
66275 + atomic_inc_unchecked(&sk->sk_drops);
66276 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
66277 IS_UDPLITE(sk));
66278 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
66279 @@ -1663,6 +1684,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
66280 goto csum_error;
66281
66282 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
66283 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66284 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
66285 +#endif
66286 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
66287
66288 /*
66289 @@ -2090,8 +2114,13 @@ static void udp4_format_sock(struct sock
66290 sk_wmem_alloc_get(sp),
66291 sk_rmem_alloc_get(sp),
66292 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
66293 - atomic_read(&sp->sk_refcnt), sp,
66294 - atomic_read(&sp->sk_drops), len);
66295 + atomic_read(&sp->sk_refcnt),
66296 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66297 + NULL,
66298 +#else
66299 + sp,
66300 +#endif
66301 + atomic_read_unchecked(&sp->sk_drops), len);
66302 }
66303
66304 int udp4_seq_show(struct seq_file *seq, void *v)
66305 diff -urNp linux-2.6.39.4/net/ipv6/inet6_connection_sock.c linux-2.6.39.4/net/ipv6/inet6_connection_sock.c
66306 --- linux-2.6.39.4/net/ipv6/inet6_connection_sock.c 2011-05-19 00:06:34.000000000 -0400
66307 +++ linux-2.6.39.4/net/ipv6/inet6_connection_sock.c 2011-08-05 19:44:37.000000000 -0400
66308 @@ -178,7 +178,7 @@ void __inet6_csk_dst_store(struct sock *
66309 #ifdef CONFIG_XFRM
66310 {
66311 struct rt6_info *rt = (struct rt6_info *)dst;
66312 - rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
66313 + rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
66314 }
66315 #endif
66316 }
66317 @@ -193,7 +193,7 @@ struct dst_entry *__inet6_csk_dst_check(
66318 #ifdef CONFIG_XFRM
66319 if (dst) {
66320 struct rt6_info *rt = (struct rt6_info *)dst;
66321 - if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
66322 + if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
66323 __sk_dst_reset(sk);
66324 dst = NULL;
66325 }
66326 diff -urNp linux-2.6.39.4/net/ipv6/ipv6_sockglue.c linux-2.6.39.4/net/ipv6/ipv6_sockglue.c
66327 --- linux-2.6.39.4/net/ipv6/ipv6_sockglue.c 2011-05-19 00:06:34.000000000 -0400
66328 +++ linux-2.6.39.4/net/ipv6/ipv6_sockglue.c 2011-08-05 19:44:37.000000000 -0400
66329 @@ -129,6 +129,8 @@ static int do_ipv6_setsockopt(struct soc
66330 int val, valbool;
66331 int retv = -ENOPROTOOPT;
66332
66333 + pax_track_stack();
66334 +
66335 if (optval == NULL)
66336 val=0;
66337 else {
66338 @@ -919,6 +921,8 @@ static int do_ipv6_getsockopt(struct soc
66339 int len;
66340 int val;
66341
66342 + pax_track_stack();
66343 +
66344 if (ip6_mroute_opt(optname))
66345 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
66346
66347 diff -urNp linux-2.6.39.4/net/ipv6/raw.c linux-2.6.39.4/net/ipv6/raw.c
66348 --- linux-2.6.39.4/net/ipv6/raw.c 2011-05-19 00:06:34.000000000 -0400
66349 +++ linux-2.6.39.4/net/ipv6/raw.c 2011-08-14 11:25:44.000000000 -0400
66350 @@ -376,7 +376,7 @@ static inline int rawv6_rcv_skb(struct s
66351 {
66352 if ((raw6_sk(sk)->checksum || rcu_dereference_raw(sk->sk_filter)) &&
66353 skb_checksum_complete(skb)) {
66354 - atomic_inc(&sk->sk_drops);
66355 + atomic_inc_unchecked(&sk->sk_drops);
66356 kfree_skb(skb);
66357 return NET_RX_DROP;
66358 }
66359 @@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
66360 struct raw6_sock *rp = raw6_sk(sk);
66361
66362 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
66363 - atomic_inc(&sk->sk_drops);
66364 + atomic_inc_unchecked(&sk->sk_drops);
66365 kfree_skb(skb);
66366 return NET_RX_DROP;
66367 }
66368 @@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
66369
66370 if (inet->hdrincl) {
66371 if (skb_checksum_complete(skb)) {
66372 - atomic_inc(&sk->sk_drops);
66373 + atomic_inc_unchecked(&sk->sk_drops);
66374 kfree_skb(skb);
66375 return NET_RX_DROP;
66376 }
66377 @@ -601,7 +601,7 @@ out:
66378 return err;
66379 }
66380
66381 -static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
66382 +static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
66383 struct flowi6 *fl6, struct dst_entry **dstp,
66384 unsigned int flags)
66385 {
66386 @@ -742,6 +742,8 @@ static int rawv6_sendmsg(struct kiocb *i
66387 u16 proto;
66388 int err;
66389
66390 + pax_track_stack();
66391 +
66392 /* Rough check on arithmetic overflow,
66393 better check is made in ip6_append_data().
66394 */
66395 @@ -909,12 +911,15 @@ do_confirm:
66396 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
66397 char __user *optval, int optlen)
66398 {
66399 + struct icmp6_filter filter;
66400 +
66401 switch (optname) {
66402 case ICMPV6_FILTER:
66403 if (optlen > sizeof(struct icmp6_filter))
66404 optlen = sizeof(struct icmp6_filter);
66405 - if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
66406 + if (copy_from_user(&filter, optval, optlen))
66407 return -EFAULT;
66408 + raw6_sk(sk)->filter = filter;
66409 return 0;
66410 default:
66411 return -ENOPROTOOPT;
66412 @@ -927,6 +932,7 @@ static int rawv6_geticmpfilter(struct so
66413 char __user *optval, int __user *optlen)
66414 {
66415 int len;
66416 + struct icmp6_filter filter;
66417
66418 switch (optname) {
66419 case ICMPV6_FILTER:
66420 @@ -938,7 +944,8 @@ static int rawv6_geticmpfilter(struct so
66421 len = sizeof(struct icmp6_filter);
66422 if (put_user(len, optlen))
66423 return -EFAULT;
66424 - if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
66425 + filter = raw6_sk(sk)->filter;
66426 + if (len > sizeof filter || copy_to_user(optval, &filter, len))
66427 return -EFAULT;
66428 return 0;
66429 default:
66430 @@ -1252,7 +1259,13 @@ static void raw6_sock_seq_show(struct se
66431 0, 0L, 0,
66432 sock_i_uid(sp), 0,
66433 sock_i_ino(sp),
66434 - atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
66435 + atomic_read(&sp->sk_refcnt),
66436 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66437 + NULL,
66438 +#else
66439 + sp,
66440 +#endif
66441 + atomic_read_unchecked(&sp->sk_drops));
66442 }
66443
66444 static int raw6_seq_show(struct seq_file *seq, void *v)
66445 diff -urNp linux-2.6.39.4/net/ipv6/tcp_ipv6.c linux-2.6.39.4/net/ipv6/tcp_ipv6.c
66446 --- linux-2.6.39.4/net/ipv6/tcp_ipv6.c 2011-05-19 00:06:34.000000000 -0400
66447 +++ linux-2.6.39.4/net/ipv6/tcp_ipv6.c 2011-08-05 19:44:37.000000000 -0400
66448 @@ -92,6 +92,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
66449 }
66450 #endif
66451
66452 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66453 +extern int grsec_enable_blackhole;
66454 +#endif
66455 +
66456 static void tcp_v6_hash(struct sock *sk)
66457 {
66458 if (sk->sk_state != TCP_CLOSE) {
66459 @@ -1660,6 +1664,9 @@ static int tcp_v6_do_rcv(struct sock *sk
66460 return 0;
66461
66462 reset:
66463 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66464 + if (!grsec_enable_blackhole)
66465 +#endif
66466 tcp_v6_send_reset(sk, skb);
66467 discard:
66468 if (opt_skb)
66469 @@ -1739,12 +1746,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
66470 TCP_SKB_CB(skb)->sacked = 0;
66471
66472 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
66473 - if (!sk)
66474 + if (!sk) {
66475 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66476 + ret = 1;
66477 +#endif
66478 goto no_tcp_socket;
66479 + }
66480
66481 process:
66482 - if (sk->sk_state == TCP_TIME_WAIT)
66483 + if (sk->sk_state == TCP_TIME_WAIT) {
66484 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66485 + ret = 2;
66486 +#endif
66487 goto do_time_wait;
66488 + }
66489
66490 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
66491 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
66492 @@ -1792,6 +1807,10 @@ no_tcp_socket:
66493 bad_packet:
66494 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
66495 } else {
66496 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66497 + if (!grsec_enable_blackhole || (ret == 1 &&
66498 + (skb->dev->flags & IFF_LOOPBACK)))
66499 +#endif
66500 tcp_v6_send_reset(NULL, skb);
66501 }
66502
66503 @@ -2052,7 +2071,13 @@ static void get_openreq6(struct seq_file
66504 uid,
66505 0, /* non standard timer */
66506 0, /* open_requests have no inode */
66507 - 0, req);
66508 + 0,
66509 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66510 + NULL
66511 +#else
66512 + req
66513 +#endif
66514 + );
66515 }
66516
66517 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
66518 @@ -2102,7 +2127,12 @@ static void get_tcp6_sock(struct seq_fil
66519 sock_i_uid(sp),
66520 icsk->icsk_probes_out,
66521 sock_i_ino(sp),
66522 - atomic_read(&sp->sk_refcnt), sp,
66523 + atomic_read(&sp->sk_refcnt),
66524 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66525 + NULL,
66526 +#else
66527 + sp,
66528 +#endif
66529 jiffies_to_clock_t(icsk->icsk_rto),
66530 jiffies_to_clock_t(icsk->icsk_ack.ato),
66531 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
66532 @@ -2137,7 +2167,13 @@ static void get_timewait6_sock(struct se
66533 dest->s6_addr32[2], dest->s6_addr32[3], destp,
66534 tw->tw_substate, 0, 0,
66535 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
66536 - atomic_read(&tw->tw_refcnt), tw);
66537 + atomic_read(&tw->tw_refcnt),
66538 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66539 + NULL
66540 +#else
66541 + tw
66542 +#endif
66543 + );
66544 }
66545
66546 static int tcp6_seq_show(struct seq_file *seq, void *v)
66547 diff -urNp linux-2.6.39.4/net/ipv6/udp.c linux-2.6.39.4/net/ipv6/udp.c
66548 --- linux-2.6.39.4/net/ipv6/udp.c 2011-07-09 09:18:51.000000000 -0400
66549 +++ linux-2.6.39.4/net/ipv6/udp.c 2011-08-05 19:44:37.000000000 -0400
66550 @@ -50,6 +50,10 @@
66551 #include <linux/seq_file.h>
66552 #include "udp_impl.h"
66553
66554 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66555 +extern int grsec_enable_blackhole;
66556 +#endif
66557 +
66558 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
66559 {
66560 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
66561 @@ -548,7 +552,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
66562
66563 return 0;
66564 drop:
66565 - atomic_inc(&sk->sk_drops);
66566 + atomic_inc_unchecked(&sk->sk_drops);
66567 drop_no_sk_drops_inc:
66568 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
66569 kfree_skb(skb);
66570 @@ -624,7 +628,7 @@ static void flush_stack(struct sock **st
66571 continue;
66572 }
66573 drop:
66574 - atomic_inc(&sk->sk_drops);
66575 + atomic_inc_unchecked(&sk->sk_drops);
66576 UDP6_INC_STATS_BH(sock_net(sk),
66577 UDP_MIB_RCVBUFERRORS, IS_UDPLITE(sk));
66578 UDP6_INC_STATS_BH(sock_net(sk),
66579 @@ -779,6 +783,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
66580 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
66581 proto == IPPROTO_UDPLITE);
66582
66583 +#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66584 + if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
66585 +#endif
66586 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
66587
66588 kfree_skb(skb);
66589 @@ -795,7 +802,7 @@ int __udp6_lib_rcv(struct sk_buff *skb,
66590 if (!sock_owned_by_user(sk))
66591 udpv6_queue_rcv_skb(sk, skb);
66592 else if (sk_add_backlog(sk, skb)) {
66593 - atomic_inc(&sk->sk_drops);
66594 + atomic_inc_unchecked(&sk->sk_drops);
66595 bh_unlock_sock(sk);
66596 sock_put(sk);
66597 goto discard;
66598 @@ -1406,8 +1413,13 @@ static void udp6_sock_seq_show(struct se
66599 0, 0L, 0,
66600 sock_i_uid(sp), 0,
66601 sock_i_ino(sp),
66602 - atomic_read(&sp->sk_refcnt), sp,
66603 - atomic_read(&sp->sk_drops));
66604 + atomic_read(&sp->sk_refcnt),
66605 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66606 + NULL,
66607 +#else
66608 + sp,
66609 +#endif
66610 + atomic_read_unchecked(&sp->sk_drops));
66611 }
66612
66613 int udp6_seq_show(struct seq_file *seq, void *v)
66614 diff -urNp linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c
66615 --- linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c 2011-05-19 00:06:34.000000000 -0400
66616 +++ linux-2.6.39.4/net/irda/ircomm/ircomm_tty.c 2011-08-05 19:44:37.000000000 -0400
66617 @@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(st
66618 add_wait_queue(&self->open_wait, &wait);
66619
66620 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
66621 - __FILE__,__LINE__, tty->driver->name, self->open_count );
66622 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
66623
66624 /* As far as I can see, we protect open_count - Jean II */
66625 spin_lock_irqsave(&self->spinlock, flags);
66626 if (!tty_hung_up_p(filp)) {
66627 extra_count = 1;
66628 - self->open_count--;
66629 + local_dec(&self->open_count);
66630 }
66631 spin_unlock_irqrestore(&self->spinlock, flags);
66632 - self->blocked_open++;
66633 + local_inc(&self->blocked_open);
66634
66635 while (1) {
66636 if (tty->termios->c_cflag & CBAUD) {
66637 @@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(st
66638 }
66639
66640 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
66641 - __FILE__,__LINE__, tty->driver->name, self->open_count );
66642 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
66643
66644 schedule();
66645 }
66646 @@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(st
66647 if (extra_count) {
66648 /* ++ is not atomic, so this should be protected - Jean II */
66649 spin_lock_irqsave(&self->spinlock, flags);
66650 - self->open_count++;
66651 + local_inc(&self->open_count);
66652 spin_unlock_irqrestore(&self->spinlock, flags);
66653 }
66654 - self->blocked_open--;
66655 + local_dec(&self->blocked_open);
66656
66657 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
66658 - __FILE__,__LINE__, tty->driver->name, self->open_count);
66659 + __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
66660
66661 if (!retval)
66662 self->flags |= ASYNC_NORMAL_ACTIVE;
66663 @@ -416,14 +416,14 @@ static int ircomm_tty_open(struct tty_st
66664 }
66665 /* ++ is not atomic, so this should be protected - Jean II */
66666 spin_lock_irqsave(&self->spinlock, flags);
66667 - self->open_count++;
66668 + local_inc(&self->open_count);
66669
66670 tty->driver_data = self;
66671 self->tty = tty;
66672 spin_unlock_irqrestore(&self->spinlock, flags);
66673
66674 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
66675 - self->line, self->open_count);
66676 + self->line, local_read(&self->open_count));
66677
66678 /* Not really used by us, but lets do it anyway */
66679 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
66680 @@ -509,7 +509,7 @@ static void ircomm_tty_close(struct tty_
66681 return;
66682 }
66683
66684 - if ((tty->count == 1) && (self->open_count != 1)) {
66685 + if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
66686 /*
66687 * Uh, oh. tty->count is 1, which means that the tty
66688 * structure will be freed. state->count should always
66689 @@ -519,16 +519,16 @@ static void ircomm_tty_close(struct tty_
66690 */
66691 IRDA_DEBUG(0, "%s(), bad serial port count; "
66692 "tty->count is 1, state->count is %d\n", __func__ ,
66693 - self->open_count);
66694 - self->open_count = 1;
66695 + local_read(&self->open_count));
66696 + local_set(&self->open_count, 1);
66697 }
66698
66699 - if (--self->open_count < 0) {
66700 + if (local_dec_return(&self->open_count) < 0) {
66701 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
66702 - __func__, self->line, self->open_count);
66703 - self->open_count = 0;
66704 + __func__, self->line, local_read(&self->open_count));
66705 + local_set(&self->open_count, 0);
66706 }
66707 - if (self->open_count) {
66708 + if (local_read(&self->open_count)) {
66709 spin_unlock_irqrestore(&self->spinlock, flags);
66710
66711 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
66712 @@ -560,7 +560,7 @@ static void ircomm_tty_close(struct tty_
66713 tty->closing = 0;
66714 self->tty = NULL;
66715
66716 - if (self->blocked_open) {
66717 + if (local_read(&self->blocked_open)) {
66718 if (self->close_delay)
66719 schedule_timeout_interruptible(self->close_delay);
66720 wake_up_interruptible(&self->open_wait);
66721 @@ -1012,7 +1012,7 @@ static void ircomm_tty_hangup(struct tty
66722 spin_lock_irqsave(&self->spinlock, flags);
66723 self->flags &= ~ASYNC_NORMAL_ACTIVE;
66724 self->tty = NULL;
66725 - self->open_count = 0;
66726 + local_set(&self->open_count, 0);
66727 spin_unlock_irqrestore(&self->spinlock, flags);
66728
66729 wake_up_interruptible(&self->open_wait);
66730 @@ -1364,7 +1364,7 @@ static void ircomm_tty_line_info(struct
66731 seq_putc(m, '\n');
66732
66733 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
66734 - seq_printf(m, "Open count: %d\n", self->open_count);
66735 + seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
66736 seq_printf(m, "Max data size: %d\n", self->max_data_size);
66737 seq_printf(m, "Max header size: %d\n", self->max_header_size);
66738
66739 diff -urNp linux-2.6.39.4/net/iucv/af_iucv.c linux-2.6.39.4/net/iucv/af_iucv.c
66740 --- linux-2.6.39.4/net/iucv/af_iucv.c 2011-05-19 00:06:34.000000000 -0400
66741 +++ linux-2.6.39.4/net/iucv/af_iucv.c 2011-08-05 19:44:37.000000000 -0400
66742 @@ -653,10 +653,10 @@ static int iucv_sock_autobind(struct soc
66743
66744 write_lock_bh(&iucv_sk_list.lock);
66745
66746 - sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
66747 + sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66748 while (__iucv_get_sock_by_name(name)) {
66749 sprintf(name, "%08x",
66750 - atomic_inc_return(&iucv_sk_list.autobind_name));
66751 + atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
66752 }
66753
66754 write_unlock_bh(&iucv_sk_list.lock);
66755 diff -urNp linux-2.6.39.4/net/key/af_key.c linux-2.6.39.4/net/key/af_key.c
66756 --- linux-2.6.39.4/net/key/af_key.c 2011-05-19 00:06:34.000000000 -0400
66757 +++ linux-2.6.39.4/net/key/af_key.c 2011-08-05 19:44:37.000000000 -0400
66758 @@ -2481,6 +2481,8 @@ static int pfkey_migrate(struct sock *sk
66759 struct xfrm_migrate m[XFRM_MAX_DEPTH];
66760 struct xfrm_kmaddress k;
66761
66762 + pax_track_stack();
66763 +
66764 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
66765 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
66766 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
66767 @@ -3016,10 +3018,10 @@ static int pfkey_send_policy_notify(stru
66768 static u32 get_acqseq(void)
66769 {
66770 u32 res;
66771 - static atomic_t acqseq;
66772 + static atomic_unchecked_t acqseq;
66773
66774 do {
66775 - res = atomic_inc_return(&acqseq);
66776 + res = atomic_inc_return_unchecked(&acqseq);
66777 } while (!res);
66778 return res;
66779 }
66780 @@ -3657,7 +3659,11 @@ static int pfkey_seq_show(struct seq_fil
66781 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
66782 else
66783 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
66784 +#ifdef CONFIG_GRKERNSEC_HIDESYM
66785 + NULL,
66786 +#else
66787 s,
66788 +#endif
66789 atomic_read(&s->sk_refcnt),
66790 sk_rmem_alloc_get(s),
66791 sk_wmem_alloc_get(s),
66792 diff -urNp linux-2.6.39.4/net/lapb/lapb_iface.c linux-2.6.39.4/net/lapb/lapb_iface.c
66793 --- linux-2.6.39.4/net/lapb/lapb_iface.c 2011-05-19 00:06:34.000000000 -0400
66794 +++ linux-2.6.39.4/net/lapb/lapb_iface.c 2011-08-05 20:34:06.000000000 -0400
66795 @@ -158,7 +158,7 @@ int lapb_register(struct net_device *dev
66796 goto out;
66797
66798 lapb->dev = dev;
66799 - lapb->callbacks = *callbacks;
66800 + lapb->callbacks = callbacks;
66801
66802 __lapb_insert_cb(lapb);
66803
66804 @@ -380,32 +380,32 @@ int lapb_data_received(struct net_device
66805
66806 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
66807 {
66808 - if (lapb->callbacks.connect_confirmation)
66809 - lapb->callbacks.connect_confirmation(lapb->dev, reason);
66810 + if (lapb->callbacks->connect_confirmation)
66811 + lapb->callbacks->connect_confirmation(lapb->dev, reason);
66812 }
66813
66814 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
66815 {
66816 - if (lapb->callbacks.connect_indication)
66817 - lapb->callbacks.connect_indication(lapb->dev, reason);
66818 + if (lapb->callbacks->connect_indication)
66819 + lapb->callbacks->connect_indication(lapb->dev, reason);
66820 }
66821
66822 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
66823 {
66824 - if (lapb->callbacks.disconnect_confirmation)
66825 - lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
66826 + if (lapb->callbacks->disconnect_confirmation)
66827 + lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
66828 }
66829
66830 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
66831 {
66832 - if (lapb->callbacks.disconnect_indication)
66833 - lapb->callbacks.disconnect_indication(lapb->dev, reason);
66834 + if (lapb->callbacks->disconnect_indication)
66835 + lapb->callbacks->disconnect_indication(lapb->dev, reason);
66836 }
66837
66838 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
66839 {
66840 - if (lapb->callbacks.data_indication)
66841 - return lapb->callbacks.data_indication(lapb->dev, skb);
66842 + if (lapb->callbacks->data_indication)
66843 + return lapb->callbacks->data_indication(lapb->dev, skb);
66844
66845 kfree_skb(skb);
66846 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
66847 @@ -415,8 +415,8 @@ int lapb_data_transmit(struct lapb_cb *l
66848 {
66849 int used = 0;
66850
66851 - if (lapb->callbacks.data_transmit) {
66852 - lapb->callbacks.data_transmit(lapb->dev, skb);
66853 + if (lapb->callbacks->data_transmit) {
66854 + lapb->callbacks->data_transmit(lapb->dev, skb);
66855 used = 1;
66856 }
66857
66858 diff -urNp linux-2.6.39.4/net/mac80211/debugfs_sta.c linux-2.6.39.4/net/mac80211/debugfs_sta.c
66859 --- linux-2.6.39.4/net/mac80211/debugfs_sta.c 2011-05-19 00:06:34.000000000 -0400
66860 +++ linux-2.6.39.4/net/mac80211/debugfs_sta.c 2011-08-05 19:44:37.000000000 -0400
66861 @@ -115,6 +115,8 @@ static ssize_t sta_agg_status_read(struc
66862 struct tid_ampdu_rx *tid_rx;
66863 struct tid_ampdu_tx *tid_tx;
66864
66865 + pax_track_stack();
66866 +
66867 rcu_read_lock();
66868
66869 p += scnprintf(p, sizeof(buf) + buf - p, "next dialog_token: %#02x\n",
66870 @@ -215,6 +217,8 @@ static ssize_t sta_ht_capa_read(struct f
66871 struct sta_info *sta = file->private_data;
66872 struct ieee80211_sta_ht_cap *htc = &sta->sta.ht_cap;
66873
66874 + pax_track_stack();
66875 +
66876 p += scnprintf(p, sizeof(buf) + buf - p, "ht %ssupported\n",
66877 htc->ht_supported ? "" : "not ");
66878 if (htc->ht_supported) {
66879 diff -urNp linux-2.6.39.4/net/mac80211/ieee80211_i.h linux-2.6.39.4/net/mac80211/ieee80211_i.h
66880 --- linux-2.6.39.4/net/mac80211/ieee80211_i.h 2011-05-19 00:06:34.000000000 -0400
66881 +++ linux-2.6.39.4/net/mac80211/ieee80211_i.h 2011-08-05 19:44:37.000000000 -0400
66882 @@ -27,6 +27,7 @@
66883 #include <net/ieee80211_radiotap.h>
66884 #include <net/cfg80211.h>
66885 #include <net/mac80211.h>
66886 +#include <asm/local.h>
66887 #include "key.h"
66888 #include "sta_info.h"
66889
66890 @@ -714,7 +715,7 @@ struct ieee80211_local {
66891 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
66892 spinlock_t queue_stop_reason_lock;
66893
66894 - int open_count;
66895 + local_t open_count;
66896 int monitors, cooked_mntrs;
66897 /* number of interfaces with corresponding FIF_ flags */
66898 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
66899 diff -urNp linux-2.6.39.4/net/mac80211/iface.c linux-2.6.39.4/net/mac80211/iface.c
66900 --- linux-2.6.39.4/net/mac80211/iface.c 2011-05-19 00:06:34.000000000 -0400
66901 +++ linux-2.6.39.4/net/mac80211/iface.c 2011-08-05 19:44:37.000000000 -0400
66902 @@ -211,7 +211,7 @@ static int ieee80211_do_open(struct net_
66903 break;
66904 }
66905
66906 - if (local->open_count == 0) {
66907 + if (local_read(&local->open_count) == 0) {
66908 res = drv_start(local);
66909 if (res)
66910 goto err_del_bss;
66911 @@ -235,7 +235,7 @@ static int ieee80211_do_open(struct net_
66912 memcpy(dev->perm_addr, dev->dev_addr, ETH_ALEN);
66913
66914 if (!is_valid_ether_addr(dev->dev_addr)) {
66915 - if (!local->open_count)
66916 + if (!local_read(&local->open_count))
66917 drv_stop(local);
66918 return -EADDRNOTAVAIL;
66919 }
66920 @@ -327,7 +327,7 @@ static int ieee80211_do_open(struct net_
66921 mutex_unlock(&local->mtx);
66922
66923 if (coming_up)
66924 - local->open_count++;
66925 + local_inc(&local->open_count);
66926
66927 if (hw_reconf_flags) {
66928 ieee80211_hw_config(local, hw_reconf_flags);
66929 @@ -347,7 +347,7 @@ static int ieee80211_do_open(struct net_
66930 err_del_interface:
66931 drv_remove_interface(local, &sdata->vif);
66932 err_stop:
66933 - if (!local->open_count)
66934 + if (!local_read(&local->open_count))
66935 drv_stop(local);
66936 err_del_bss:
66937 sdata->bss = NULL;
66938 @@ -474,7 +474,7 @@ static void ieee80211_do_stop(struct iee
66939 }
66940
66941 if (going_down)
66942 - local->open_count--;
66943 + local_dec(&local->open_count);
66944
66945 switch (sdata->vif.type) {
66946 case NL80211_IFTYPE_AP_VLAN:
66947 @@ -533,7 +533,7 @@ static void ieee80211_do_stop(struct iee
66948
66949 ieee80211_recalc_ps(local, -1);
66950
66951 - if (local->open_count == 0) {
66952 + if (local_read(&local->open_count) == 0) {
66953 if (local->ops->napi_poll)
66954 napi_disable(&local->napi);
66955 ieee80211_clear_tx_pending(local);
66956 diff -urNp linux-2.6.39.4/net/mac80211/main.c linux-2.6.39.4/net/mac80211/main.c
66957 --- linux-2.6.39.4/net/mac80211/main.c 2011-05-19 00:06:34.000000000 -0400
66958 +++ linux-2.6.39.4/net/mac80211/main.c 2011-08-05 19:44:37.000000000 -0400
66959 @@ -215,7 +215,7 @@ int ieee80211_hw_config(struct ieee80211
66960 local->hw.conf.power_level = power;
66961 }
66962
66963 - if (changed && local->open_count) {
66964 + if (changed && local_read(&local->open_count)) {
66965 ret = drv_config(local, changed);
66966 /*
66967 * Goal:
66968 diff -urNp linux-2.6.39.4/net/mac80211/mlme.c linux-2.6.39.4/net/mac80211/mlme.c
66969 --- linux-2.6.39.4/net/mac80211/mlme.c 2011-06-03 00:04:14.000000000 -0400
66970 +++ linux-2.6.39.4/net/mac80211/mlme.c 2011-08-05 19:44:37.000000000 -0400
66971 @@ -1431,6 +1431,8 @@ static bool ieee80211_assoc_success(stru
66972 bool have_higher_than_11mbit = false;
66973 u16 ap_ht_cap_flags;
66974
66975 + pax_track_stack();
66976 +
66977 /* AssocResp and ReassocResp have identical structure */
66978
66979 aid = le16_to_cpu(mgmt->u.assoc_resp.aid);
66980 diff -urNp linux-2.6.39.4/net/mac80211/pm.c linux-2.6.39.4/net/mac80211/pm.c
66981 --- linux-2.6.39.4/net/mac80211/pm.c 2011-05-19 00:06:34.000000000 -0400
66982 +++ linux-2.6.39.4/net/mac80211/pm.c 2011-08-05 19:44:37.000000000 -0400
66983 @@ -95,7 +95,7 @@ int __ieee80211_suspend(struct ieee80211
66984 }
66985
66986 /* stop hardware - this must stop RX */
66987 - if (local->open_count)
66988 + if (local_read(&local->open_count))
66989 ieee80211_stop_device(local);
66990
66991 local->suspended = true;
66992 diff -urNp linux-2.6.39.4/net/mac80211/rate.c linux-2.6.39.4/net/mac80211/rate.c
66993 --- linux-2.6.39.4/net/mac80211/rate.c 2011-05-19 00:06:34.000000000 -0400
66994 +++ linux-2.6.39.4/net/mac80211/rate.c 2011-08-05 19:44:37.000000000 -0400
66995 @@ -371,7 +371,7 @@ int ieee80211_init_rate_ctrl_alg(struct
66996
66997 ASSERT_RTNL();
66998
66999 - if (local->open_count)
67000 + if (local_read(&local->open_count))
67001 return -EBUSY;
67002
67003 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
67004 diff -urNp linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c
67005 --- linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c 2011-05-19 00:06:34.000000000 -0400
67006 +++ linux-2.6.39.4/net/mac80211/rc80211_pid_debugfs.c 2011-08-05 19:44:37.000000000 -0400
67007 @@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r
67008
67009 spin_unlock_irqrestore(&events->lock, status);
67010
67011 - if (copy_to_user(buf, pb, p))
67012 + if (p > sizeof(pb) || copy_to_user(buf, pb, p))
67013 return -EFAULT;
67014
67015 return p;
67016 diff -urNp linux-2.6.39.4/net/mac80211/util.c linux-2.6.39.4/net/mac80211/util.c
67017 --- linux-2.6.39.4/net/mac80211/util.c 2011-05-19 00:06:34.000000000 -0400
67018 +++ linux-2.6.39.4/net/mac80211/util.c 2011-08-05 19:44:37.000000000 -0400
67019 @@ -1129,7 +1129,7 @@ int ieee80211_reconfig(struct ieee80211_
67020 local->resuming = true;
67021
67022 /* restart hardware */
67023 - if (local->open_count) {
67024 + if (local_read(&local->open_count)) {
67025 /*
67026 * Upon resume hardware can sometimes be goofy due to
67027 * various platform / driver / bus issues, so restarting
67028 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c
67029 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c 2011-07-09 09:18:51.000000000 -0400
67030 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_conn.c 2011-08-05 19:44:37.000000000 -0400
67031 @@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
67032 /* Increase the refcnt counter of the dest */
67033 atomic_inc(&dest->refcnt);
67034
67035 - conn_flags = atomic_read(&dest->conn_flags);
67036 + conn_flags = atomic_read_unchecked(&dest->conn_flags);
67037 if (cp->protocol != IPPROTO_UDP)
67038 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
67039 /* Bind with the destination and its corresponding transmitter */
67040 @@ -869,7 +869,7 @@ ip_vs_conn_new(const struct ip_vs_conn_p
67041 atomic_set(&cp->refcnt, 1);
67042
67043 atomic_set(&cp->n_control, 0);
67044 - atomic_set(&cp->in_pkts, 0);
67045 + atomic_set_unchecked(&cp->in_pkts, 0);
67046
67047 atomic_inc(&ipvs->conn_count);
67048 if (flags & IP_VS_CONN_F_NO_CPORT)
67049 @@ -1149,7 +1149,7 @@ static inline int todrop_entry(struct ip
67050
67051 /* Don't drop the entry if its number of incoming packets is not
67052 located in [0, 8] */
67053 - i = atomic_read(&cp->in_pkts);
67054 + i = atomic_read_unchecked(&cp->in_pkts);
67055 if (i > 8 || i < 0) return 0;
67056
67057 if (!todrop_rate[i]) return 0;
67058 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c
67059 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c 2011-07-09 09:18:51.000000000 -0400
67060 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_core.c 2011-08-05 19:44:37.000000000 -0400
67061 @@ -563,7 +563,7 @@ int ip_vs_leave(struct ip_vs_service *sv
67062 ret = cp->packet_xmit(skb, cp, pd->pp);
67063 /* do not touch skb anymore */
67064
67065 - atomic_inc(&cp->in_pkts);
67066 + atomic_inc_unchecked(&cp->in_pkts);
67067 ip_vs_conn_put(cp);
67068 return ret;
67069 }
67070 @@ -1633,7 +1633,7 @@ ip_vs_in(unsigned int hooknum, struct sk
67071 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
67072 pkts = sysctl_sync_threshold(ipvs);
67073 else
67074 - pkts = atomic_add_return(1, &cp->in_pkts);
67075 + pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
67076
67077 if ((ipvs->sync_state & IP_VS_STATE_MASTER) &&
67078 cp->protocol == IPPROTO_SCTP) {
67079 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c
67080 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-19 00:06:34.000000000 -0400
67081 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_ctl.c 2011-08-05 19:44:37.000000000 -0400
67082 @@ -782,7 +782,7 @@ __ip_vs_update_dest(struct ip_vs_service
67083 ip_vs_rs_hash(ipvs, dest);
67084 write_unlock_bh(&ipvs->rs_lock);
67085 }
67086 - atomic_set(&dest->conn_flags, conn_flags);
67087 + atomic_set_unchecked(&dest->conn_flags, conn_flags);
67088
67089 /* bind the service */
67090 if (!dest->svc) {
67091 @@ -2027,7 +2027,7 @@ static int ip_vs_info_seq_show(struct se
67092 " %-7s %-6d %-10d %-10d\n",
67093 &dest->addr.in6,
67094 ntohs(dest->port),
67095 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
67096 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
67097 atomic_read(&dest->weight),
67098 atomic_read(&dest->activeconns),
67099 atomic_read(&dest->inactconns));
67100 @@ -2038,7 +2038,7 @@ static int ip_vs_info_seq_show(struct se
67101 "%-7s %-6d %-10d %-10d\n",
67102 ntohl(dest->addr.ip),
67103 ntohs(dest->port),
67104 - ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
67105 + ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
67106 atomic_read(&dest->weight),
67107 atomic_read(&dest->activeconns),
67108 atomic_read(&dest->inactconns));
67109 @@ -2287,6 +2287,8 @@ do_ip_vs_set_ctl(struct sock *sk, int cm
67110 struct ip_vs_dest_user *udest_compat;
67111 struct ip_vs_dest_user_kern udest;
67112
67113 + pax_track_stack();
67114 +
67115 if (!capable(CAP_NET_ADMIN))
67116 return -EPERM;
67117
67118 @@ -2501,7 +2503,7 @@ __ip_vs_get_dest_entries(struct net *net
67119
67120 entry.addr = dest->addr.ip;
67121 entry.port = dest->port;
67122 - entry.conn_flags = atomic_read(&dest->conn_flags);
67123 + entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
67124 entry.weight = atomic_read(&dest->weight);
67125 entry.u_threshold = dest->u_threshold;
67126 entry.l_threshold = dest->l_threshold;
67127 @@ -3029,7 +3031,7 @@ static int ip_vs_genl_fill_dest(struct s
67128 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
67129
67130 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
67131 - atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
67132 + atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
67133 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
67134 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
67135 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
67136 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c
67137 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c 2011-05-19 00:06:34.000000000 -0400
67138 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_sync.c 2011-08-05 19:44:37.000000000 -0400
67139 @@ -648,7 +648,7 @@ control:
67140 * i.e only increment in_pkts for Templates.
67141 */
67142 if (cp->flags & IP_VS_CONN_F_TEMPLATE) {
67143 - int pkts = atomic_add_return(1, &cp->in_pkts);
67144 + int pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
67145
67146 if (pkts % sysctl_sync_period(ipvs) != 1)
67147 return;
67148 @@ -794,7 +794,7 @@ static void ip_vs_proc_conn(struct net *
67149
67150 if (opt)
67151 memcpy(&cp->in_seq, opt, sizeof(*opt));
67152 - atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
67153 + atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
67154 cp->state = state;
67155 cp->old_state = cp->state;
67156 /*
67157 diff -urNp linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c
67158 --- linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-19 00:06:34.000000000 -0400
67159 +++ linux-2.6.39.4/net/netfilter/ipvs/ip_vs_xmit.c 2011-08-05 19:44:37.000000000 -0400
67160 @@ -1127,7 +1127,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
67161 else
67162 rc = NF_ACCEPT;
67163 /* do not touch skb anymore */
67164 - atomic_inc(&cp->in_pkts);
67165 + atomic_inc_unchecked(&cp->in_pkts);
67166 goto out;
67167 }
67168
67169 @@ -1245,7 +1245,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
67170 else
67171 rc = NF_ACCEPT;
67172 /* do not touch skb anymore */
67173 - atomic_inc(&cp->in_pkts);
67174 + atomic_inc_unchecked(&cp->in_pkts);
67175 goto out;
67176 }
67177
67178 diff -urNp linux-2.6.39.4/net/netfilter/Kconfig linux-2.6.39.4/net/netfilter/Kconfig
67179 --- linux-2.6.39.4/net/netfilter/Kconfig 2011-05-19 00:06:34.000000000 -0400
67180 +++ linux-2.6.39.4/net/netfilter/Kconfig 2011-08-05 19:44:37.000000000 -0400
67181 @@ -781,6 +781,16 @@ config NETFILTER_XT_MATCH_ESP
67182
67183 To compile it as a module, choose M here. If unsure, say N.
67184
67185 +config NETFILTER_XT_MATCH_GRADM
67186 + tristate '"gradm" match support'
67187 + depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
67188 + depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
67189 + ---help---
67190 + The gradm match allows to match on grsecurity RBAC being enabled.
67191 + It is useful when iptables rules are applied early on bootup to
67192 + prevent connections to the machine (except from a trusted host)
67193 + while the RBAC system is disabled.
67194 +
67195 config NETFILTER_XT_MATCH_HASHLIMIT
67196 tristate '"hashlimit" match support'
67197 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
67198 diff -urNp linux-2.6.39.4/net/netfilter/Makefile linux-2.6.39.4/net/netfilter/Makefile
67199 --- linux-2.6.39.4/net/netfilter/Makefile 2011-05-19 00:06:34.000000000 -0400
67200 +++ linux-2.6.39.4/net/netfilter/Makefile 2011-08-05 19:44:37.000000000 -0400
67201 @@ -81,6 +81,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) +=
67202 obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
67203 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
67204 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
67205 +obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
67206 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
67207 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
67208 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
67209 diff -urNp linux-2.6.39.4/net/netfilter/nfnetlink_log.c linux-2.6.39.4/net/netfilter/nfnetlink_log.c
67210 --- linux-2.6.39.4/net/netfilter/nfnetlink_log.c 2011-05-19 00:06:34.000000000 -0400
67211 +++ linux-2.6.39.4/net/netfilter/nfnetlink_log.c 2011-08-05 19:44:37.000000000 -0400
67212 @@ -70,7 +70,7 @@ struct nfulnl_instance {
67213 };
67214
67215 static DEFINE_SPINLOCK(instances_lock);
67216 -static atomic_t global_seq;
67217 +static atomic_unchecked_t global_seq;
67218
67219 #define INSTANCE_BUCKETS 16
67220 static struct hlist_head instance_table[INSTANCE_BUCKETS];
67221 @@ -506,7 +506,7 @@ __build_packet_message(struct nfulnl_ins
67222 /* global sequence number */
67223 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
67224 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
67225 - htonl(atomic_inc_return(&global_seq)));
67226 + htonl(atomic_inc_return_unchecked(&global_seq)));
67227
67228 if (data_len) {
67229 struct nlattr *nla;
67230 diff -urNp linux-2.6.39.4/net/netfilter/nfnetlink_queue.c linux-2.6.39.4/net/netfilter/nfnetlink_queue.c
67231 --- linux-2.6.39.4/net/netfilter/nfnetlink_queue.c 2011-05-19 00:06:34.000000000 -0400
67232 +++ linux-2.6.39.4/net/netfilter/nfnetlink_queue.c 2011-08-05 19:44:37.000000000 -0400
67233 @@ -58,7 +58,7 @@ struct nfqnl_instance {
67234 */
67235 spinlock_t lock;
67236 unsigned int queue_total;
67237 - atomic_t id_sequence; /* 'sequence' of pkt ids */
67238 + atomic_unchecked_t id_sequence; /* 'sequence' of pkt ids */
67239 struct list_head queue_list; /* packets in queue */
67240 };
67241
67242 @@ -272,7 +272,7 @@ nfqnl_build_packet_message(struct nfqnl_
67243 nfmsg->version = NFNETLINK_V0;
67244 nfmsg->res_id = htons(queue->queue_num);
67245
67246 - entry->id = atomic_inc_return(&queue->id_sequence);
67247 + entry->id = atomic_inc_return_unchecked(&queue->id_sequence);
67248 pmsg.packet_id = htonl(entry->id);
67249 pmsg.hw_protocol = entskb->protocol;
67250 pmsg.hook = entry->hook;
67251 @@ -869,7 +869,7 @@ static int seq_show(struct seq_file *s,
67252 inst->peer_pid, inst->queue_total,
67253 inst->copy_mode, inst->copy_range,
67254 inst->queue_dropped, inst->queue_user_dropped,
67255 - atomic_read(&inst->id_sequence), 1);
67256 + atomic_read_unchecked(&inst->id_sequence), 1);
67257 }
67258
67259 static const struct seq_operations nfqnl_seq_ops = {
67260 diff -urNp linux-2.6.39.4/net/netfilter/xt_gradm.c linux-2.6.39.4/net/netfilter/xt_gradm.c
67261 --- linux-2.6.39.4/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
67262 +++ linux-2.6.39.4/net/netfilter/xt_gradm.c 2011-08-05 19:44:37.000000000 -0400
67263 @@ -0,0 +1,51 @@
67264 +/*
67265 + * gradm match for netfilter
67266 + * Copyright © Zbigniew Krzystolik, 2010
67267 + *
67268 + * This program is free software; you can redistribute it and/or modify
67269 + * it under the terms of the GNU General Public License; either version
67270 + * 2 or 3 as published by the Free Software Foundation.
67271 + */
67272 +#include <linux/module.h>
67273 +#include <linux/moduleparam.h>
67274 +#include <linux/skbuff.h>
67275 +#include <linux/netfilter/x_tables.h>
67276 +#include <linux/grsecurity.h>
67277 +#include <linux/netfilter/xt_gradm.h>
67278 +
67279 +static bool
67280 +gradm_mt(const struct sk_buff *skb, struct xt_action_param *par)
67281 +{
67282 + const struct xt_gradm_mtinfo *info = par->matchinfo;
67283 + bool retval = false;
67284 + if (gr_acl_is_enabled())
67285 + retval = true;
67286 + return retval ^ info->invflags;
67287 +}
67288 +
67289 +static struct xt_match gradm_mt_reg __read_mostly = {
67290 + .name = "gradm",
67291 + .revision = 0,
67292 + .family = NFPROTO_UNSPEC,
67293 + .match = gradm_mt,
67294 + .matchsize = XT_ALIGN(sizeof(struct xt_gradm_mtinfo)),
67295 + .me = THIS_MODULE,
67296 +};
67297 +
67298 +static int __init gradm_mt_init(void)
67299 +{
67300 + return xt_register_match(&gradm_mt_reg);
67301 +}
67302 +
67303 +static void __exit gradm_mt_exit(void)
67304 +{
67305 + xt_unregister_match(&gradm_mt_reg);
67306 +}
67307 +
67308 +module_init(gradm_mt_init);
67309 +module_exit(gradm_mt_exit);
67310 +MODULE_AUTHOR("Zbigniew Krzystolik <zbyniu@destrukcja.pl>");
67311 +MODULE_DESCRIPTION("Xtables: Grsecurity RBAC match");
67312 +MODULE_LICENSE("GPL");
67313 +MODULE_ALIAS("ipt_gradm");
67314 +MODULE_ALIAS("ip6t_gradm");
67315 diff -urNp linux-2.6.39.4/net/netfilter/xt_statistic.c linux-2.6.39.4/net/netfilter/xt_statistic.c
67316 --- linux-2.6.39.4/net/netfilter/xt_statistic.c 2011-05-19 00:06:34.000000000 -0400
67317 +++ linux-2.6.39.4/net/netfilter/xt_statistic.c 2011-08-05 19:44:37.000000000 -0400
67318 @@ -18,7 +18,7 @@
67319 #include <linux/netfilter/x_tables.h>
67320
67321 struct xt_statistic_priv {
67322 - atomic_t count;
67323 + atomic_unchecked_t count;
67324 } ____cacheline_aligned_in_smp;
67325
67326 MODULE_LICENSE("GPL");
67327 @@ -41,9 +41,9 @@ statistic_mt(const struct sk_buff *skb,
67328 break;
67329 case XT_STATISTIC_MODE_NTH:
67330 do {
67331 - oval = atomic_read(&info->master->count);
67332 + oval = atomic_read_unchecked(&info->master->count);
67333 nval = (oval == info->u.nth.every) ? 0 : oval + 1;
67334 - } while (atomic_cmpxchg(&info->master->count, oval, nval) != oval);
67335 + } while (atomic_cmpxchg_unchecked(&info->master->count, oval, nval) != oval);
67336 if (nval == 0)
67337 ret = !ret;
67338 break;
67339 @@ -63,7 +63,7 @@ static int statistic_mt_check(const stru
67340 info->master = kzalloc(sizeof(*info->master), GFP_KERNEL);
67341 if (info->master == NULL)
67342 return -ENOMEM;
67343 - atomic_set(&info->master->count, info->u.nth.count);
67344 + atomic_set_unchecked(&info->master->count, info->u.nth.count);
67345
67346 return 0;
67347 }
67348 diff -urNp linux-2.6.39.4/net/netlink/af_netlink.c linux-2.6.39.4/net/netlink/af_netlink.c
67349 --- linux-2.6.39.4/net/netlink/af_netlink.c 2011-05-19 00:06:34.000000000 -0400
67350 +++ linux-2.6.39.4/net/netlink/af_netlink.c 2011-08-05 19:44:37.000000000 -0400
67351 @@ -742,7 +742,7 @@ static void netlink_overrun(struct sock
67352 sk->sk_error_report(sk);
67353 }
67354 }
67355 - atomic_inc(&sk->sk_drops);
67356 + atomic_inc_unchecked(&sk->sk_drops);
67357 }
67358
67359 static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
67360 @@ -1992,15 +1992,23 @@ static int netlink_seq_show(struct seq_f
67361 struct netlink_sock *nlk = nlk_sk(s);
67362
67363 seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d %-8lu\n",
67364 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67365 + NULL,
67366 +#else
67367 s,
67368 +#endif
67369 s->sk_protocol,
67370 nlk->pid,
67371 nlk->groups ? (u32)nlk->groups[0] : 0,
67372 sk_rmem_alloc_get(s),
67373 sk_wmem_alloc_get(s),
67374 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67375 + NULL,
67376 +#else
67377 nlk->cb,
67378 +#endif
67379 atomic_read(&s->sk_refcnt),
67380 - atomic_read(&s->sk_drops),
67381 + atomic_read_unchecked(&s->sk_drops),
67382 sock_i_ino(s)
67383 );
67384
67385 diff -urNp linux-2.6.39.4/net/netrom/af_netrom.c linux-2.6.39.4/net/netrom/af_netrom.c
67386 --- linux-2.6.39.4/net/netrom/af_netrom.c 2011-05-19 00:06:34.000000000 -0400
67387 +++ linux-2.6.39.4/net/netrom/af_netrom.c 2011-08-05 19:44:37.000000000 -0400
67388 @@ -840,6 +840,7 @@ static int nr_getname(struct socket *soc
67389 struct sock *sk = sock->sk;
67390 struct nr_sock *nr = nr_sk(sk);
67391
67392 + memset(sax, 0, sizeof(*sax));
67393 lock_sock(sk);
67394 if (peer != 0) {
67395 if (sk->sk_state != TCP_ESTABLISHED) {
67396 @@ -854,7 +855,6 @@ static int nr_getname(struct socket *soc
67397 *uaddr_len = sizeof(struct full_sockaddr_ax25);
67398 } else {
67399 sax->fsa_ax25.sax25_family = AF_NETROM;
67400 - sax->fsa_ax25.sax25_ndigis = 0;
67401 sax->fsa_ax25.sax25_call = nr->source_addr;
67402 *uaddr_len = sizeof(struct sockaddr_ax25);
67403 }
67404 diff -urNp linux-2.6.39.4/net/packet/af_packet.c linux-2.6.39.4/net/packet/af_packet.c
67405 --- linux-2.6.39.4/net/packet/af_packet.c 2011-07-09 09:18:51.000000000 -0400
67406 +++ linux-2.6.39.4/net/packet/af_packet.c 2011-08-05 19:44:37.000000000 -0400
67407 @@ -647,14 +647,14 @@ static int packet_rcv(struct sk_buff *sk
67408
67409 spin_lock(&sk->sk_receive_queue.lock);
67410 po->stats.tp_packets++;
67411 - skb->dropcount = atomic_read(&sk->sk_drops);
67412 + skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
67413 __skb_queue_tail(&sk->sk_receive_queue, skb);
67414 spin_unlock(&sk->sk_receive_queue.lock);
67415 sk->sk_data_ready(sk, skb->len);
67416 return 0;
67417
67418 drop_n_acct:
67419 - po->stats.tp_drops = atomic_inc_return(&sk->sk_drops);
67420 + po->stats.tp_drops = atomic_inc_return_unchecked(&sk->sk_drops);
67421
67422 drop_n_restore:
67423 if (skb_head != skb->data && skb_shared(skb)) {
67424 @@ -2159,7 +2159,7 @@ static int packet_getsockopt(struct sock
67425 case PACKET_HDRLEN:
67426 if (len > sizeof(int))
67427 len = sizeof(int);
67428 - if (copy_from_user(&val, optval, len))
67429 + if (len > sizeof(val) || copy_from_user(&val, optval, len))
67430 return -EFAULT;
67431 switch (val) {
67432 case TPACKET_V1:
67433 @@ -2197,7 +2197,7 @@ static int packet_getsockopt(struct sock
67434
67435 if (put_user(len, optlen))
67436 return -EFAULT;
67437 - if (copy_to_user(optval, data, len))
67438 + if (len > sizeof(st) || copy_to_user(optval, data, len))
67439 return -EFAULT;
67440 return 0;
67441 }
67442 @@ -2709,7 +2709,11 @@ static int packet_seq_show(struct seq_fi
67443
67444 seq_printf(seq,
67445 "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
67446 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67447 + NULL,
67448 +#else
67449 s,
67450 +#endif
67451 atomic_read(&s->sk_refcnt),
67452 s->sk_type,
67453 ntohs(po->num),
67454 diff -urNp linux-2.6.39.4/net/phonet/af_phonet.c linux-2.6.39.4/net/phonet/af_phonet.c
67455 --- linux-2.6.39.4/net/phonet/af_phonet.c 2011-05-19 00:06:34.000000000 -0400
67456 +++ linux-2.6.39.4/net/phonet/af_phonet.c 2011-08-05 20:34:06.000000000 -0400
67457 @@ -41,7 +41,7 @@ static struct phonet_protocol *phonet_pr
67458 {
67459 struct phonet_protocol *pp;
67460
67461 - if (protocol >= PHONET_NPROTO)
67462 + if (protocol < 0 || protocol >= PHONET_NPROTO)
67463 return NULL;
67464
67465 rcu_read_lock();
67466 @@ -469,7 +469,7 @@ int __init_or_module phonet_proto_regist
67467 {
67468 int err = 0;
67469
67470 - if (protocol >= PHONET_NPROTO)
67471 + if (protocol < 0 || protocol >= PHONET_NPROTO)
67472 return -EINVAL;
67473
67474 err = proto_register(pp->prot, 1);
67475 diff -urNp linux-2.6.39.4/net/phonet/pep.c linux-2.6.39.4/net/phonet/pep.c
67476 --- linux-2.6.39.4/net/phonet/pep.c 2011-05-19 00:06:34.000000000 -0400
67477 +++ linux-2.6.39.4/net/phonet/pep.c 2011-08-05 19:44:37.000000000 -0400
67478 @@ -387,7 +387,7 @@ static int pipe_do_rcv(struct sock *sk,
67479
67480 case PNS_PEP_CTRL_REQ:
67481 if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
67482 - atomic_inc(&sk->sk_drops);
67483 + atomic_inc_unchecked(&sk->sk_drops);
67484 break;
67485 }
67486 __skb_pull(skb, 4);
67487 @@ -408,7 +408,7 @@ static int pipe_do_rcv(struct sock *sk,
67488 }
67489
67490 if (pn->rx_credits == 0) {
67491 - atomic_inc(&sk->sk_drops);
67492 + atomic_inc_unchecked(&sk->sk_drops);
67493 err = -ENOBUFS;
67494 break;
67495 }
67496 @@ -556,7 +556,7 @@ static int pipe_handler_do_rcv(struct so
67497 }
67498
67499 if (pn->rx_credits == 0) {
67500 - atomic_inc(&sk->sk_drops);
67501 + atomic_inc_unchecked(&sk->sk_drops);
67502 err = NET_RX_DROP;
67503 break;
67504 }
67505 diff -urNp linux-2.6.39.4/net/phonet/socket.c linux-2.6.39.4/net/phonet/socket.c
67506 --- linux-2.6.39.4/net/phonet/socket.c 2011-05-19 00:06:34.000000000 -0400
67507 +++ linux-2.6.39.4/net/phonet/socket.c 2011-08-05 19:44:37.000000000 -0400
67508 @@ -613,8 +613,13 @@ static int pn_sock_seq_show(struct seq_f
67509 pn->resource, sk->sk_state,
67510 sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
67511 sock_i_uid(sk), sock_i_ino(sk),
67512 - atomic_read(&sk->sk_refcnt), sk,
67513 - atomic_read(&sk->sk_drops), &len);
67514 + atomic_read(&sk->sk_refcnt),
67515 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67516 + NULL,
67517 +#else
67518 + sk,
67519 +#endif
67520 + atomic_read_unchecked(&sk->sk_drops), &len);
67521 }
67522 seq_printf(seq, "%*s\n", 127 - len, "");
67523 return 0;
67524 diff -urNp linux-2.6.39.4/net/rds/cong.c linux-2.6.39.4/net/rds/cong.c
67525 --- linux-2.6.39.4/net/rds/cong.c 2011-05-19 00:06:34.000000000 -0400
67526 +++ linux-2.6.39.4/net/rds/cong.c 2011-08-05 19:44:37.000000000 -0400
67527 @@ -77,7 +77,7 @@
67528 * finds that the saved generation number is smaller than the global generation
67529 * number, it wakes up the process.
67530 */
67531 -static atomic_t rds_cong_generation = ATOMIC_INIT(0);
67532 +static atomic_unchecked_t rds_cong_generation = ATOMIC_INIT(0);
67533
67534 /*
67535 * Congestion monitoring
67536 @@ -232,7 +232,7 @@ void rds_cong_map_updated(struct rds_con
67537 rdsdebug("waking map %p for %pI4\n",
67538 map, &map->m_addr);
67539 rds_stats_inc(s_cong_update_received);
67540 - atomic_inc(&rds_cong_generation);
67541 + atomic_inc_unchecked(&rds_cong_generation);
67542 if (waitqueue_active(&map->m_waitq))
67543 wake_up(&map->m_waitq);
67544 if (waitqueue_active(&rds_poll_waitq))
67545 @@ -258,7 +258,7 @@ EXPORT_SYMBOL_GPL(rds_cong_map_updated);
67546
67547 int rds_cong_updated_since(unsigned long *recent)
67548 {
67549 - unsigned long gen = atomic_read(&rds_cong_generation);
67550 + unsigned long gen = atomic_read_unchecked(&rds_cong_generation);
67551
67552 if (likely(*recent == gen))
67553 return 0;
67554 diff -urNp linux-2.6.39.4/net/rds/ib_cm.c linux-2.6.39.4/net/rds/ib_cm.c
67555 --- linux-2.6.39.4/net/rds/ib_cm.c 2011-05-19 00:06:34.000000000 -0400
67556 +++ linux-2.6.39.4/net/rds/ib_cm.c 2011-08-05 19:44:37.000000000 -0400
67557 @@ -720,7 +720,7 @@ void rds_ib_conn_shutdown(struct rds_con
67558 /* Clear the ACK state */
67559 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
67560 #ifdef KERNEL_HAS_ATOMIC64
67561 - atomic64_set(&ic->i_ack_next, 0);
67562 + atomic64_set_unchecked(&ic->i_ack_next, 0);
67563 #else
67564 ic->i_ack_next = 0;
67565 #endif
67566 diff -urNp linux-2.6.39.4/net/rds/ib.h linux-2.6.39.4/net/rds/ib.h
67567 --- linux-2.6.39.4/net/rds/ib.h 2011-05-19 00:06:34.000000000 -0400
67568 +++ linux-2.6.39.4/net/rds/ib.h 2011-08-05 19:44:37.000000000 -0400
67569 @@ -127,7 +127,7 @@ struct rds_ib_connection {
67570 /* sending acks */
67571 unsigned long i_ack_flags;
67572 #ifdef KERNEL_HAS_ATOMIC64
67573 - atomic64_t i_ack_next; /* next ACK to send */
67574 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
67575 #else
67576 spinlock_t i_ack_lock; /* protect i_ack_next */
67577 u64 i_ack_next; /* next ACK to send */
67578 diff -urNp linux-2.6.39.4/net/rds/ib_recv.c linux-2.6.39.4/net/rds/ib_recv.c
67579 --- linux-2.6.39.4/net/rds/ib_recv.c 2011-05-19 00:06:34.000000000 -0400
67580 +++ linux-2.6.39.4/net/rds/ib_recv.c 2011-08-05 19:44:37.000000000 -0400
67581 @@ -592,7 +592,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
67582 static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
67583 int ack_required)
67584 {
67585 - atomic64_set(&ic->i_ack_next, seq);
67586 + atomic64_set_unchecked(&ic->i_ack_next, seq);
67587 if (ack_required) {
67588 smp_mb__before_clear_bit();
67589 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67590 @@ -604,7 +604,7 @@ static u64 rds_ib_get_ack(struct rds_ib_
67591 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67592 smp_mb__after_clear_bit();
67593
67594 - return atomic64_read(&ic->i_ack_next);
67595 + return atomic64_read_unchecked(&ic->i_ack_next);
67596 }
67597 #endif
67598
67599 diff -urNp linux-2.6.39.4/net/rds/iw_cm.c linux-2.6.39.4/net/rds/iw_cm.c
67600 --- linux-2.6.39.4/net/rds/iw_cm.c 2011-05-19 00:06:34.000000000 -0400
67601 +++ linux-2.6.39.4/net/rds/iw_cm.c 2011-08-05 19:44:37.000000000 -0400
67602 @@ -664,7 +664,7 @@ void rds_iw_conn_shutdown(struct rds_con
67603 /* Clear the ACK state */
67604 clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
67605 #ifdef KERNEL_HAS_ATOMIC64
67606 - atomic64_set(&ic->i_ack_next, 0);
67607 + atomic64_set_unchecked(&ic->i_ack_next, 0);
67608 #else
67609 ic->i_ack_next = 0;
67610 #endif
67611 diff -urNp linux-2.6.39.4/net/rds/iw.h linux-2.6.39.4/net/rds/iw.h
67612 --- linux-2.6.39.4/net/rds/iw.h 2011-05-19 00:06:34.000000000 -0400
67613 +++ linux-2.6.39.4/net/rds/iw.h 2011-08-05 19:44:37.000000000 -0400
67614 @@ -133,7 +133,7 @@ struct rds_iw_connection {
67615 /* sending acks */
67616 unsigned long i_ack_flags;
67617 #ifdef KERNEL_HAS_ATOMIC64
67618 - atomic64_t i_ack_next; /* next ACK to send */
67619 + atomic64_unchecked_t i_ack_next; /* next ACK to send */
67620 #else
67621 spinlock_t i_ack_lock; /* protect i_ack_next */
67622 u64 i_ack_next; /* next ACK to send */
67623 diff -urNp linux-2.6.39.4/net/rds/iw_rdma.c linux-2.6.39.4/net/rds/iw_rdma.c
67624 --- linux-2.6.39.4/net/rds/iw_rdma.c 2011-05-19 00:06:34.000000000 -0400
67625 +++ linux-2.6.39.4/net/rds/iw_rdma.c 2011-08-05 19:44:37.000000000 -0400
67626 @@ -182,6 +182,8 @@ int rds_iw_update_cm_id(struct rds_iw_de
67627 struct rdma_cm_id *pcm_id;
67628 int rc;
67629
67630 + pax_track_stack();
67631 +
67632 src_addr = (struct sockaddr_in *)&cm_id->route.addr.src_addr;
67633 dst_addr = (struct sockaddr_in *)&cm_id->route.addr.dst_addr;
67634
67635 diff -urNp linux-2.6.39.4/net/rds/iw_recv.c linux-2.6.39.4/net/rds/iw_recv.c
67636 --- linux-2.6.39.4/net/rds/iw_recv.c 2011-05-19 00:06:34.000000000 -0400
67637 +++ linux-2.6.39.4/net/rds/iw_recv.c 2011-08-05 19:44:37.000000000 -0400
67638 @@ -427,7 +427,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
67639 static void rds_iw_set_ack(struct rds_iw_connection *ic, u64 seq,
67640 int ack_required)
67641 {
67642 - atomic64_set(&ic->i_ack_next, seq);
67643 + atomic64_set_unchecked(&ic->i_ack_next, seq);
67644 if (ack_required) {
67645 smp_mb__before_clear_bit();
67646 set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67647 @@ -439,7 +439,7 @@ static u64 rds_iw_get_ack(struct rds_iw_
67648 clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
67649 smp_mb__after_clear_bit();
67650
67651 - return atomic64_read(&ic->i_ack_next);
67652 + return atomic64_read_unchecked(&ic->i_ack_next);
67653 }
67654 #endif
67655
67656 diff -urNp linux-2.6.39.4/net/rxrpc/af_rxrpc.c linux-2.6.39.4/net/rxrpc/af_rxrpc.c
67657 --- linux-2.6.39.4/net/rxrpc/af_rxrpc.c 2011-05-19 00:06:34.000000000 -0400
67658 +++ linux-2.6.39.4/net/rxrpc/af_rxrpc.c 2011-08-05 19:44:37.000000000 -0400
67659 @@ -39,7 +39,7 @@ static const struct proto_ops rxrpc_rpc_
67660 __be32 rxrpc_epoch;
67661
67662 /* current debugging ID */
67663 -atomic_t rxrpc_debug_id;
67664 +atomic_unchecked_t rxrpc_debug_id;
67665
67666 /* count of skbs currently in use */
67667 atomic_t rxrpc_n_skbs;
67668 diff -urNp linux-2.6.39.4/net/rxrpc/ar-ack.c linux-2.6.39.4/net/rxrpc/ar-ack.c
67669 --- linux-2.6.39.4/net/rxrpc/ar-ack.c 2011-05-19 00:06:34.000000000 -0400
67670 +++ linux-2.6.39.4/net/rxrpc/ar-ack.c 2011-08-05 19:44:37.000000000 -0400
67671 @@ -175,7 +175,7 @@ static void rxrpc_resend(struct rxrpc_ca
67672
67673 _enter("{%d,%d,%d,%d},",
67674 call->acks_hard, call->acks_unacked,
67675 - atomic_read(&call->sequence),
67676 + atomic_read_unchecked(&call->sequence),
67677 CIRC_CNT(call->acks_head, call->acks_tail, call->acks_winsz));
67678
67679 stop = 0;
67680 @@ -199,7 +199,7 @@ static void rxrpc_resend(struct rxrpc_ca
67681
67682 /* each Tx packet has a new serial number */
67683 sp->hdr.serial =
67684 - htonl(atomic_inc_return(&call->conn->serial));
67685 + htonl(atomic_inc_return_unchecked(&call->conn->serial));
67686
67687 hdr = (struct rxrpc_header *) txb->head;
67688 hdr->serial = sp->hdr.serial;
67689 @@ -405,7 +405,7 @@ static void rxrpc_rotate_tx_window(struc
67690 */
67691 static void rxrpc_clear_tx_window(struct rxrpc_call *call)
67692 {
67693 - rxrpc_rotate_tx_window(call, atomic_read(&call->sequence));
67694 + rxrpc_rotate_tx_window(call, atomic_read_unchecked(&call->sequence));
67695 }
67696
67697 /*
67698 @@ -631,7 +631,7 @@ process_further:
67699
67700 latest = ntohl(sp->hdr.serial);
67701 hard = ntohl(ack.firstPacket);
67702 - tx = atomic_read(&call->sequence);
67703 + tx = atomic_read_unchecked(&call->sequence);
67704
67705 _proto("Rx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
67706 latest,
67707 @@ -844,6 +844,8 @@ void rxrpc_process_call(struct work_stru
67708 u32 abort_code = RX_PROTOCOL_ERROR;
67709 u8 *acks = NULL;
67710
67711 + pax_track_stack();
67712 +
67713 //printk("\n--------------------\n");
67714 _enter("{%d,%s,%lx} [%lu]",
67715 call->debug_id, rxrpc_call_states[call->state], call->events,
67716 @@ -1163,7 +1165,7 @@ void rxrpc_process_call(struct work_stru
67717 goto maybe_reschedule;
67718
67719 send_ACK_with_skew:
67720 - ack.maxSkew = htons(atomic_read(&call->conn->hi_serial) -
67721 + ack.maxSkew = htons(atomic_read_unchecked(&call->conn->hi_serial) -
67722 ntohl(ack.serial));
67723 send_ACK:
67724 mtu = call->conn->trans->peer->if_mtu;
67725 @@ -1175,7 +1177,7 @@ send_ACK:
67726 ackinfo.rxMTU = htonl(5692);
67727 ackinfo.jumbo_max = htonl(4);
67728
67729 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
67730 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
67731 _proto("Tx ACK %%%u { m=%hu f=#%u p=#%u s=%%%u r=%s n=%u }",
67732 ntohl(hdr.serial),
67733 ntohs(ack.maxSkew),
67734 @@ -1193,7 +1195,7 @@ send_ACK:
67735 send_message:
67736 _debug("send message");
67737
67738 - hdr.serial = htonl(atomic_inc_return(&call->conn->serial));
67739 + hdr.serial = htonl(atomic_inc_return_unchecked(&call->conn->serial));
67740 _proto("Tx %s %%%u", rxrpc_pkts[hdr.type], ntohl(hdr.serial));
67741 send_message_2:
67742
67743 diff -urNp linux-2.6.39.4/net/rxrpc/ar-call.c linux-2.6.39.4/net/rxrpc/ar-call.c
67744 --- linux-2.6.39.4/net/rxrpc/ar-call.c 2011-05-19 00:06:34.000000000 -0400
67745 +++ linux-2.6.39.4/net/rxrpc/ar-call.c 2011-08-05 19:44:37.000000000 -0400
67746 @@ -83,7 +83,7 @@ static struct rxrpc_call *rxrpc_alloc_ca
67747 spin_lock_init(&call->lock);
67748 rwlock_init(&call->state_lock);
67749 atomic_set(&call->usage, 1);
67750 - call->debug_id = atomic_inc_return(&rxrpc_debug_id);
67751 + call->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67752 call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;
67753
67754 memset(&call->sock_node, 0xed, sizeof(call->sock_node));
67755 diff -urNp linux-2.6.39.4/net/rxrpc/ar-connection.c linux-2.6.39.4/net/rxrpc/ar-connection.c
67756 --- linux-2.6.39.4/net/rxrpc/ar-connection.c 2011-05-19 00:06:34.000000000 -0400
67757 +++ linux-2.6.39.4/net/rxrpc/ar-connection.c 2011-08-05 19:44:37.000000000 -0400
67758 @@ -206,7 +206,7 @@ static struct rxrpc_connection *rxrpc_al
67759 rwlock_init(&conn->lock);
67760 spin_lock_init(&conn->state_lock);
67761 atomic_set(&conn->usage, 1);
67762 - conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
67763 + conn->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67764 conn->avail_calls = RXRPC_MAXCALLS;
67765 conn->size_align = 4;
67766 conn->header_size = sizeof(struct rxrpc_header);
67767 diff -urNp linux-2.6.39.4/net/rxrpc/ar-connevent.c linux-2.6.39.4/net/rxrpc/ar-connevent.c
67768 --- linux-2.6.39.4/net/rxrpc/ar-connevent.c 2011-05-19 00:06:34.000000000 -0400
67769 +++ linux-2.6.39.4/net/rxrpc/ar-connevent.c 2011-08-05 19:44:37.000000000 -0400
67770 @@ -109,7 +109,7 @@ static int rxrpc_abort_connection(struct
67771
67772 len = iov[0].iov_len + iov[1].iov_len;
67773
67774 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
67775 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67776 _proto("Tx CONN ABORT %%%u { %d }", ntohl(hdr.serial), abort_code);
67777
67778 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
67779 diff -urNp linux-2.6.39.4/net/rxrpc/ar-input.c linux-2.6.39.4/net/rxrpc/ar-input.c
67780 --- linux-2.6.39.4/net/rxrpc/ar-input.c 2011-05-19 00:06:34.000000000 -0400
67781 +++ linux-2.6.39.4/net/rxrpc/ar-input.c 2011-08-05 19:44:37.000000000 -0400
67782 @@ -340,9 +340,9 @@ void rxrpc_fast_process_packet(struct rx
67783 /* track the latest serial number on this connection for ACK packet
67784 * information */
67785 serial = ntohl(sp->hdr.serial);
67786 - hi_serial = atomic_read(&call->conn->hi_serial);
67787 + hi_serial = atomic_read_unchecked(&call->conn->hi_serial);
67788 while (serial > hi_serial)
67789 - hi_serial = atomic_cmpxchg(&call->conn->hi_serial, hi_serial,
67790 + hi_serial = atomic_cmpxchg_unchecked(&call->conn->hi_serial, hi_serial,
67791 serial);
67792
67793 /* request ACK generation for any ACK or DATA packet that requests
67794 diff -urNp linux-2.6.39.4/net/rxrpc/ar-internal.h linux-2.6.39.4/net/rxrpc/ar-internal.h
67795 --- linux-2.6.39.4/net/rxrpc/ar-internal.h 2011-05-19 00:06:34.000000000 -0400
67796 +++ linux-2.6.39.4/net/rxrpc/ar-internal.h 2011-08-05 19:44:37.000000000 -0400
67797 @@ -272,8 +272,8 @@ struct rxrpc_connection {
67798 int error; /* error code for local abort */
67799 int debug_id; /* debug ID for printks */
67800 unsigned call_counter; /* call ID counter */
67801 - atomic_t serial; /* packet serial number counter */
67802 - atomic_t hi_serial; /* highest serial number received */
67803 + atomic_unchecked_t serial; /* packet serial number counter */
67804 + atomic_unchecked_t hi_serial; /* highest serial number received */
67805 u8 avail_calls; /* number of calls available */
67806 u8 size_align; /* data size alignment (for security) */
67807 u8 header_size; /* rxrpc + security header size */
67808 @@ -346,7 +346,7 @@ struct rxrpc_call {
67809 spinlock_t lock;
67810 rwlock_t state_lock; /* lock for state transition */
67811 atomic_t usage;
67812 - atomic_t sequence; /* Tx data packet sequence counter */
67813 + atomic_unchecked_t sequence; /* Tx data packet sequence counter */
67814 u32 abort_code; /* local/remote abort code */
67815 enum { /* current state of call */
67816 RXRPC_CALL_CLIENT_SEND_REQUEST, /* - client sending request phase */
67817 @@ -420,7 +420,7 @@ static inline void rxrpc_abort_call(stru
67818 */
67819 extern atomic_t rxrpc_n_skbs;
67820 extern __be32 rxrpc_epoch;
67821 -extern atomic_t rxrpc_debug_id;
67822 +extern atomic_unchecked_t rxrpc_debug_id;
67823 extern struct workqueue_struct *rxrpc_workqueue;
67824
67825 /*
67826 diff -urNp linux-2.6.39.4/net/rxrpc/ar-local.c linux-2.6.39.4/net/rxrpc/ar-local.c
67827 --- linux-2.6.39.4/net/rxrpc/ar-local.c 2011-05-19 00:06:34.000000000 -0400
67828 +++ linux-2.6.39.4/net/rxrpc/ar-local.c 2011-08-05 19:44:37.000000000 -0400
67829 @@ -45,7 +45,7 @@ struct rxrpc_local *rxrpc_alloc_local(st
67830 spin_lock_init(&local->lock);
67831 rwlock_init(&local->services_lock);
67832 atomic_set(&local->usage, 1);
67833 - local->debug_id = atomic_inc_return(&rxrpc_debug_id);
67834 + local->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67835 memcpy(&local->srx, srx, sizeof(*srx));
67836 }
67837
67838 diff -urNp linux-2.6.39.4/net/rxrpc/ar-output.c linux-2.6.39.4/net/rxrpc/ar-output.c
67839 --- linux-2.6.39.4/net/rxrpc/ar-output.c 2011-05-19 00:06:34.000000000 -0400
67840 +++ linux-2.6.39.4/net/rxrpc/ar-output.c 2011-08-05 19:44:37.000000000 -0400
67841 @@ -681,9 +681,9 @@ static int rxrpc_send_data(struct kiocb
67842 sp->hdr.cid = call->cid;
67843 sp->hdr.callNumber = call->call_id;
67844 sp->hdr.seq =
67845 - htonl(atomic_inc_return(&call->sequence));
67846 + htonl(atomic_inc_return_unchecked(&call->sequence));
67847 sp->hdr.serial =
67848 - htonl(atomic_inc_return(&conn->serial));
67849 + htonl(atomic_inc_return_unchecked(&conn->serial));
67850 sp->hdr.type = RXRPC_PACKET_TYPE_DATA;
67851 sp->hdr.userStatus = 0;
67852 sp->hdr.securityIndex = conn->security_ix;
67853 diff -urNp linux-2.6.39.4/net/rxrpc/ar-peer.c linux-2.6.39.4/net/rxrpc/ar-peer.c
67854 --- linux-2.6.39.4/net/rxrpc/ar-peer.c 2011-05-19 00:06:34.000000000 -0400
67855 +++ linux-2.6.39.4/net/rxrpc/ar-peer.c 2011-08-05 19:44:37.000000000 -0400
67856 @@ -71,7 +71,7 @@ static struct rxrpc_peer *rxrpc_alloc_pe
67857 INIT_LIST_HEAD(&peer->error_targets);
67858 spin_lock_init(&peer->lock);
67859 atomic_set(&peer->usage, 1);
67860 - peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
67861 + peer->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67862 memcpy(&peer->srx, srx, sizeof(*srx));
67863
67864 rxrpc_assess_MTU_size(peer);
67865 diff -urNp linux-2.6.39.4/net/rxrpc/ar-proc.c linux-2.6.39.4/net/rxrpc/ar-proc.c
67866 --- linux-2.6.39.4/net/rxrpc/ar-proc.c 2011-05-19 00:06:34.000000000 -0400
67867 +++ linux-2.6.39.4/net/rxrpc/ar-proc.c 2011-08-05 19:44:37.000000000 -0400
67868 @@ -164,8 +164,8 @@ static int rxrpc_connection_seq_show(str
67869 atomic_read(&conn->usage),
67870 rxrpc_conn_states[conn->state],
67871 key_serial(conn->key),
67872 - atomic_read(&conn->serial),
67873 - atomic_read(&conn->hi_serial));
67874 + atomic_read_unchecked(&conn->serial),
67875 + atomic_read_unchecked(&conn->hi_serial));
67876
67877 return 0;
67878 }
67879 diff -urNp linux-2.6.39.4/net/rxrpc/ar-transport.c linux-2.6.39.4/net/rxrpc/ar-transport.c
67880 --- linux-2.6.39.4/net/rxrpc/ar-transport.c 2011-05-19 00:06:34.000000000 -0400
67881 +++ linux-2.6.39.4/net/rxrpc/ar-transport.c 2011-08-05 19:44:37.000000000 -0400
67882 @@ -47,7 +47,7 @@ static struct rxrpc_transport *rxrpc_all
67883 spin_lock_init(&trans->client_lock);
67884 rwlock_init(&trans->conn_lock);
67885 atomic_set(&trans->usage, 1);
67886 - trans->debug_id = atomic_inc_return(&rxrpc_debug_id);
67887 + trans->debug_id = atomic_inc_return_unchecked(&rxrpc_debug_id);
67888
67889 if (peer->srx.transport.family == AF_INET) {
67890 switch (peer->srx.transport_type) {
67891 diff -urNp linux-2.6.39.4/net/rxrpc/rxkad.c linux-2.6.39.4/net/rxrpc/rxkad.c
67892 --- linux-2.6.39.4/net/rxrpc/rxkad.c 2011-05-19 00:06:34.000000000 -0400
67893 +++ linux-2.6.39.4/net/rxrpc/rxkad.c 2011-08-05 19:44:37.000000000 -0400
67894 @@ -211,6 +211,8 @@ static int rxkad_secure_packet_encrypt(c
67895 u16 check;
67896 int nsg;
67897
67898 + pax_track_stack();
67899 +
67900 sp = rxrpc_skb(skb);
67901
67902 _enter("");
67903 @@ -338,6 +340,8 @@ static int rxkad_verify_packet_auth(cons
67904 u16 check;
67905 int nsg;
67906
67907 + pax_track_stack();
67908 +
67909 _enter("");
67910
67911 sp = rxrpc_skb(skb);
67912 @@ -610,7 +614,7 @@ static int rxkad_issue_challenge(struct
67913
67914 len = iov[0].iov_len + iov[1].iov_len;
67915
67916 - hdr.serial = htonl(atomic_inc_return(&conn->serial));
67917 + hdr.serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67918 _proto("Tx CHALLENGE %%%u", ntohl(hdr.serial));
67919
67920 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 2, len);
67921 @@ -660,7 +664,7 @@ static int rxkad_send_response(struct rx
67922
67923 len = iov[0].iov_len + iov[1].iov_len + iov[2].iov_len;
67924
67925 - hdr->serial = htonl(atomic_inc_return(&conn->serial));
67926 + hdr->serial = htonl(atomic_inc_return_unchecked(&conn->serial));
67927 _proto("Tx RESPONSE %%%u", ntohl(hdr->serial));
67928
67929 ret = kernel_sendmsg(conn->trans->local->socket, &msg, iov, 3, len);
67930 diff -urNp linux-2.6.39.4/net/sctp/proc.c linux-2.6.39.4/net/sctp/proc.c
67931 --- linux-2.6.39.4/net/sctp/proc.c 2011-05-19 00:06:34.000000000 -0400
67932 +++ linux-2.6.39.4/net/sctp/proc.c 2011-08-05 19:44:37.000000000 -0400
67933 @@ -212,7 +212,12 @@ static int sctp_eps_seq_show(struct seq_
67934 sctp_for_each_hentry(epb, node, &head->chain) {
67935 ep = sctp_ep(epb);
67936 sk = epb->sk;
67937 - seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ", ep, sk,
67938 + seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d %5d %5lu ",
67939 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67940 + NULL, NULL,
67941 +#else
67942 + ep, sk,
67943 +#endif
67944 sctp_sk(sk)->type, sk->sk_state, hash,
67945 epb->bind_addr.port,
67946 sock_i_uid(sk), sock_i_ino(sk));
67947 @@ -318,7 +323,12 @@ static int sctp_assocs_seq_show(struct s
67948 seq_printf(seq,
67949 "%8p %8p %-3d %-3d %-2d %-4d "
67950 "%4d %8d %8d %7d %5lu %-5d %5d ",
67951 - assoc, sk, sctp_sk(sk)->type, sk->sk_state,
67952 +#ifdef CONFIG_GRKERNSEC_HIDESYM
67953 + NULL, NULL,
67954 +#else
67955 + assoc, sk,
67956 +#endif
67957 + sctp_sk(sk)->type, sk->sk_state,
67958 assoc->state, hash,
67959 assoc->assoc_id,
67960 assoc->sndbuf_used,
67961 diff -urNp linux-2.6.39.4/net/sctp/socket.c linux-2.6.39.4/net/sctp/socket.c
67962 --- linux-2.6.39.4/net/sctp/socket.c 2011-05-19 00:06:34.000000000 -0400
67963 +++ linux-2.6.39.4/net/sctp/socket.c 2011-08-05 19:44:37.000000000 -0400
67964 @@ -4433,7 +4433,7 @@ static int sctp_getsockopt_peer_addrs(st
67965 addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
67966 if (space_left < addrlen)
67967 return -ENOMEM;
67968 - if (copy_to_user(to, &temp, addrlen))
67969 + if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen))
67970 return -EFAULT;
67971 to += addrlen;
67972 cnt++;
67973 diff -urNp linux-2.6.39.4/net/socket.c linux-2.6.39.4/net/socket.c
67974 --- linux-2.6.39.4/net/socket.c 2011-06-03 00:04:14.000000000 -0400
67975 +++ linux-2.6.39.4/net/socket.c 2011-08-05 19:44:37.000000000 -0400
67976 @@ -88,6 +88,7 @@
67977 #include <linux/nsproxy.h>
67978 #include <linux/magic.h>
67979 #include <linux/slab.h>
67980 +#include <linux/in.h>
67981
67982 #include <asm/uaccess.h>
67983 #include <asm/unistd.h>
67984 @@ -105,6 +106,8 @@
67985 #include <linux/sockios.h>
67986 #include <linux/atalk.h>
67987
67988 +#include <linux/grsock.h>
67989 +
67990 static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
67991 static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov,
67992 unsigned long nr_segs, loff_t pos);
67993 @@ -330,7 +333,7 @@ static struct dentry *sockfs_mount(struc
67994 &sockfs_dentry_operations, SOCKFS_MAGIC);
67995 }
67996
67997 -static struct vfsmount *sock_mnt __read_mostly;
67998 +struct vfsmount *sock_mnt __read_mostly;
67999
68000 static struct file_system_type sock_fs_type = {
68001 .name = "sockfs",
68002 @@ -1179,6 +1182,8 @@ int __sock_create(struct net *net, int f
68003 return -EAFNOSUPPORT;
68004 if (type < 0 || type >= SOCK_MAX)
68005 return -EINVAL;
68006 + if (protocol < 0)
68007 + return -EINVAL;
68008
68009 /* Compatibility.
68010
68011 @@ -1311,6 +1316,16 @@ SYSCALL_DEFINE3(socket, int, family, int
68012 if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
68013 flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK;
68014
68015 + if(!gr_search_socket(family, type, protocol)) {
68016 + retval = -EACCES;
68017 + goto out;
68018 + }
68019 +
68020 + if (gr_handle_sock_all(family, type, protocol)) {
68021 + retval = -EACCES;
68022 + goto out;
68023 + }
68024 +
68025 retval = sock_create(family, type, protocol, &sock);
68026 if (retval < 0)
68027 goto out;
68028 @@ -1423,6 +1438,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
68029 if (sock) {
68030 err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address);
68031 if (err >= 0) {
68032 + if (gr_handle_sock_server((struct sockaddr *)&address)) {
68033 + err = -EACCES;
68034 + goto error;
68035 + }
68036 + err = gr_search_bind(sock, (struct sockaddr_in *)&address);
68037 + if (err)
68038 + goto error;
68039 +
68040 err = security_socket_bind(sock,
68041 (struct sockaddr *)&address,
68042 addrlen);
68043 @@ -1431,6 +1454,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so
68044 (struct sockaddr *)
68045 &address, addrlen);
68046 }
68047 +error:
68048 fput_light(sock->file, fput_needed);
68049 }
68050 return err;
68051 @@ -1454,10 +1478,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba
68052 if ((unsigned)backlog > somaxconn)
68053 backlog = somaxconn;
68054
68055 + if (gr_handle_sock_server_other(sock->sk)) {
68056 + err = -EPERM;
68057 + goto error;
68058 + }
68059 +
68060 + err = gr_search_listen(sock);
68061 + if (err)
68062 + goto error;
68063 +
68064 err = security_socket_listen(sock, backlog);
68065 if (!err)
68066 err = sock->ops->listen(sock, backlog);
68067
68068 +error:
68069 fput_light(sock->file, fput_needed);
68070 }
68071 return err;
68072 @@ -1501,6 +1535,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
68073 newsock->type = sock->type;
68074 newsock->ops = sock->ops;
68075
68076 + if (gr_handle_sock_server_other(sock->sk)) {
68077 + err = -EPERM;
68078 + sock_release(newsock);
68079 + goto out_put;
68080 + }
68081 +
68082 + err = gr_search_accept(sock);
68083 + if (err) {
68084 + sock_release(newsock);
68085 + goto out_put;
68086 + }
68087 +
68088 /*
68089 * We don't need try_module_get here, as the listening socket (sock)
68090 * has the protocol module (sock->ops->owner) held.
68091 @@ -1539,6 +1585,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct
68092 fd_install(newfd, newfile);
68093 err = newfd;
68094
68095 + gr_attach_curr_ip(newsock->sk);
68096 +
68097 out_put:
68098 fput_light(sock->file, fput_needed);
68099 out:
68100 @@ -1571,6 +1619,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct
68101 int, addrlen)
68102 {
68103 struct socket *sock;
68104 + struct sockaddr *sck;
68105 struct sockaddr_storage address;
68106 int err, fput_needed;
68107
68108 @@ -1581,6 +1630,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct
68109 if (err < 0)
68110 goto out_put;
68111
68112 + sck = (struct sockaddr *)&address;
68113 +
68114 + if (gr_handle_sock_client(sck)) {
68115 + err = -EACCES;
68116 + goto out_put;
68117 + }
68118 +
68119 + err = gr_search_connect(sock, (struct sockaddr_in *)sck);
68120 + if (err)
68121 + goto out_put;
68122 +
68123 err =
68124 security_socket_connect(sock, (struct sockaddr *)&address, addrlen);
68125 if (err)
68126 @@ -1882,6 +1942,8 @@ SYSCALL_DEFINE3(sendmsg, int, fd, struct
68127 int err, ctl_len, iov_size, total_len;
68128 int fput_needed;
68129
68130 + pax_track_stack();
68131 +
68132 err = -EFAULT;
68133 if (MSG_CMSG_COMPAT & flags) {
68134 if (get_compat_msghdr(&msg_sys, msg_compat))
68135 diff -urNp linux-2.6.39.4/net/sunrpc/sched.c linux-2.6.39.4/net/sunrpc/sched.c
68136 --- linux-2.6.39.4/net/sunrpc/sched.c 2011-08-05 21:11:51.000000000 -0400
68137 +++ linux-2.6.39.4/net/sunrpc/sched.c 2011-08-05 21:12:20.000000000 -0400
68138 @@ -234,9 +234,9 @@ static int rpc_wait_bit_killable(void *w
68139 #ifdef RPC_DEBUG
68140 static void rpc_task_set_debuginfo(struct rpc_task *task)
68141 {
68142 - static atomic_t rpc_pid;
68143 + static atomic_unchecked_t rpc_pid;
68144
68145 - task->tk_pid = atomic_inc_return(&rpc_pid);
68146 + task->tk_pid = atomic_inc_return_unchecked(&rpc_pid);
68147 }
68148 #else
68149 static inline void rpc_task_set_debuginfo(struct rpc_task *task)
68150 diff -urNp linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma.c linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma.c
68151 --- linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma.c 2011-05-19 00:06:34.000000000 -0400
68152 +++ linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma.c 2011-08-05 19:44:37.000000000 -0400
68153 @@ -61,15 +61,15 @@ unsigned int svcrdma_max_req_size = RPCR
68154 static unsigned int min_max_inline = 4096;
68155 static unsigned int max_max_inline = 65536;
68156
68157 -atomic_t rdma_stat_recv;
68158 -atomic_t rdma_stat_read;
68159 -atomic_t rdma_stat_write;
68160 -atomic_t rdma_stat_sq_starve;
68161 -atomic_t rdma_stat_rq_starve;
68162 -atomic_t rdma_stat_rq_poll;
68163 -atomic_t rdma_stat_rq_prod;
68164 -atomic_t rdma_stat_sq_poll;
68165 -atomic_t rdma_stat_sq_prod;
68166 +atomic_unchecked_t rdma_stat_recv;
68167 +atomic_unchecked_t rdma_stat_read;
68168 +atomic_unchecked_t rdma_stat_write;
68169 +atomic_unchecked_t rdma_stat_sq_starve;
68170 +atomic_unchecked_t rdma_stat_rq_starve;
68171 +atomic_unchecked_t rdma_stat_rq_poll;
68172 +atomic_unchecked_t rdma_stat_rq_prod;
68173 +atomic_unchecked_t rdma_stat_sq_poll;
68174 +atomic_unchecked_t rdma_stat_sq_prod;
68175
68176 /* Temporary NFS request map and context caches */
68177 struct kmem_cache *svc_rdma_map_cachep;
68178 @@ -109,7 +109,7 @@ static int read_reset_stat(ctl_table *ta
68179 len -= *ppos;
68180 if (len > *lenp)
68181 len = *lenp;
68182 - if (len && copy_to_user(buffer, str_buf, len))
68183 + if (len > sizeof str_buf || (len && copy_to_user(buffer, str_buf, len)))
68184 return -EFAULT;
68185 *lenp = len;
68186 *ppos += len;
68187 @@ -150,63 +150,63 @@ static ctl_table svcrdma_parm_table[] =
68188 {
68189 .procname = "rdma_stat_read",
68190 .data = &rdma_stat_read,
68191 - .maxlen = sizeof(atomic_t),
68192 + .maxlen = sizeof(atomic_unchecked_t),
68193 .mode = 0644,
68194 .proc_handler = read_reset_stat,
68195 },
68196 {
68197 .procname = "rdma_stat_recv",
68198 .data = &rdma_stat_recv,
68199 - .maxlen = sizeof(atomic_t),
68200 + .maxlen = sizeof(atomic_unchecked_t),
68201 .mode = 0644,
68202 .proc_handler = read_reset_stat,
68203 },
68204 {
68205 .procname = "rdma_stat_write",
68206 .data = &rdma_stat_write,
68207 - .maxlen = sizeof(atomic_t),
68208 + .maxlen = sizeof(atomic_unchecked_t),
68209 .mode = 0644,
68210 .proc_handler = read_reset_stat,
68211 },
68212 {
68213 .procname = "rdma_stat_sq_starve",
68214 .data = &rdma_stat_sq_starve,
68215 - .maxlen = sizeof(atomic_t),
68216 + .maxlen = sizeof(atomic_unchecked_t),
68217 .mode = 0644,
68218 .proc_handler = read_reset_stat,
68219 },
68220 {
68221 .procname = "rdma_stat_rq_starve",
68222 .data = &rdma_stat_rq_starve,
68223 - .maxlen = sizeof(atomic_t),
68224 + .maxlen = sizeof(atomic_unchecked_t),
68225 .mode = 0644,
68226 .proc_handler = read_reset_stat,
68227 },
68228 {
68229 .procname = "rdma_stat_rq_poll",
68230 .data = &rdma_stat_rq_poll,
68231 - .maxlen = sizeof(atomic_t),
68232 + .maxlen = sizeof(atomic_unchecked_t),
68233 .mode = 0644,
68234 .proc_handler = read_reset_stat,
68235 },
68236 {
68237 .procname = "rdma_stat_rq_prod",
68238 .data = &rdma_stat_rq_prod,
68239 - .maxlen = sizeof(atomic_t),
68240 + .maxlen = sizeof(atomic_unchecked_t),
68241 .mode = 0644,
68242 .proc_handler = read_reset_stat,
68243 },
68244 {
68245 .procname = "rdma_stat_sq_poll",
68246 .data = &rdma_stat_sq_poll,
68247 - .maxlen = sizeof(atomic_t),
68248 + .maxlen = sizeof(atomic_unchecked_t),
68249 .mode = 0644,
68250 .proc_handler = read_reset_stat,
68251 },
68252 {
68253 .procname = "rdma_stat_sq_prod",
68254 .data = &rdma_stat_sq_prod,
68255 - .maxlen = sizeof(atomic_t),
68256 + .maxlen = sizeof(atomic_unchecked_t),
68257 .mode = 0644,
68258 .proc_handler = read_reset_stat,
68259 },
68260 diff -urNp linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
68261 --- linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-05-19 00:06:34.000000000 -0400
68262 +++ linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c 2011-08-05 19:44:37.000000000 -0400
68263 @@ -499,7 +499,7 @@ next_sge:
68264 svc_rdma_put_context(ctxt, 0);
68265 goto out;
68266 }
68267 - atomic_inc(&rdma_stat_read);
68268 + atomic_inc_unchecked(&rdma_stat_read);
68269
68270 if (read_wr.num_sge < chl_map->ch[ch_no].count) {
68271 chl_map->ch[ch_no].count -= read_wr.num_sge;
68272 @@ -609,7 +609,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
68273 dto_q);
68274 list_del_init(&ctxt->dto_q);
68275 } else {
68276 - atomic_inc(&rdma_stat_rq_starve);
68277 + atomic_inc_unchecked(&rdma_stat_rq_starve);
68278 clear_bit(XPT_DATA, &xprt->xpt_flags);
68279 ctxt = NULL;
68280 }
68281 @@ -629,7 +629,7 @@ int svc_rdma_recvfrom(struct svc_rqst *r
68282 dprintk("svcrdma: processing ctxt=%p on xprt=%p, rqstp=%p, status=%d\n",
68283 ctxt, rdma_xprt, rqstp, ctxt->wc_status);
68284 BUG_ON(ctxt->wc_status != IB_WC_SUCCESS);
68285 - atomic_inc(&rdma_stat_recv);
68286 + atomic_inc_unchecked(&rdma_stat_recv);
68287
68288 /* Build up the XDR from the receive buffers. */
68289 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
68290 diff -urNp linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c
68291 --- linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-05-19 00:06:34.000000000 -0400
68292 +++ linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_sendto.c 2011-08-05 19:44:37.000000000 -0400
68293 @@ -362,7 +362,7 @@ static int send_write(struct svcxprt_rdm
68294 write_wr.wr.rdma.remote_addr = to;
68295
68296 /* Post It */
68297 - atomic_inc(&rdma_stat_write);
68298 + atomic_inc_unchecked(&rdma_stat_write);
68299 if (svc_rdma_send(xprt, &write_wr))
68300 goto err;
68301 return 0;
68302 diff -urNp linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_transport.c linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_transport.c
68303 --- linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-05-19 00:06:34.000000000 -0400
68304 +++ linux-2.6.39.4/net/sunrpc/xprtrdma/svc_rdma_transport.c 2011-08-05 19:44:37.000000000 -0400
68305 @@ -298,7 +298,7 @@ static void rq_cq_reap(struct svcxprt_rd
68306 return;
68307
68308 ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
68309 - atomic_inc(&rdma_stat_rq_poll);
68310 + atomic_inc_unchecked(&rdma_stat_rq_poll);
68311
68312 while ((ret = ib_poll_cq(xprt->sc_rq_cq, 1, &wc)) > 0) {
68313 ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
68314 @@ -320,7 +320,7 @@ static void rq_cq_reap(struct svcxprt_rd
68315 }
68316
68317 if (ctxt)
68318 - atomic_inc(&rdma_stat_rq_prod);
68319 + atomic_inc_unchecked(&rdma_stat_rq_prod);
68320
68321 set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
68322 /*
68323 @@ -392,7 +392,7 @@ static void sq_cq_reap(struct svcxprt_rd
68324 return;
68325
68326 ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
68327 - atomic_inc(&rdma_stat_sq_poll);
68328 + atomic_inc_unchecked(&rdma_stat_sq_poll);
68329 while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
68330 if (wc.status != IB_WC_SUCCESS)
68331 /* Close the transport */
68332 @@ -410,7 +410,7 @@ static void sq_cq_reap(struct svcxprt_rd
68333 }
68334
68335 if (ctxt)
68336 - atomic_inc(&rdma_stat_sq_prod);
68337 + atomic_inc_unchecked(&rdma_stat_sq_prod);
68338 }
68339
68340 static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
68341 @@ -1271,7 +1271,7 @@ int svc_rdma_send(struct svcxprt_rdma *x
68342 spin_lock_bh(&xprt->sc_lock);
68343 if (xprt->sc_sq_depth < atomic_read(&xprt->sc_sq_count) + wr_count) {
68344 spin_unlock_bh(&xprt->sc_lock);
68345 - atomic_inc(&rdma_stat_sq_starve);
68346 + atomic_inc_unchecked(&rdma_stat_sq_starve);
68347
68348 /* See if we can opportunistically reap SQ WR to make room */
68349 sq_cq_reap(xprt);
68350 diff -urNp linux-2.6.39.4/net/sysctl_net.c linux-2.6.39.4/net/sysctl_net.c
68351 --- linux-2.6.39.4/net/sysctl_net.c 2011-05-19 00:06:34.000000000 -0400
68352 +++ linux-2.6.39.4/net/sysctl_net.c 2011-08-05 19:44:37.000000000 -0400
68353 @@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct
68354 struct ctl_table *table)
68355 {
68356 /* Allow network administrator to have same access as root. */
68357 - if (capable(CAP_NET_ADMIN)) {
68358 + if (capable_nolog(CAP_NET_ADMIN)) {
68359 int mode = (table->mode >> 6) & 7;
68360 return (mode << 6) | (mode << 3) | mode;
68361 }
68362 diff -urNp linux-2.6.39.4/net/unix/af_unix.c linux-2.6.39.4/net/unix/af_unix.c
68363 --- linux-2.6.39.4/net/unix/af_unix.c 2011-05-19 00:06:34.000000000 -0400
68364 +++ linux-2.6.39.4/net/unix/af_unix.c 2011-08-05 19:44:37.000000000 -0400
68365 @@ -767,6 +767,12 @@ static struct sock *unix_find_other(stru
68366 err = -ECONNREFUSED;
68367 if (!S_ISSOCK(inode->i_mode))
68368 goto put_fail;
68369 +
68370 + if (!gr_acl_handle_unix(path.dentry, path.mnt)) {
68371 + err = -EACCES;
68372 + goto put_fail;
68373 + }
68374 +
68375 u = unix_find_socket_byinode(inode);
68376 if (!u)
68377 goto put_fail;
68378 @@ -787,6 +793,13 @@ static struct sock *unix_find_other(stru
68379 if (u) {
68380 struct dentry *dentry;
68381 dentry = unix_sk(u)->dentry;
68382 +
68383 + if (!gr_handle_chroot_unix(pid_vnr(u->sk_peer_pid))) {
68384 + err = -EPERM;
68385 + sock_put(u);
68386 + goto fail;
68387 + }
68388 +
68389 if (dentry)
68390 touch_atime(unix_sk(u)->mnt, dentry);
68391 } else
68392 @@ -872,11 +885,18 @@ static int unix_bind(struct socket *sock
68393 err = security_path_mknod(&nd.path, dentry, mode, 0);
68394 if (err)
68395 goto out_mknod_drop_write;
68396 + if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
68397 + err = -EACCES;
68398 + goto out_mknod_drop_write;
68399 + }
68400 err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
68401 out_mknod_drop_write:
68402 mnt_drop_write(nd.path.mnt);
68403 if (err)
68404 goto out_mknod_dput;
68405 +
68406 + gr_handle_create(dentry, nd.path.mnt);
68407 +
68408 mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
68409 dput(nd.path.dentry);
68410 nd.path.dentry = dentry;
68411 @@ -2255,7 +2275,11 @@ static int unix_seq_show(struct seq_file
68412 unix_state_lock(s);
68413
68414 seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu",
68415 +#ifdef CONFIG_GRKERNSEC_HIDESYM
68416 + NULL,
68417 +#else
68418 s,
68419 +#endif
68420 atomic_read(&s->sk_refcnt),
68421 0,
68422 s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0,
68423 diff -urNp linux-2.6.39.4/net/wireless/core.h linux-2.6.39.4/net/wireless/core.h
68424 --- linux-2.6.39.4/net/wireless/core.h 2011-05-19 00:06:34.000000000 -0400
68425 +++ linux-2.6.39.4/net/wireless/core.h 2011-08-05 20:34:06.000000000 -0400
68426 @@ -27,7 +27,7 @@ struct cfg80211_registered_device {
68427 struct mutex mtx;
68428
68429 /* rfkill support */
68430 - struct rfkill_ops rfkill_ops;
68431 + rfkill_ops_no_const rfkill_ops;
68432 struct rfkill *rfkill;
68433 struct work_struct rfkill_sync;
68434
68435 diff -urNp linux-2.6.39.4/net/wireless/wext-core.c linux-2.6.39.4/net/wireless/wext-core.c
68436 --- linux-2.6.39.4/net/wireless/wext-core.c 2011-05-19 00:06:34.000000000 -0400
68437 +++ linux-2.6.39.4/net/wireless/wext-core.c 2011-08-05 19:44:37.000000000 -0400
68438 @@ -746,8 +746,7 @@ static int ioctl_standard_iw_point(struc
68439 */
68440
68441 /* Support for very large requests */
68442 - if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
68443 - (user_length > descr->max_tokens)) {
68444 + if (user_length > descr->max_tokens) {
68445 /* Allow userspace to GET more than max so
68446 * we can support any size GET requests.
68447 * There is still a limit : -ENOMEM.
68448 @@ -784,22 +783,6 @@ static int ioctl_standard_iw_point(struc
68449 }
68450 }
68451
68452 - if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
68453 - /*
68454 - * If this is a GET, but not NOMAX, it means that the extra
68455 - * data is not bounded by userspace, but by max_tokens. Thus
68456 - * set the length to max_tokens. This matches the extra data
68457 - * allocation.
68458 - * The driver should fill it with the number of tokens it
68459 - * provided, and it may check iwp->length rather than having
68460 - * knowledge of max_tokens. If the driver doesn't change the
68461 - * iwp->length, this ioctl just copies back max_token tokens
68462 - * filled with zeroes. Hopefully the driver isn't claiming
68463 - * them to be valid data.
68464 - */
68465 - iwp->length = descr->max_tokens;
68466 - }
68467 -
68468 err = handler(dev, info, (union iwreq_data *) iwp, extra);
68469
68470 iwp->length += essid_compat;
68471 diff -urNp linux-2.6.39.4/net/xfrm/xfrm_policy.c linux-2.6.39.4/net/xfrm/xfrm_policy.c
68472 --- linux-2.6.39.4/net/xfrm/xfrm_policy.c 2011-05-19 00:06:34.000000000 -0400
68473 +++ linux-2.6.39.4/net/xfrm/xfrm_policy.c 2011-08-05 19:44:37.000000000 -0400
68474 @@ -299,7 +299,7 @@ static void xfrm_policy_kill(struct xfrm
68475 {
68476 policy->walk.dead = 1;
68477
68478 - atomic_inc(&policy->genid);
68479 + atomic_inc_unchecked(&policy->genid);
68480
68481 if (del_timer(&policy->timer))
68482 xfrm_pol_put(policy);
68483 @@ -583,7 +583,7 @@ int xfrm_policy_insert(int dir, struct x
68484 hlist_add_head(&policy->bydst, chain);
68485 xfrm_pol_hold(policy);
68486 net->xfrm.policy_count[dir]++;
68487 - atomic_inc(&flow_cache_genid);
68488 + atomic_inc_unchecked(&flow_cache_genid);
68489 if (delpol)
68490 __xfrm_policy_unlink(delpol, dir);
68491 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir);
68492 @@ -1527,7 +1527,7 @@ free_dst:
68493 goto out;
68494 }
68495
68496 -static int inline
68497 +static inline int
68498 xfrm_dst_alloc_copy(void **target, const void *src, int size)
68499 {
68500 if (!*target) {
68501 @@ -1539,7 +1539,7 @@ xfrm_dst_alloc_copy(void **target, const
68502 return 0;
68503 }
68504
68505 -static int inline
68506 +static inline int
68507 xfrm_dst_update_parent(struct dst_entry *dst, const struct xfrm_selector *sel)
68508 {
68509 #ifdef CONFIG_XFRM_SUB_POLICY
68510 @@ -1551,7 +1551,7 @@ xfrm_dst_update_parent(struct dst_entry
68511 #endif
68512 }
68513
68514 -static int inline
68515 +static inline int
68516 xfrm_dst_update_origin(struct dst_entry *dst, const struct flowi *fl)
68517 {
68518 #ifdef CONFIG_XFRM_SUB_POLICY
68519 @@ -1645,7 +1645,7 @@ xfrm_resolve_and_create_bundle(struct xf
68520
68521 xdst->num_pols = num_pols;
68522 memcpy(xdst->pols, pols, sizeof(struct xfrm_policy*) * num_pols);
68523 - xdst->policy_genid = atomic_read(&pols[0]->genid);
68524 + xdst->policy_genid = atomic_read_unchecked(&pols[0]->genid);
68525
68526 return xdst;
68527 }
68528 @@ -2332,7 +2332,7 @@ static int xfrm_bundle_ok(struct xfrm_ds
68529 if (xdst->xfrm_genid != dst->xfrm->genid)
68530 return 0;
68531 if (xdst->num_pols > 0 &&
68532 - xdst->policy_genid != atomic_read(&xdst->pols[0]->genid))
68533 + xdst->policy_genid != atomic_read_unchecked(&xdst->pols[0]->genid))
68534 return 0;
68535
68536 mtu = dst_mtu(dst->child);
68537 @@ -2860,7 +2860,7 @@ static int xfrm_policy_migrate(struct xf
68538 sizeof(pol->xfrm_vec[i].saddr));
68539 pol->xfrm_vec[i].encap_family = mp->new_family;
68540 /* flush bundles */
68541 - atomic_inc(&pol->genid);
68542 + atomic_inc_unchecked(&pol->genid);
68543 }
68544 }
68545
68546 diff -urNp linux-2.6.39.4/net/xfrm/xfrm_user.c linux-2.6.39.4/net/xfrm/xfrm_user.c
68547 --- linux-2.6.39.4/net/xfrm/xfrm_user.c 2011-05-19 00:06:34.000000000 -0400
68548 +++ linux-2.6.39.4/net/xfrm/xfrm_user.c 2011-08-05 19:44:37.000000000 -0400
68549 @@ -1394,6 +1394,8 @@ static int copy_to_user_tmpl(struct xfrm
68550 struct xfrm_user_tmpl vec[XFRM_MAX_DEPTH];
68551 int i;
68552
68553 + pax_track_stack();
68554 +
68555 if (xp->xfrm_nr == 0)
68556 return 0;
68557
68558 @@ -2062,6 +2064,8 @@ static int xfrm_do_migrate(struct sk_buf
68559 int err;
68560 int n = 0;
68561
68562 + pax_track_stack();
68563 +
68564 if (attrs[XFRMA_MIGRATE] == NULL)
68565 return -EINVAL;
68566
68567 diff -urNp linux-2.6.39.4/scripts/basic/fixdep.c linux-2.6.39.4/scripts/basic/fixdep.c
68568 --- linux-2.6.39.4/scripts/basic/fixdep.c 2011-05-19 00:06:34.000000000 -0400
68569 +++ linux-2.6.39.4/scripts/basic/fixdep.c 2011-08-05 19:44:37.000000000 -0400
68570 @@ -235,9 +235,9 @@ static void use_config(const char *m, in
68571
68572 static void parse_config_file(const char *map, size_t len)
68573 {
68574 - const int *end = (const int *) (map + len);
68575 + const unsigned int *end = (const unsigned int *) (map + len);
68576 /* start at +1, so that p can never be < map */
68577 - const int *m = (const int *) map + 1;
68578 + const unsigned int *m = (const unsigned int *) map + 1;
68579 const char *p, *q;
68580
68581 for (; m < end; m++) {
68582 @@ -405,7 +405,7 @@ static void print_deps(void)
68583 static void traps(void)
68584 {
68585 static char test[] __attribute__((aligned(sizeof(int)))) = "CONF";
68586 - int *p = (int *)test;
68587 + unsigned int *p = (unsigned int *)test;
68588
68589 if (*p != INT_CONF) {
68590 fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? %#x\n",
68591 diff -urNp linux-2.6.39.4/scripts/gcc-plugin.sh linux-2.6.39.4/scripts/gcc-plugin.sh
68592 --- linux-2.6.39.4/scripts/gcc-plugin.sh 1969-12-31 19:00:00.000000000 -0500
68593 +++ linux-2.6.39.4/scripts/gcc-plugin.sh 2011-08-05 20:34:06.000000000 -0400
68594 @@ -0,0 +1,3 @@
68595 +#!/bin/sh
68596 +
68597 +echo "#include \"gcc-plugin.h\"" | $* -x c - -c -o /dev/null -I`$* -print-file-name=plugin`/include>/dev/null 2>&1 && echo "y"
68598 diff -urNp linux-2.6.39.4/scripts/Makefile.build linux-2.6.39.4/scripts/Makefile.build
68599 --- linux-2.6.39.4/scripts/Makefile.build 2011-05-19 00:06:34.000000000 -0400
68600 +++ linux-2.6.39.4/scripts/Makefile.build 2011-08-05 19:44:37.000000000 -0400
68601 @@ -93,7 +93,7 @@ endif
68602 endif
68603
68604 # Do not include host rules unless needed
68605 -ifneq ($(hostprogs-y)$(hostprogs-m),)
68606 +ifneq ($(hostprogs-y)$(hostprogs-m)$(hostlibs-y)$(hostlibs-m),)
68607 include scripts/Makefile.host
68608 endif
68609
68610 diff -urNp linux-2.6.39.4/scripts/Makefile.clean linux-2.6.39.4/scripts/Makefile.clean
68611 --- linux-2.6.39.4/scripts/Makefile.clean 2011-05-19 00:06:34.000000000 -0400
68612 +++ linux-2.6.39.4/scripts/Makefile.clean 2011-08-05 19:44:37.000000000 -0400
68613 @@ -43,7 +43,8 @@ subdir-ymn := $(addprefix $(obj)/,$(subd
68614 __clean-files := $(extra-y) $(always) \
68615 $(targets) $(clean-files) \
68616 $(host-progs) \
68617 - $(hostprogs-y) $(hostprogs-m) $(hostprogs-)
68618 + $(hostprogs-y) $(hostprogs-m) $(hostprogs-) \
68619 + $(hostlibs-y) $(hostlibs-m) $(hostlibs-)
68620
68621 __clean-files := $(filter-out $(no-clean-files), $(__clean-files))
68622
68623 diff -urNp linux-2.6.39.4/scripts/Makefile.host linux-2.6.39.4/scripts/Makefile.host
68624 --- linux-2.6.39.4/scripts/Makefile.host 2011-05-19 00:06:34.000000000 -0400
68625 +++ linux-2.6.39.4/scripts/Makefile.host 2011-08-05 19:44:37.000000000 -0400
68626 @@ -31,6 +31,7 @@
68627 # Note: Shared libraries consisting of C++ files are not supported
68628
68629 __hostprogs := $(sort $(hostprogs-y) $(hostprogs-m))
68630 +__hostlibs := $(sort $(hostlibs-y) $(hostlibs-m))
68631
68632 # C code
68633 # Executables compiled from a single .c file
68634 @@ -54,6 +55,7 @@ host-cxxobjs := $(sort $(foreach m,$(hos
68635 # Shared libaries (only .c supported)
68636 # Shared libraries (.so) - all .so files referenced in "xxx-objs"
68637 host-cshlib := $(sort $(filter %.so, $(host-cobjs)))
68638 +host-cshlib += $(sort $(filter %.so, $(__hostlibs)))
68639 # Remove .so files from "xxx-objs"
68640 host-cobjs := $(filter-out %.so,$(host-cobjs))
68641
68642 diff -urNp linux-2.6.39.4/scripts/mod/file2alias.c linux-2.6.39.4/scripts/mod/file2alias.c
68643 --- linux-2.6.39.4/scripts/mod/file2alias.c 2011-05-19 00:06:34.000000000 -0400
68644 +++ linux-2.6.39.4/scripts/mod/file2alias.c 2011-08-05 19:44:37.000000000 -0400
68645 @@ -72,7 +72,7 @@ static void device_id_check(const char *
68646 unsigned long size, unsigned long id_size,
68647 void *symval)
68648 {
68649 - int i;
68650 + unsigned int i;
68651
68652 if (size % id_size || size < id_size) {
68653 if (cross_build != 0)
68654 @@ -102,7 +102,7 @@ static void device_id_check(const char *
68655 /* USB is special because the bcdDevice can be matched against a numeric range */
68656 /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */
68657 static void do_usb_entry(struct usb_device_id *id,
68658 - unsigned int bcdDevice_initial, int bcdDevice_initial_digits,
68659 + unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits,
68660 unsigned char range_lo, unsigned char range_hi,
68661 unsigned char max, struct module *mod)
68662 {
68663 @@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *sy
68664 for (i = 0; i < count; i++) {
68665 const char *id = (char *)devs[i].id;
68666 char acpi_id[sizeof(devs[0].id)];
68667 - int j;
68668 + unsigned int j;
68669
68670 buf_printf(&mod->dev_table_buf,
68671 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
68672 @@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *sy
68673
68674 for (j = 0; j < PNP_MAX_DEVICES; j++) {
68675 const char *id = (char *)card->devs[j].id;
68676 - int i2, j2;
68677 + unsigned int i2, j2;
68678 int dup = 0;
68679
68680 if (!id[0])
68681 @@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *sy
68682 /* add an individual alias for every device entry */
68683 if (!dup) {
68684 char acpi_id[sizeof(card->devs[0].id)];
68685 - int k;
68686 + unsigned int k;
68687
68688 buf_printf(&mod->dev_table_buf,
68689 "MODULE_ALIAS(\"pnp:d%s*\");\n", id);
68690 @@ -768,7 +768,7 @@ static void dmi_ascii_filter(char *d, co
68691 static int do_dmi_entry(const char *filename, struct dmi_system_id *id,
68692 char *alias)
68693 {
68694 - int i, j;
68695 + unsigned int i, j;
68696
68697 sprintf(alias, "dmi*");
68698
68699 diff -urNp linux-2.6.39.4/scripts/mod/modpost.c linux-2.6.39.4/scripts/mod/modpost.c
68700 --- linux-2.6.39.4/scripts/mod/modpost.c 2011-05-19 00:06:34.000000000 -0400
68701 +++ linux-2.6.39.4/scripts/mod/modpost.c 2011-08-05 19:44:37.000000000 -0400
68702 @@ -896,6 +896,7 @@ enum mismatch {
68703 ANY_INIT_TO_ANY_EXIT,
68704 ANY_EXIT_TO_ANY_INIT,
68705 EXPORT_TO_INIT_EXIT,
68706 + DATA_TO_TEXT
68707 };
68708
68709 struct sectioncheck {
68710 @@ -1004,6 +1005,12 @@ const struct sectioncheck sectioncheck[]
68711 .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL },
68712 .mismatch = EXPORT_TO_INIT_EXIT,
68713 .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL },
68714 +},
68715 +/* Do not reference code from writable data */
68716 +{
68717 + .fromsec = { DATA_SECTIONS, NULL },
68718 + .tosec = { TEXT_SECTIONS, NULL },
68719 + .mismatch = DATA_TO_TEXT
68720 }
68721 };
68722
68723 @@ -1126,10 +1133,10 @@ static Elf_Sym *find_elf_symbol(struct e
68724 continue;
68725 if (ELF_ST_TYPE(sym->st_info) == STT_SECTION)
68726 continue;
68727 - if (sym->st_value == addr)
68728 - return sym;
68729 /* Find a symbol nearby - addr are maybe negative */
68730 d = sym->st_value - addr;
68731 + if (d == 0)
68732 + return sym;
68733 if (d < 0)
68734 d = addr - sym->st_value;
68735 if (d < distance) {
68736 @@ -1408,6 +1415,14 @@ static void report_sec_mismatch(const ch
68737 tosym, prl_to, prl_to, tosym);
68738 free(prl_to);
68739 break;
68740 + case DATA_TO_TEXT:
68741 +/*
68742 + fprintf(stderr,
68743 + "The variable %s references\n"
68744 + "the %s %s%s%s\n",
68745 + fromsym, to, sec2annotation(tosec), tosym, to_p);
68746 +*/
68747 + break;
68748 }
68749 fprintf(stderr, "\n");
68750 }
68751 @@ -1633,7 +1648,7 @@ static void section_rel(const char *modn
68752 static void check_sec_ref(struct module *mod, const char *modname,
68753 struct elf_info *elf)
68754 {
68755 - int i;
68756 + unsigned int i;
68757 Elf_Shdr *sechdrs = elf->sechdrs;
68758
68759 /* Walk through all sections */
68760 @@ -1731,7 +1746,7 @@ void __attribute__((format(printf, 2, 3)
68761 va_end(ap);
68762 }
68763
68764 -void buf_write(struct buffer *buf, const char *s, int len)
68765 +void buf_write(struct buffer *buf, const char *s, unsigned int len)
68766 {
68767 if (buf->size - buf->pos < len) {
68768 buf->size += len + SZ;
68769 @@ -1943,7 +1958,7 @@ static void write_if_changed(struct buff
68770 if (fstat(fileno(file), &st) < 0)
68771 goto close_write;
68772
68773 - if (st.st_size != b->pos)
68774 + if (st.st_size != (off_t)b->pos)
68775 goto close_write;
68776
68777 tmp = NOFAIL(malloc(b->pos));
68778 diff -urNp linux-2.6.39.4/scripts/mod/modpost.h linux-2.6.39.4/scripts/mod/modpost.h
68779 --- linux-2.6.39.4/scripts/mod/modpost.h 2011-05-19 00:06:34.000000000 -0400
68780 +++ linux-2.6.39.4/scripts/mod/modpost.h 2011-08-05 19:44:37.000000000 -0400
68781 @@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e
68782
68783 struct buffer {
68784 char *p;
68785 - int pos;
68786 - int size;
68787 + unsigned int pos;
68788 + unsigned int size;
68789 };
68790
68791 void __attribute__((format(printf, 2, 3)))
68792 buf_printf(struct buffer *buf, const char *fmt, ...);
68793
68794 void
68795 -buf_write(struct buffer *buf, const char *s, int len);
68796 +buf_write(struct buffer *buf, const char *s, unsigned int len);
68797
68798 struct module {
68799 struct module *next;
68800 diff -urNp linux-2.6.39.4/scripts/mod/sumversion.c linux-2.6.39.4/scripts/mod/sumversion.c
68801 --- linux-2.6.39.4/scripts/mod/sumversion.c 2011-05-19 00:06:34.000000000 -0400
68802 +++ linux-2.6.39.4/scripts/mod/sumversion.c 2011-08-05 19:44:37.000000000 -0400
68803 @@ -470,7 +470,7 @@ static void write_version(const char *fi
68804 goto out;
68805 }
68806
68807 - if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) {
68808 + if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) {
68809 warn("writing sum in %s failed: %s\n",
68810 filename, strerror(errno));
68811 goto out;
68812 diff -urNp linux-2.6.39.4/scripts/pnmtologo.c linux-2.6.39.4/scripts/pnmtologo.c
68813 --- linux-2.6.39.4/scripts/pnmtologo.c 2011-05-19 00:06:34.000000000 -0400
68814 +++ linux-2.6.39.4/scripts/pnmtologo.c 2011-08-05 19:44:37.000000000 -0400
68815 @@ -237,14 +237,14 @@ static void write_header(void)
68816 fprintf(out, " * Linux logo %s\n", logoname);
68817 fputs(" */\n\n", out);
68818 fputs("#include <linux/linux_logo.h>\n\n", out);
68819 - fprintf(out, "static unsigned char %s_data[] __initdata = {\n",
68820 + fprintf(out, "static unsigned char %s_data[] = {\n",
68821 logoname);
68822 }
68823
68824 static void write_footer(void)
68825 {
68826 fputs("\n};\n\n", out);
68827 - fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname);
68828 + fprintf(out, "const struct linux_logo %s = {\n", logoname);
68829 fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]);
68830 fprintf(out, "\t.width\t\t= %d,\n", logo_width);
68831 fprintf(out, "\t.height\t\t= %d,\n", logo_height);
68832 @@ -374,7 +374,7 @@ static void write_logo_clut224(void)
68833 fputs("\n};\n\n", out);
68834
68835 /* write logo clut */
68836 - fprintf(out, "static unsigned char %s_clut[] __initdata = {\n",
68837 + fprintf(out, "static unsigned char %s_clut[] = {\n",
68838 logoname);
68839 write_hex_cnt = 0;
68840 for (i = 0; i < logo_clutsize; i++) {
68841 diff -urNp linux-2.6.39.4/security/apparmor/lsm.c linux-2.6.39.4/security/apparmor/lsm.c
68842 --- linux-2.6.39.4/security/apparmor/lsm.c 2011-06-25 12:55:23.000000000 -0400
68843 +++ linux-2.6.39.4/security/apparmor/lsm.c 2011-08-05 20:34:06.000000000 -0400
68844 @@ -621,7 +621,7 @@ static int apparmor_task_setrlimit(struc
68845 return error;
68846 }
68847
68848 -static struct security_operations apparmor_ops = {
68849 +static struct security_operations apparmor_ops __read_only = {
68850 .name = "apparmor",
68851
68852 .ptrace_access_check = apparmor_ptrace_access_check,
68853 diff -urNp linux-2.6.39.4/security/commoncap.c linux-2.6.39.4/security/commoncap.c
68854 --- linux-2.6.39.4/security/commoncap.c 2011-05-19 00:06:34.000000000 -0400
68855 +++ linux-2.6.39.4/security/commoncap.c 2011-08-05 19:44:37.000000000 -0400
68856 @@ -28,6 +28,7 @@
68857 #include <linux/prctl.h>
68858 #include <linux/securebits.h>
68859 #include <linux/user_namespace.h>
68860 +#include <net/sock.h>
68861
68862 /*
68863 * If a non-root user executes a setuid-root binary in
68864 @@ -58,7 +59,7 @@ int cap_netlink_send(struct sock *sk, st
68865
68866 int cap_netlink_recv(struct sk_buff *skb, int cap)
68867 {
68868 - if (!cap_raised(current_cap(), cap))
68869 + if (!cap_raised(current_cap(), cap) || !gr_is_capable(cap))
68870 return -EPERM;
68871 return 0;
68872 }
68873 @@ -580,6 +581,9 @@ int cap_bprm_secureexec(struct linux_bin
68874 {
68875 const struct cred *cred = current_cred();
68876
68877 + if (gr_acl_enable_at_secure())
68878 + return 1;
68879 +
68880 if (cred->uid != 0) {
68881 if (bprm->cap_effective)
68882 return 1;
68883 diff -urNp linux-2.6.39.4/security/integrity/ima/ima_api.c linux-2.6.39.4/security/integrity/ima/ima_api.c
68884 --- linux-2.6.39.4/security/integrity/ima/ima_api.c 2011-05-19 00:06:34.000000000 -0400
68885 +++ linux-2.6.39.4/security/integrity/ima/ima_api.c 2011-08-05 19:44:37.000000000 -0400
68886 @@ -75,7 +75,7 @@ void ima_add_violation(struct inode *ino
68887 int result;
68888
68889 /* can overflow, only indicator */
68890 - atomic_long_inc(&ima_htable.violations);
68891 + atomic_long_inc_unchecked(&ima_htable.violations);
68892
68893 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
68894 if (!entry) {
68895 diff -urNp linux-2.6.39.4/security/integrity/ima/ima_fs.c linux-2.6.39.4/security/integrity/ima/ima_fs.c
68896 --- linux-2.6.39.4/security/integrity/ima/ima_fs.c 2011-05-19 00:06:34.000000000 -0400
68897 +++ linux-2.6.39.4/security/integrity/ima/ima_fs.c 2011-08-05 19:44:37.000000000 -0400
68898 @@ -28,12 +28,12 @@
68899 static int valid_policy = 1;
68900 #define TMPBUFLEN 12
68901 static ssize_t ima_show_htable_value(char __user *buf, size_t count,
68902 - loff_t *ppos, atomic_long_t *val)
68903 + loff_t *ppos, atomic_long_unchecked_t *val)
68904 {
68905 char tmpbuf[TMPBUFLEN];
68906 ssize_t len;
68907
68908 - len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val));
68909 + len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val));
68910 return simple_read_from_buffer(buf, count, ppos, tmpbuf, len);
68911 }
68912
68913 diff -urNp linux-2.6.39.4/security/integrity/ima/ima.h linux-2.6.39.4/security/integrity/ima/ima.h
68914 --- linux-2.6.39.4/security/integrity/ima/ima.h 2011-05-19 00:06:34.000000000 -0400
68915 +++ linux-2.6.39.4/security/integrity/ima/ima.h 2011-08-05 19:44:37.000000000 -0400
68916 @@ -85,8 +85,8 @@ void ima_add_violation(struct inode *ino
68917 extern spinlock_t ima_queue_lock;
68918
68919 struct ima_h_table {
68920 - atomic_long_t len; /* number of stored measurements in the list */
68921 - atomic_long_t violations;
68922 + atomic_long_unchecked_t len; /* number of stored measurements in the list */
68923 + atomic_long_unchecked_t violations;
68924 struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE];
68925 };
68926 extern struct ima_h_table ima_htable;
68927 diff -urNp linux-2.6.39.4/security/integrity/ima/ima_queue.c linux-2.6.39.4/security/integrity/ima/ima_queue.c
68928 --- linux-2.6.39.4/security/integrity/ima/ima_queue.c 2011-05-19 00:06:34.000000000 -0400
68929 +++ linux-2.6.39.4/security/integrity/ima/ima_queue.c 2011-08-05 19:44:37.000000000 -0400
68930 @@ -79,7 +79,7 @@ static int ima_add_digest_entry(struct i
68931 INIT_LIST_HEAD(&qe->later);
68932 list_add_tail_rcu(&qe->later, &ima_measurements);
68933
68934 - atomic_long_inc(&ima_htable.len);
68935 + atomic_long_inc_unchecked(&ima_htable.len);
68936 key = ima_hash_key(entry->digest);
68937 hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]);
68938 return 0;
68939 diff -urNp linux-2.6.39.4/security/Kconfig linux-2.6.39.4/security/Kconfig
68940 --- linux-2.6.39.4/security/Kconfig 2011-05-19 00:06:34.000000000 -0400
68941 +++ linux-2.6.39.4/security/Kconfig 2011-08-05 19:44:37.000000000 -0400
68942 @@ -4,6 +4,554 @@
68943
68944 menu "Security options"
68945
68946 +source grsecurity/Kconfig
68947 +
68948 +menu "PaX"
68949 +
68950 + config ARCH_TRACK_EXEC_LIMIT
68951 + bool
68952 +
68953 + config PAX_PER_CPU_PGD
68954 + bool
68955 +
68956 + config TASK_SIZE_MAX_SHIFT
68957 + int
68958 + depends on X86_64
68959 + default 47 if !PAX_PER_CPU_PGD
68960 + default 42 if PAX_PER_CPU_PGD
68961 +
68962 + config PAX_ENABLE_PAE
68963 + bool
68964 + default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM))
68965 +
68966 +config PAX
68967 + bool "Enable various PaX features"
68968 + depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
68969 + help
68970 + This allows you to enable various PaX features. PaX adds
68971 + intrusion prevention mechanisms to the kernel that reduce
68972 + the risks posed by exploitable memory corruption bugs.
68973 +
68974 +menu "PaX Control"
68975 + depends on PAX
68976 +
68977 +config PAX_SOFTMODE
68978 + bool 'Support soft mode'
68979 + select PAX_PT_PAX_FLAGS
68980 + help
68981 + Enabling this option will allow you to run PaX in soft mode, that
68982 + is, PaX features will not be enforced by default, only on executables
68983 + marked explicitly. You must also enable PT_PAX_FLAGS support as it
68984 + is the only way to mark executables for soft mode use.
68985 +
68986 + Soft mode can be activated by using the "pax_softmode=1" kernel command
68987 + line option on boot. Furthermore you can control various PaX features
68988 + at runtime via the entries in /proc/sys/kernel/pax.
68989 +
68990 +config PAX_EI_PAX
68991 + bool 'Use legacy ELF header marking'
68992 + help
68993 + Enabling this option will allow you to control PaX features on
68994 + a per executable basis via the 'chpax' utility available at
68995 + http://pax.grsecurity.net/. The control flags will be read from
68996 + an otherwise reserved part of the ELF header. This marking has
68997 + numerous drawbacks (no support for soft-mode, toolchain does not
68998 + know about the non-standard use of the ELF header) therefore it
68999 + has been deprecated in favour of PT_PAX_FLAGS support.
69000 +
69001 + Note that if you enable PT_PAX_FLAGS marking support as well,
69002 + the PT_PAX_FLAG marks will override the legacy EI_PAX marks.
69003 +
69004 +config PAX_PT_PAX_FLAGS
69005 + bool 'Use ELF program header marking'
69006 + help
69007 + Enabling this option will allow you to control PaX features on
69008 + a per executable basis via the 'paxctl' utility available at
69009 + http://pax.grsecurity.net/. The control flags will be read from
69010 + a PaX specific ELF program header (PT_PAX_FLAGS). This marking
69011 + has the benefits of supporting both soft mode and being fully
69012 + integrated into the toolchain (the binutils patch is available
69013 + from http://pax.grsecurity.net).
69014 +
69015 + If your toolchain does not support PT_PAX_FLAGS markings,
69016 + you can create one in most cases with 'paxctl -C'.
69017 +
69018 + Note that if you enable the legacy EI_PAX marking support as well,
69019 + the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks.
69020 +
69021 +choice
69022 + prompt 'MAC system integration'
69023 + default PAX_HAVE_ACL_FLAGS
69024 + help
69025 + Mandatory Access Control systems have the option of controlling
69026 + PaX flags on a per executable basis, choose the method supported
69027 + by your particular system.
69028 +
69029 + - "none": if your MAC system does not interact with PaX,
69030 + - "direct": if your MAC system defines pax_set_initial_flags() itself,
69031 + - "hook": if your MAC system uses the pax_set_initial_flags_func callback.
69032 +
69033 + NOTE: this option is for developers/integrators only.
69034 +
69035 + config PAX_NO_ACL_FLAGS
69036 + bool 'none'
69037 +
69038 + config PAX_HAVE_ACL_FLAGS
69039 + bool 'direct'
69040 +
69041 + config PAX_HOOK_ACL_FLAGS
69042 + bool 'hook'
69043 +endchoice
69044 +
69045 +endmenu
69046 +
69047 +menu "Non-executable pages"
69048 + depends on PAX
69049 +
69050 +config PAX_NOEXEC
69051 + bool "Enforce non-executable pages"
69052 + depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86)
69053 + help
69054 + By design some architectures do not allow for protecting memory
69055 + pages against execution or even if they do, Linux does not make
69056 + use of this feature. In practice this means that if a page is
69057 + readable (such as the stack or heap) it is also executable.
69058 +
69059 + There is a well known exploit technique that makes use of this
69060 + fact and a common programming mistake where an attacker can
69061 + introduce code of his choice somewhere in the attacked program's
69062 + memory (typically the stack or the heap) and then execute it.
69063 +
69064 + If the attacked program was running with different (typically
69065 + higher) privileges than that of the attacker, then he can elevate
69066 + his own privilege level (e.g. get a root shell, write to files for
69067 + which he does not have write access to, etc).
69068 +
69069 + Enabling this option will let you choose from various features
69070 + that prevent the injection and execution of 'foreign' code in
69071 + a program.
69072 +
69073 + This will also break programs that rely on the old behaviour and
69074 + expect that dynamically allocated memory via the malloc() family
69075 + of functions is executable (which it is not). Notable examples
69076 + are the XFree86 4.x server, the java runtime and wine.
69077 +
69078 +config PAX_PAGEEXEC
69079 + bool "Paging based non-executable pages"
69080 + depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7)
69081 + select S390_SWITCH_AMODE if S390
69082 + select S390_EXEC_PROTECT if S390
69083 + select ARCH_TRACK_EXEC_LIMIT if X86_32
69084 + help
69085 + This implementation is based on the paging feature of the CPU.
69086 + On i386 without hardware non-executable bit support there is a
69087 + variable but usually low performance impact, however on Intel's
69088 + P4 core based CPUs it is very high so you should not enable this
69089 + for kernels meant to be used on such CPUs.
69090 +
69091 + On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386
69092 + with hardware non-executable bit support there is no performance
69093 + impact, on ppc the impact is negligible.
69094 +
69095 + Note that several architectures require various emulations due to
69096 + badly designed userland ABIs, this will cause a performance impact
69097 + but will disappear as soon as userland is fixed. For example, ppc
69098 + userland MUST have been built with secure-plt by a recent toolchain.
69099 +
69100 +config PAX_SEGMEXEC
69101 + bool "Segmentation based non-executable pages"
69102 + depends on PAX_NOEXEC && X86_32
69103 + help
69104 + This implementation is based on the segmentation feature of the
69105 + CPU and has a very small performance impact, however applications
69106 + will be limited to a 1.5 GB address space instead of the normal
69107 + 3 GB.
69108 +
69109 +config PAX_EMUTRAMP
69110 + bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86)
69111 + default y if PARISC
69112 + help
69113 + There are some programs and libraries that for one reason or
69114 + another attempt to execute special small code snippets from
69115 + non-executable memory pages. Most notable examples are the
69116 + signal handler return code generated by the kernel itself and
69117 + the GCC trampolines.
69118 +
69119 + If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
69120 + such programs will no longer work under your kernel.
69121 +
69122 + As a remedy you can say Y here and use the 'chpax' or 'paxctl'
69123 + utilities to enable trampoline emulation for the affected programs
69124 + yet still have the protection provided by the non-executable pages.
69125 +
69126 + On parisc you MUST enable this option and EMUSIGRT as well, otherwise
69127 + your system will not even boot.
69128 +
69129 + Alternatively you can say N here and use the 'chpax' or 'paxctl'
69130 + utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
69131 + for the affected files.
69132 +
69133 + NOTE: enabling this feature *may* open up a loophole in the
69134 + protection provided by non-executable pages that an attacker
69135 + could abuse. Therefore the best solution is to not have any
69136 + files on your system that would require this option. This can
69137 + be achieved by not using libc5 (which relies on the kernel
69138 + signal handler return code) and not using or rewriting programs
69139 + that make use of the nested function implementation of GCC.
69140 + Skilled users can just fix GCC itself so that it implements
69141 + nested function calls in a way that does not interfere with PaX.
69142 +
69143 +config PAX_EMUSIGRT
69144 + bool "Automatically emulate sigreturn trampolines"
69145 + depends on PAX_EMUTRAMP && PARISC
69146 + default y
69147 + help
69148 + Enabling this option will have the kernel automatically detect
69149 + and emulate signal return trampolines executing on the stack
69150 + that would otherwise lead to task termination.
69151 +
69152 + This solution is intended as a temporary one for users with
69153 + legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
69154 + Modula-3 runtime, etc) or executables linked to such, basically
69155 + everything that does not specify its own SA_RESTORER function in
69156 + normal executable memory like glibc 2.1+ does.
69157 +
69158 + On parisc you MUST enable this option, otherwise your system will
69159 + not even boot.
69160 +
69161 + NOTE: this feature cannot be disabled on a per executable basis
69162 + and since it *does* open up a loophole in the protection provided
69163 + by non-executable pages, the best solution is to not have any
69164 + files on your system that would require this option.
69165 +
69166 +config PAX_MPROTECT
69167 + bool "Restrict mprotect()"
69168 + depends on (PAX_PAGEEXEC || PAX_SEGMEXEC)
69169 + help
69170 + Enabling this option will prevent programs from
69171 + - changing the executable status of memory pages that were
69172 + not originally created as executable,
69173 + - making read-only executable pages writable again,
69174 + - creating executable pages from anonymous memory,
69175 + - making read-only-after-relocations (RELRO) data pages writable again.
69176 +
69177 + You should say Y here to complete the protection provided by
69178 + the enforcement of non-executable pages.
69179 +
69180 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
69181 + this feature on a per file basis.
69182 +
69183 +config PAX_MPROTECT_COMPAT
69184 + bool "Use legacy/compat protection demoting (read help)"
69185 + depends on PAX_MPROTECT
69186 + default n
69187 + help
69188 + The current implementation of PAX_MPROTECT denies RWX allocations/mprotects
69189 + by sending the proper error code to the application. For some broken
69190 + userland, this can cause problems with Python or other applications. The
69191 + current implementation however allows for applications like clamav to
69192 + detect if JIT compilation/execution is allowed and to fall back gracefully
69193 + to an interpreter-based mode if it does not. While we encourage everyone
69194 + to use the current implementation as-is and push upstream to fix broken
69195 + userland (note that the RWX logging option can assist with this), in some
69196 + environments this may not be possible. Having to disable MPROTECT
69197 + completely on certain binaries reduces the security benefit of PaX,
69198 + so this option is provided for those environments to revert to the old
69199 + behavior.
69200 +
69201 +config PAX_ELFRELOCS
69202 + bool "Allow ELF text relocations (read help)"
69203 + depends on PAX_MPROTECT
69204 + default n
69205 + help
69206 + Non-executable pages and mprotect() restrictions are effective
69207 + in preventing the introduction of new executable code into an
69208 + attacked task's address space. There remain only two venues
69209 + for this kind of attack: if the attacker can execute already
69210 + existing code in the attacked task then he can either have it
69211 + create and mmap() a file containing his code or have it mmap()
69212 + an already existing ELF library that does not have position
69213 + independent code in it and use mprotect() on it to make it
69214 + writable and copy his code there. While protecting against
69215 + the former approach is beyond PaX, the latter can be prevented
69216 + by having only PIC ELF libraries on one's system (which do not
69217 + need to relocate their code). If you are sure this is your case,
69218 + as is the case with all modern Linux distributions, then leave
69219 + this option disabled. You should say 'n' here.
69220 +
69221 +config PAX_ETEXECRELOCS
69222 + bool "Allow ELF ET_EXEC text relocations"
69223 + depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
69224 + select PAX_ELFRELOCS
69225 + default y
69226 + help
69227 + On some architectures there are incorrectly created applications
69228 + that require text relocations and would not work without enabling
69229 + this option. If you are an alpha, ia64 or parisc user, you should
69230 + enable this option and disable it once you have made sure that
69231 + none of your applications need it.
69232 +
69233 +config PAX_EMUPLT
69234 + bool "Automatically emulate ELF PLT"
69235 + depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC)
69236 + default y
69237 + help
69238 + Enabling this option will have the kernel automatically detect
69239 + and emulate the Procedure Linkage Table entries in ELF files.
69240 + On some architectures such entries are in writable memory, and
69241 + become non-executable leading to task termination. Therefore
69242 + it is mandatory that you enable this option on alpha, parisc,
69243 + sparc and sparc64, otherwise your system would not even boot.
69244 +
69245 + NOTE: this feature *does* open up a loophole in the protection
69246 + provided by the non-executable pages, therefore the proper
69247 + solution is to modify the toolchain to produce a PLT that does
69248 + not need to be writable.
69249 +
69250 +config PAX_DLRESOLVE
69251 + bool 'Emulate old glibc resolver stub'
69252 + depends on PAX_EMUPLT && SPARC
69253 + default n
69254 + help
69255 + This option is needed if userland has an old glibc (before 2.4)
69256 + that puts a 'save' instruction into the runtime generated resolver
69257 + stub that needs special emulation.
69258 +
69259 +config PAX_KERNEXEC
69260 + bool "Enforce non-executable kernel pages"
69261 + depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN
69262 + select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE)
69263 + help
69264 + This is the kernel land equivalent of PAGEEXEC and MPROTECT,
69265 + that is, enabling this option will make it harder to inject
69266 + and execute 'foreign' code in kernel memory itself.
69267 +
69268 + Note that on x86_64 kernels there is a known regression when
69269 + this feature and KVM/VMX are both enabled in the host kernel.
69270 +
69271 +config PAX_KERNEXEC_MODULE_TEXT
69272 + int "Minimum amount of memory reserved for module code"
69273 + default "4"
69274 + depends on PAX_KERNEXEC && X86_32 && MODULES
69275 + help
69276 + Due to implementation details the kernel must reserve a fixed
69277 + amount of memory for module code at compile time that cannot be
69278 + changed at runtime. Here you can specify the minimum amount
69279 + in MB that will be reserved. Due to the same implementation
69280 + details this size will always be rounded up to the next 2/4 MB
69281 + boundary (depends on PAE) so the actually available memory for
69282 + module code will usually be more than this minimum.
69283 +
69284 + The default 4 MB should be enough for most users but if you have
69285 + an excessive number of modules (e.g., most distribution configs
69286 + compile many drivers as modules) or use huge modules such as
69287 + nvidia's kernel driver, you will need to adjust this amount.
69288 + A good rule of thumb is to look at your currently loaded kernel
69289 + modules and add up their sizes.
69290 +
69291 +endmenu
69292 +
69293 +menu "Address Space Layout Randomization"
69294 + depends on PAX
69295 +
69296 +config PAX_ASLR
69297 + bool "Address Space Layout Randomization"
69298 + depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
69299 + help
69300 + Many if not most exploit techniques rely on the knowledge of
69301 + certain addresses in the attacked program. The following options
69302 + will allow the kernel to apply a certain amount of randomization
69303 + to specific parts of the program thereby forcing an attacker to
69304 + guess them in most cases. Any failed guess will most likely crash
69305 + the attacked program which allows the kernel to detect such attempts
69306 + and react on them. PaX itself provides no reaction mechanisms,
69307 + instead it is strongly encouraged that you make use of Nergal's
69308 + segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
69309 + (http://www.grsecurity.net/) built-in crash detection features or
69310 + develop one yourself.
69311 +
69312 + By saying Y here you can choose to randomize the following areas:
69313 + - top of the task's kernel stack
69314 + - top of the task's userland stack
69315 + - base address for mmap() requests that do not specify one
69316 + (this includes all libraries)
69317 + - base address of the main executable
69318 +
69319 + It is strongly recommended to say Y here as address space layout
69320 + randomization has negligible impact on performance yet it provides
69321 + a very effective protection.
69322 +
69323 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control
69324 + this feature on a per file basis.
69325 +
69326 +config PAX_RANDKSTACK
69327 + bool "Randomize kernel stack base"
69328 + depends on PAX_ASLR && X86_TSC && X86
69329 + help
69330 + By saying Y here the kernel will randomize every task's kernel
69331 + stack on every system call. This will not only force an attacker
69332 + to guess it but also prevent him from making use of possible
69333 + leaked information about it.
69334 +
69335 + Since the kernel stack is a rather scarce resource, randomization
69336 + may cause unexpected stack overflows, therefore you should very
69337 + carefully test your system. Note that once enabled in the kernel
69338 + configuration, this feature cannot be disabled on a per file basis.
69339 +
69340 +config PAX_RANDUSTACK
69341 + bool "Randomize user stack base"
69342 + depends on PAX_ASLR
69343 + help
69344 + By saying Y here the kernel will randomize every task's userland
69345 + stack. The randomization is done in two steps where the second
69346 + one may apply a big amount of shift to the top of the stack and
69347 + cause problems for programs that want to use lots of memory (more
69348 + than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
69349 + For this reason the second step can be controlled by 'chpax' or
69350 + 'paxctl' on a per file basis.
69351 +
69352 +config PAX_RANDMMAP
69353 + bool "Randomize mmap() base"
69354 + depends on PAX_ASLR
69355 + help
69356 + By saying Y here the kernel will use a randomized base address for
69357 + mmap() requests that do not specify one themselves. As a result
69358 + all dynamically loaded libraries will appear at random addresses
69359 + and therefore be harder to exploit by a technique where an attacker
69360 + attempts to execute library code for his purposes (e.g. spawn a
69361 + shell from an exploited program that is running at an elevated
69362 + privilege level).
69363 +
69364 + Furthermore, if a program is relinked as a dynamic ELF file, its
69365 + base address will be randomized as well, completing the full
69366 + randomization of the address space layout. Attacking such programs
69367 + becomes a guess game. You can find an example of doing this at
69368 + http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
69369 + http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
69370 +
69371 + NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
69372 + feature on a per file basis.
69373 +
69374 +endmenu
69375 +
69376 +menu "Miscellaneous hardening features"
69377 +
69378 +config PAX_MEMORY_SANITIZE
69379 + bool "Sanitize all freed memory"
69380 + help
69381 + By saying Y here the kernel will erase memory pages as soon as they
69382 + are freed. This in turn reduces the lifetime of data stored in the
69383 + pages, making it less likely that sensitive information such as
69384 + passwords, cryptographic secrets, etc stay in memory for too long.
69385 +
69386 + This is especially useful for programs whose runtime is short, long
69387 + lived processes and the kernel itself benefit from this as long as
69388 + they operate on whole memory pages and ensure timely freeing of pages
69389 + that may hold sensitive information.
69390 +
69391 + The tradeoff is performance impact, on a single CPU system kernel
69392 + compilation sees a 3% slowdown, other systems and workloads may vary
69393 + and you are advised to test this feature on your expected workload
69394 + before deploying it.
69395 +
69396 + Note that this feature does not protect data stored in live pages,
69397 + e.g., process memory swapped to disk may stay there for a long time.
69398 +
69399 +config PAX_MEMORY_STACKLEAK
69400 + bool "Sanitize kernel stack"
69401 + depends on X86
69402 + help
69403 + By saying Y here the kernel will erase the kernel stack before it
69404 + returns from a system call. This in turn reduces the information
69405 + that a kernel stack leak bug can reveal.
69406 +
69407 + Note that such a bug can still leak information that was put on
69408 + the stack by the current system call (the one eventually triggering
69409 + the bug) but traces of earlier system calls on the kernel stack
69410 + cannot leak anymore.
69411 +
69412 + The tradeoff is performance impact: on a single CPU system kernel
69413 + compilation sees a 1% slowdown, other systems and workloads may vary
69414 + and you are advised to test this feature on your expected workload
69415 + before deploying it.
69416 +
69417 + Note: full support for this feature requires gcc with plugin support
69418 + so make sure your compiler is at least gcc 4.5.0 (cross compilation
69419 + is not supported). Using older gcc versions means that functions
69420 + with large enough stack frames may leave uninitialized memory behind
69421 + that may be exposed to a later syscall leaking the stack.
69422 +
69423 +config PAX_MEMORY_UDEREF
69424 + bool "Prevent invalid userland pointer dereference"
69425 + depends on X86 && !UML_X86 && !XEN
69426 + select PAX_PER_CPU_PGD if X86_64
69427 + help
69428 + By saying Y here the kernel will be prevented from dereferencing
69429 + userland pointers in contexts where the kernel expects only kernel
69430 + pointers. This is both a useful runtime debugging feature and a
69431 + security measure that prevents exploiting a class of kernel bugs.
69432 +
69433 + The tradeoff is that some virtualization solutions may experience
69434 + a huge slowdown and therefore you should not enable this feature
69435 + for kernels meant to run in such environments. Whether a given VM
69436 + solution is affected or not is best determined by simply trying it
69437 + out, the performance impact will be obvious right on boot as this
69438 + mechanism engages from very early on. A good rule of thumb is that
69439 + VMs running on CPUs without hardware virtualization support (i.e.,
69440 + the majority of IA-32 CPUs) will likely experience the slowdown.
69441 +
69442 +config PAX_REFCOUNT
69443 + bool "Prevent various kernel object reference counter overflows"
69444 + depends on GRKERNSEC && (X86 || SPARC64)
69445 + help
69446 + By saying Y here the kernel will detect and prevent overflowing
69447 + various (but not all) kinds of object reference counters. Such
69448 + overflows can normally occur due to bugs only and are often, if
69449 + not always, exploitable.
69450 +
69451 + The tradeoff is that data structures protected by an overflowed
69452 + refcount will never be freed and therefore will leak memory. Note
69453 + that this leak also happens even without this protection but in
69454 + that case the overflow can eventually trigger the freeing of the
69455 + data structure while it is still being used elsewhere, resulting
69456 + in the exploitable situation that this feature prevents.
69457 +
69458 + Since this has a negligible performance impact, you should enable
69459 + this feature.
69460 +
69461 +config PAX_USERCOPY
69462 + bool "Harden heap object copies between kernel and userland"
69463 + depends on X86 || PPC || SPARC || ARM
69464 + depends on GRKERNSEC && (SLAB || SLUB || SLOB)
69465 + help
69466 + By saying Y here the kernel will enforce the size of heap objects
69467 + when they are copied in either direction between the kernel and
69468 + userland, even if only a part of the heap object is copied.
69469 +
69470 + Specifically, this checking prevents information leaking from the
69471 + kernel heap during kernel to userland copies (if the kernel heap
69472 + object is otherwise fully initialized) and prevents kernel heap
69473 + overflows during userland to kernel copies.
69474 +
69475 + Note that the current implementation provides the strictest bounds
69476 + checks for the SLUB allocator.
69477 +
69478 + Enabling this option also enables per-slab cache protection against
69479 + data in a given cache being copied into/out of via userland
69480 + accessors. Though the whitelist of regions will be reduced over
69481 + time, it notably protects important data structures like task structs.
69482 +
69483 + If frame pointers are enabled on x86, this option will also restrict
69484 + copies into and out of the kernel stack to local variables within a
69485 + single frame.
69486 +
69487 + Since this has a negligible performance impact, you should enable
69488 + this feature.
69489 +
69490 +endmenu
69491 +
69492 +endmenu
69493 +
69494 config KEYS
69495 bool "Enable access key retention support"
69496 help
69497 @@ -167,7 +715,7 @@ config INTEL_TXT
69498 config LSM_MMAP_MIN_ADDR
69499 int "Low address space for LSM to protect from user allocation"
69500 depends on SECURITY && SECURITY_SELINUX
69501 - default 65536
69502 + default 32768
69503 help
69504 This is the portion of low virtual memory which should be protected
69505 from userspace allocation. Keeping a user from writing to low pages
69506 diff -urNp linux-2.6.39.4/security/keys/keyring.c linux-2.6.39.4/security/keys/keyring.c
69507 --- linux-2.6.39.4/security/keys/keyring.c 2011-05-19 00:06:34.000000000 -0400
69508 +++ linux-2.6.39.4/security/keys/keyring.c 2011-08-05 19:44:37.000000000 -0400
69509 @@ -213,15 +213,15 @@ static long keyring_read(const struct ke
69510 ret = -EFAULT;
69511
69512 for (loop = 0; loop < klist->nkeys; loop++) {
69513 + key_serial_t serial;
69514 key = klist->keys[loop];
69515 + serial = key->serial;
69516
69517 tmp = sizeof(key_serial_t);
69518 if (tmp > buflen)
69519 tmp = buflen;
69520
69521 - if (copy_to_user(buffer,
69522 - &key->serial,
69523 - tmp) != 0)
69524 + if (copy_to_user(buffer, &serial, tmp))
69525 goto error;
69526
69527 buflen -= tmp;
69528 diff -urNp linux-2.6.39.4/security/min_addr.c linux-2.6.39.4/security/min_addr.c
69529 --- linux-2.6.39.4/security/min_addr.c 2011-05-19 00:06:34.000000000 -0400
69530 +++ linux-2.6.39.4/security/min_addr.c 2011-08-05 19:44:37.000000000 -0400
69531 @@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG
69532 */
69533 static void update_mmap_min_addr(void)
69534 {
69535 +#ifndef SPARC
69536 #ifdef CONFIG_LSM_MMAP_MIN_ADDR
69537 if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR)
69538 mmap_min_addr = dac_mmap_min_addr;
69539 @@ -22,6 +23,7 @@ static void update_mmap_min_addr(void)
69540 #else
69541 mmap_min_addr = dac_mmap_min_addr;
69542 #endif
69543 +#endif
69544 }
69545
69546 /*
69547 diff -urNp linux-2.6.39.4/security/security.c linux-2.6.39.4/security/security.c
69548 --- linux-2.6.39.4/security/security.c 2011-05-19 00:06:34.000000000 -0400
69549 +++ linux-2.6.39.4/security/security.c 2011-08-05 19:44:37.000000000 -0400
69550 @@ -25,8 +25,8 @@ static __initdata char chosen_lsm[SECURI
69551 /* things that live in capability.c */
69552 extern void __init security_fixup_ops(struct security_operations *ops);
69553
69554 -static struct security_operations *security_ops;
69555 -static struct security_operations default_security_ops = {
69556 +static struct security_operations *security_ops __read_only;
69557 +static struct security_operations default_security_ops __read_only = {
69558 .name = "default",
69559 };
69560
69561 @@ -67,7 +67,9 @@ int __init security_init(void)
69562
69563 void reset_security_ops(void)
69564 {
69565 + pax_open_kernel();
69566 security_ops = &default_security_ops;
69567 + pax_close_kernel();
69568 }
69569
69570 /* Save user chosen LSM */
69571 diff -urNp linux-2.6.39.4/security/selinux/hooks.c linux-2.6.39.4/security/selinux/hooks.c
69572 --- linux-2.6.39.4/security/selinux/hooks.c 2011-05-19 00:06:34.000000000 -0400
69573 +++ linux-2.6.39.4/security/selinux/hooks.c 2011-08-05 19:44:37.000000000 -0400
69574 @@ -93,7 +93,6 @@
69575 #define NUM_SEL_MNT_OPTS 5
69576
69577 extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
69578 -extern struct security_operations *security_ops;
69579
69580 /* SECMARK reference count */
69581 atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
69582 @@ -5431,7 +5430,7 @@ static int selinux_key_getsecurity(struc
69583
69584 #endif
69585
69586 -static struct security_operations selinux_ops = {
69587 +static struct security_operations selinux_ops __read_only = {
69588 .name = "selinux",
69589
69590 .ptrace_access_check = selinux_ptrace_access_check,
69591 diff -urNp linux-2.6.39.4/security/selinux/include/xfrm.h linux-2.6.39.4/security/selinux/include/xfrm.h
69592 --- linux-2.6.39.4/security/selinux/include/xfrm.h 2011-05-19 00:06:34.000000000 -0400
69593 +++ linux-2.6.39.4/security/selinux/include/xfrm.h 2011-08-05 19:44:37.000000000 -0400
69594 @@ -48,7 +48,7 @@ int selinux_xfrm_decode_session(struct s
69595
69596 static inline void selinux_xfrm_notify_policyload(void)
69597 {
69598 - atomic_inc(&flow_cache_genid);
69599 + atomic_inc_unchecked(&flow_cache_genid);
69600 }
69601 #else
69602 static inline int selinux_xfrm_enabled(void)
69603 diff -urNp linux-2.6.39.4/security/selinux/ss/services.c linux-2.6.39.4/security/selinux/ss/services.c
69604 --- linux-2.6.39.4/security/selinux/ss/services.c 2011-05-19 00:06:34.000000000 -0400
69605 +++ linux-2.6.39.4/security/selinux/ss/services.c 2011-08-05 19:44:37.000000000 -0400
69606 @@ -1806,6 +1806,8 @@ int security_load_policy(void *data, siz
69607 int rc = 0;
69608 struct policy_file file = { data, len }, *fp = &file;
69609
69610 + pax_track_stack();
69611 +
69612 if (!ss_initialized) {
69613 avtab_cache_init();
69614 rc = policydb_read(&policydb, fp);
69615 diff -urNp linux-2.6.39.4/security/smack/smack_lsm.c linux-2.6.39.4/security/smack/smack_lsm.c
69616 --- linux-2.6.39.4/security/smack/smack_lsm.c 2011-05-19 00:06:34.000000000 -0400
69617 +++ linux-2.6.39.4/security/smack/smack_lsm.c 2011-08-05 19:44:37.000000000 -0400
69618 @@ -3386,7 +3386,7 @@ static int smack_inode_getsecctx(struct
69619 return 0;
69620 }
69621
69622 -struct security_operations smack_ops = {
69623 +struct security_operations smack_ops __read_only = {
69624 .name = "smack",
69625
69626 .ptrace_access_check = smack_ptrace_access_check,
69627 diff -urNp linux-2.6.39.4/security/tomoyo/tomoyo.c linux-2.6.39.4/security/tomoyo/tomoyo.c
69628 --- linux-2.6.39.4/security/tomoyo/tomoyo.c 2011-05-19 00:06:34.000000000 -0400
69629 +++ linux-2.6.39.4/security/tomoyo/tomoyo.c 2011-08-05 19:44:37.000000000 -0400
69630 @@ -240,7 +240,7 @@ static int tomoyo_sb_pivotroot(struct pa
69631 * tomoyo_security_ops is a "struct security_operations" which is used for
69632 * registering TOMOYO.
69633 */
69634 -static struct security_operations tomoyo_security_ops = {
69635 +static struct security_operations tomoyo_security_ops __read_only = {
69636 .name = "tomoyo",
69637 .cred_alloc_blank = tomoyo_cred_alloc_blank,
69638 .cred_prepare = tomoyo_cred_prepare,
69639 diff -urNp linux-2.6.39.4/sound/aoa/codecs/onyx.c linux-2.6.39.4/sound/aoa/codecs/onyx.c
69640 --- linux-2.6.39.4/sound/aoa/codecs/onyx.c 2011-05-19 00:06:34.000000000 -0400
69641 +++ linux-2.6.39.4/sound/aoa/codecs/onyx.c 2011-08-05 19:44:37.000000000 -0400
69642 @@ -54,7 +54,7 @@ struct onyx {
69643 spdif_locked:1,
69644 analog_locked:1,
69645 original_mute:2;
69646 - int open_count;
69647 + local_t open_count;
69648 struct codec_info *codec_info;
69649
69650 /* mutex serializes concurrent access to the device
69651 @@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_i
69652 struct onyx *onyx = cii->codec_data;
69653
69654 mutex_lock(&onyx->mutex);
69655 - onyx->open_count++;
69656 + local_inc(&onyx->open_count);
69657 mutex_unlock(&onyx->mutex);
69658
69659 return 0;
69660 @@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_
69661 struct onyx *onyx = cii->codec_data;
69662
69663 mutex_lock(&onyx->mutex);
69664 - onyx->open_count--;
69665 - if (!onyx->open_count)
69666 + if (local_dec_and_test(&onyx->open_count))
69667 onyx->spdif_locked = onyx->analog_locked = 0;
69668 mutex_unlock(&onyx->mutex);
69669
69670 diff -urNp linux-2.6.39.4/sound/aoa/codecs/onyx.h linux-2.6.39.4/sound/aoa/codecs/onyx.h
69671 --- linux-2.6.39.4/sound/aoa/codecs/onyx.h 2011-05-19 00:06:34.000000000 -0400
69672 +++ linux-2.6.39.4/sound/aoa/codecs/onyx.h 2011-08-05 19:44:37.000000000 -0400
69673 @@ -11,6 +11,7 @@
69674 #include <linux/i2c.h>
69675 #include <asm/pmac_low_i2c.h>
69676 #include <asm/prom.h>
69677 +#include <asm/local.h>
69678
69679 /* PCM3052 register definitions */
69680
69681 diff -urNp linux-2.6.39.4/sound/core/seq/seq_device.c linux-2.6.39.4/sound/core/seq/seq_device.c
69682 --- linux-2.6.39.4/sound/core/seq/seq_device.c 2011-05-19 00:06:34.000000000 -0400
69683 +++ linux-2.6.39.4/sound/core/seq/seq_device.c 2011-08-05 20:34:06.000000000 -0400
69684 @@ -63,7 +63,7 @@ struct ops_list {
69685 int argsize; /* argument size */
69686
69687 /* operators */
69688 - struct snd_seq_dev_ops ops;
69689 + struct snd_seq_dev_ops *ops;
69690
69691 /* registred devices */
69692 struct list_head dev_list; /* list of devices */
69693 @@ -332,7 +332,7 @@ int snd_seq_device_register_driver(char
69694
69695 mutex_lock(&ops->reg_mutex);
69696 /* copy driver operators */
69697 - ops->ops = *entry;
69698 + ops->ops = entry;
69699 ops->driver |= DRIVER_LOADED;
69700 ops->argsize = argsize;
69701
69702 @@ -462,7 +462,7 @@ static int init_device(struct snd_seq_de
69703 dev->name, ops->id, ops->argsize, dev->argsize);
69704 return -EINVAL;
69705 }
69706 - if (ops->ops.init_device(dev) >= 0) {
69707 + if (ops->ops->init_device(dev) >= 0) {
69708 dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
69709 ops->num_init_devices++;
69710 } else {
69711 @@ -489,7 +489,7 @@ static int free_device(struct snd_seq_de
69712 dev->name, ops->id, ops->argsize, dev->argsize);
69713 return -EINVAL;
69714 }
69715 - if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
69716 + if ((result = ops->ops->free_device(dev)) >= 0 || result == -ENXIO) {
69717 dev->status = SNDRV_SEQ_DEVICE_FREE;
69718 dev->driver_data = NULL;
69719 ops->num_init_devices--;
69720 diff -urNp linux-2.6.39.4/sound/drivers/mts64.c linux-2.6.39.4/sound/drivers/mts64.c
69721 --- linux-2.6.39.4/sound/drivers/mts64.c 2011-05-19 00:06:34.000000000 -0400
69722 +++ linux-2.6.39.4/sound/drivers/mts64.c 2011-08-05 20:34:06.000000000 -0400
69723 @@ -28,6 +28,7 @@
69724 #include <sound/initval.h>
69725 #include <sound/rawmidi.h>
69726 #include <sound/control.h>
69727 +#include <asm/local.h>
69728
69729 #define CARD_NAME "Miditerminal 4140"
69730 #define DRIVER_NAME "MTS64"
69731 @@ -66,7 +67,7 @@ struct mts64 {
69732 struct pardevice *pardev;
69733 int pardev_claimed;
69734
69735 - int open_count;
69736 + local_t open_count;
69737 int current_midi_output_port;
69738 int current_midi_input_port;
69739 u8 mode[MTS64_NUM_INPUT_PORTS];
69740 @@ -696,7 +697,7 @@ static int snd_mts64_rawmidi_open(struct
69741 {
69742 struct mts64 *mts = substream->rmidi->private_data;
69743
69744 - if (mts->open_count == 0) {
69745 + if (local_read(&mts->open_count) == 0) {
69746 /* We don't need a spinlock here, because this is just called
69747 if the device has not been opened before.
69748 So there aren't any IRQs from the device */
69749 @@ -704,7 +705,7 @@ static int snd_mts64_rawmidi_open(struct
69750
69751 msleep(50);
69752 }
69753 - ++(mts->open_count);
69754 + local_inc(&mts->open_count);
69755
69756 return 0;
69757 }
69758 @@ -714,8 +715,7 @@ static int snd_mts64_rawmidi_close(struc
69759 struct mts64 *mts = substream->rmidi->private_data;
69760 unsigned long flags;
69761
69762 - --(mts->open_count);
69763 - if (mts->open_count == 0) {
69764 + if (local_dec_return(&mts->open_count) == 0) {
69765 /* We need the spinlock_irqsave here because we can still
69766 have IRQs at this point */
69767 spin_lock_irqsave(&mts->lock, flags);
69768 @@ -724,8 +724,8 @@ static int snd_mts64_rawmidi_close(struc
69769
69770 msleep(500);
69771
69772 - } else if (mts->open_count < 0)
69773 - mts->open_count = 0;
69774 + } else if (local_read(&mts->open_count) < 0)
69775 + local_set(&mts->open_count, 0);
69776
69777 return 0;
69778 }
69779 diff -urNp linux-2.6.39.4/sound/drivers/opl4/opl4_lib.c linux-2.6.39.4/sound/drivers/opl4/opl4_lib.c
69780 --- linux-2.6.39.4/sound/drivers/opl4/opl4_lib.c 2011-05-19 00:06:34.000000000 -0400
69781 +++ linux-2.6.39.4/sound/drivers/opl4/opl4_lib.c 2011-08-05 20:34:06.000000000 -0400
69782 @@ -28,7 +28,7 @@ MODULE_AUTHOR("Clemens Ladisch <clemens@
69783 MODULE_DESCRIPTION("OPL4 driver");
69784 MODULE_LICENSE("GPL");
69785
69786 -static void inline snd_opl4_wait(struct snd_opl4 *opl4)
69787 +static inline void snd_opl4_wait(struct snd_opl4 *opl4)
69788 {
69789 int timeout = 10;
69790 while ((inb(opl4->fm_port) & OPL4_STATUS_BUSY) && --timeout > 0)
69791 diff -urNp linux-2.6.39.4/sound/drivers/portman2x4.c linux-2.6.39.4/sound/drivers/portman2x4.c
69792 --- linux-2.6.39.4/sound/drivers/portman2x4.c 2011-05-19 00:06:34.000000000 -0400
69793 +++ linux-2.6.39.4/sound/drivers/portman2x4.c 2011-08-05 20:34:06.000000000 -0400
69794 @@ -47,6 +47,7 @@
69795 #include <sound/initval.h>
69796 #include <sound/rawmidi.h>
69797 #include <sound/control.h>
69798 +#include <asm/local.h>
69799
69800 #define CARD_NAME "Portman 2x4"
69801 #define DRIVER_NAME "portman"
69802 @@ -84,7 +85,7 @@ struct portman {
69803 struct pardevice *pardev;
69804 int pardev_claimed;
69805
69806 - int open_count;
69807 + local_t open_count;
69808 int mode[PORTMAN_NUM_INPUT_PORTS];
69809 struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS];
69810 };
69811 diff -urNp linux-2.6.39.4/sound/firewire/amdtp.c linux-2.6.39.4/sound/firewire/amdtp.c
69812 --- linux-2.6.39.4/sound/firewire/amdtp.c 2011-05-19 00:06:34.000000000 -0400
69813 +++ linux-2.6.39.4/sound/firewire/amdtp.c 2011-08-05 19:44:37.000000000 -0400
69814 @@ -371,7 +371,7 @@ static void queue_out_packet(struct amdt
69815 ptr = s->pcm_buffer_pointer + data_blocks;
69816 if (ptr >= pcm->runtime->buffer_size)
69817 ptr -= pcm->runtime->buffer_size;
69818 - ACCESS_ONCE(s->pcm_buffer_pointer) = ptr;
69819 + ACCESS_ONCE_RW(s->pcm_buffer_pointer) = ptr;
69820
69821 s->pcm_period_pointer += data_blocks;
69822 if (s->pcm_period_pointer >= pcm->runtime->period_size) {
69823 @@ -510,7 +510,7 @@ EXPORT_SYMBOL(amdtp_out_stream_start);
69824 */
69825 void amdtp_out_stream_update(struct amdtp_out_stream *s)
69826 {
69827 - ACCESS_ONCE(s->source_node_id_field) =
69828 + ACCESS_ONCE_RW(s->source_node_id_field) =
69829 (fw_parent_device(s->unit)->card->node_id & 0x3f) << 24;
69830 }
69831 EXPORT_SYMBOL(amdtp_out_stream_update);
69832 diff -urNp linux-2.6.39.4/sound/firewire/amdtp.h linux-2.6.39.4/sound/firewire/amdtp.h
69833 --- linux-2.6.39.4/sound/firewire/amdtp.h 2011-05-19 00:06:34.000000000 -0400
69834 +++ linux-2.6.39.4/sound/firewire/amdtp.h 2011-08-05 19:44:37.000000000 -0400
69835 @@ -146,7 +146,7 @@ static inline void amdtp_out_stream_pcm_
69836 static inline void amdtp_out_stream_pcm_trigger(struct amdtp_out_stream *s,
69837 struct snd_pcm_substream *pcm)
69838 {
69839 - ACCESS_ONCE(s->pcm) = pcm;
69840 + ACCESS_ONCE_RW(s->pcm) = pcm;
69841 }
69842
69843 /**
69844 diff -urNp linux-2.6.39.4/sound/isa/cmi8330.c linux-2.6.39.4/sound/isa/cmi8330.c
69845 --- linux-2.6.39.4/sound/isa/cmi8330.c 2011-05-19 00:06:34.000000000 -0400
69846 +++ linux-2.6.39.4/sound/isa/cmi8330.c 2011-08-05 20:34:06.000000000 -0400
69847 @@ -172,7 +172,7 @@ struct snd_cmi8330 {
69848
69849 struct snd_pcm *pcm;
69850 struct snd_cmi8330_stream {
69851 - struct snd_pcm_ops ops;
69852 + snd_pcm_ops_no_const ops;
69853 snd_pcm_open_callback_t open;
69854 void *private_data; /* sb or wss */
69855 } streams[2];
69856 diff -urNp linux-2.6.39.4/sound/oss/sb_audio.c linux-2.6.39.4/sound/oss/sb_audio.c
69857 --- linux-2.6.39.4/sound/oss/sb_audio.c 2011-05-19 00:06:34.000000000 -0400
69858 +++ linux-2.6.39.4/sound/oss/sb_audio.c 2011-08-05 19:44:37.000000000 -0400
69859 @@ -901,7 +901,7 @@ sb16_copy_from_user(int dev,
69860 buf16 = (signed short *)(localbuf + localoffs);
69861 while (c)
69862 {
69863 - locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
69864 + locallen = ((unsigned)c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c);
69865 if (copy_from_user(lbuf8,
69866 userbuf+useroffs + p,
69867 locallen))
69868 diff -urNp linux-2.6.39.4/sound/oss/swarm_cs4297a.c linux-2.6.39.4/sound/oss/swarm_cs4297a.c
69869 --- linux-2.6.39.4/sound/oss/swarm_cs4297a.c 2011-05-19 00:06:34.000000000 -0400
69870 +++ linux-2.6.39.4/sound/oss/swarm_cs4297a.c 2011-08-05 19:44:37.000000000 -0400
69871 @@ -2606,7 +2606,6 @@ static int __init cs4297a_init(void)
69872 {
69873 struct cs4297a_state *s;
69874 u32 pwr, id;
69875 - mm_segment_t fs;
69876 int rval;
69877 #ifndef CONFIG_BCM_CS4297A_CSWARM
69878 u64 cfg;
69879 @@ -2696,22 +2695,23 @@ static int __init cs4297a_init(void)
69880 if (!rval) {
69881 char *sb1250_duart_present;
69882
69883 +#if 0
69884 + mm_segment_t fs;
69885 fs = get_fs();
69886 set_fs(KERNEL_DS);
69887 -#if 0
69888 val = SOUND_MASK_LINE;
69889 mixer_ioctl(s, SOUND_MIXER_WRITE_RECSRC, (unsigned long) &val);
69890 for (i = 0; i < ARRAY_SIZE(initvol); i++) {
69891 val = initvol[i].vol;
69892 mixer_ioctl(s, initvol[i].mixch, (unsigned long) &val);
69893 }
69894 + set_fs(fs);
69895 // cs4297a_write_ac97(s, 0x18, 0x0808);
69896 #else
69897 // cs4297a_write_ac97(s, 0x5e, 0x180);
69898 cs4297a_write_ac97(s, 0x02, 0x0808);
69899 cs4297a_write_ac97(s, 0x18, 0x0808);
69900 #endif
69901 - set_fs(fs);
69902
69903 list_add(&s->list, &cs4297a_devs);
69904
69905 diff -urNp linux-2.6.39.4/sound/pci/hda/hda_codec.h linux-2.6.39.4/sound/pci/hda/hda_codec.h
69906 --- linux-2.6.39.4/sound/pci/hda/hda_codec.h 2011-05-19 00:06:34.000000000 -0400
69907 +++ linux-2.6.39.4/sound/pci/hda/hda_codec.h 2011-08-05 20:34:06.000000000 -0400
69908 @@ -615,7 +615,7 @@ struct hda_bus_ops {
69909 /* notify power-up/down from codec to controller */
69910 void (*pm_notify)(struct hda_bus *bus);
69911 #endif
69912 -};
69913 +} __no_const;
69914
69915 /* template to pass to the bus constructor */
69916 struct hda_bus_template {
69917 @@ -713,6 +713,7 @@ struct hda_codec_ops {
69918 #endif
69919 void (*reboot_notify)(struct hda_codec *codec);
69920 };
69921 +typedef struct hda_codec_ops __no_const hda_codec_ops_no_const;
69922
69923 /* record for amp information cache */
69924 struct hda_cache_head {
69925 @@ -743,7 +744,7 @@ struct hda_pcm_ops {
69926 struct snd_pcm_substream *substream);
69927 int (*cleanup)(struct hda_pcm_stream *info, struct hda_codec *codec,
69928 struct snd_pcm_substream *substream);
69929 -};
69930 +} __no_const;
69931
69932 /* PCM information for each substream */
69933 struct hda_pcm_stream {
69934 @@ -801,7 +802,7 @@ struct hda_codec {
69935 const char *modelname; /* model name for preset */
69936
69937 /* set by patch */
69938 - struct hda_codec_ops patch_ops;
69939 + hda_codec_ops_no_const patch_ops;
69940
69941 /* PCM to create, set by patch_ops.build_pcms callback */
69942 unsigned int num_pcms;
69943 diff -urNp linux-2.6.39.4/sound/pci/ice1712/ice1712.h linux-2.6.39.4/sound/pci/ice1712/ice1712.h
69944 --- linux-2.6.39.4/sound/pci/ice1712/ice1712.h 2011-05-19 00:06:34.000000000 -0400
69945 +++ linux-2.6.39.4/sound/pci/ice1712/ice1712.h 2011-08-05 20:34:06.000000000 -0400
69946 @@ -269,7 +269,7 @@ struct snd_ak4xxx_private {
69947 unsigned int mask_flags; /* total mask bits */
69948 struct snd_akm4xxx_ops {
69949 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
69950 - } ops;
69951 + } __no_const ops;
69952 };
69953
69954 struct snd_ice1712_spdif {
69955 @@ -285,7 +285,7 @@ struct snd_ice1712_spdif {
69956 int (*default_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69957 void (*stream_get)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69958 int (*stream_put)(struct snd_ice1712 *, struct snd_ctl_elem_value *ucontrol);
69959 - } ops;
69960 + } __no_const ops;
69961 };
69962
69963
69964 diff -urNp linux-2.6.39.4/sound/pci/intel8x0m.c linux-2.6.39.4/sound/pci/intel8x0m.c
69965 --- linux-2.6.39.4/sound/pci/intel8x0m.c 2011-05-19 00:06:34.000000000 -0400
69966 +++ linux-2.6.39.4/sound/pci/intel8x0m.c 2011-08-05 20:34:06.000000000 -0400
69967 @@ -1265,7 +1265,7 @@ static struct shortname_table {
69968 { 0x5455, "ALi M5455" },
69969 { 0x746d, "AMD AMD8111" },
69970 #endif
69971 - { 0 },
69972 + { 0, },
69973 };
69974
69975 static int __devinit snd_intel8x0m_probe(struct pci_dev *pci,
69976 diff -urNp linux-2.6.39.4/sound/pci/ymfpci/ymfpci_main.c linux-2.6.39.4/sound/pci/ymfpci/ymfpci_main.c
69977 --- linux-2.6.39.4/sound/pci/ymfpci/ymfpci_main.c 2011-05-19 00:06:34.000000000 -0400
69978 +++ linux-2.6.39.4/sound/pci/ymfpci/ymfpci_main.c 2011-08-05 20:34:06.000000000 -0400
69979 @@ -202,8 +202,8 @@ static void snd_ymfpci_hw_stop(struct sn
69980 if ((snd_ymfpci_readl(chip, YDSXGR_STATUS) & 2) == 0)
69981 break;
69982 }
69983 - if (atomic_read(&chip->interrupt_sleep_count)) {
69984 - atomic_set(&chip->interrupt_sleep_count, 0);
69985 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
69986 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
69987 wake_up(&chip->interrupt_sleep);
69988 }
69989 __end:
69990 @@ -787,7 +787,7 @@ static void snd_ymfpci_irq_wait(struct s
69991 continue;
69992 init_waitqueue_entry(&wait, current);
69993 add_wait_queue(&chip->interrupt_sleep, &wait);
69994 - atomic_inc(&chip->interrupt_sleep_count);
69995 + atomic_inc_unchecked(&chip->interrupt_sleep_count);
69996 schedule_timeout_uninterruptible(msecs_to_jiffies(50));
69997 remove_wait_queue(&chip->interrupt_sleep, &wait);
69998 }
69999 @@ -825,8 +825,8 @@ static irqreturn_t snd_ymfpci_interrupt(
70000 snd_ymfpci_writel(chip, YDSXGR_MODE, mode);
70001 spin_unlock(&chip->reg_lock);
70002
70003 - if (atomic_read(&chip->interrupt_sleep_count)) {
70004 - atomic_set(&chip->interrupt_sleep_count, 0);
70005 + if (atomic_read_unchecked(&chip->interrupt_sleep_count)) {
70006 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
70007 wake_up(&chip->interrupt_sleep);
70008 }
70009 }
70010 @@ -2363,7 +2363,7 @@ int __devinit snd_ymfpci_create(struct s
70011 spin_lock_init(&chip->reg_lock);
70012 spin_lock_init(&chip->voice_lock);
70013 init_waitqueue_head(&chip->interrupt_sleep);
70014 - atomic_set(&chip->interrupt_sleep_count, 0);
70015 + atomic_set_unchecked(&chip->interrupt_sleep_count, 0);
70016 chip->card = card;
70017 chip->pci = pci;
70018 chip->irq = -1;
70019 diff -urNp linux-2.6.39.4/sound/soc/soc-core.c linux-2.6.39.4/sound/soc/soc-core.c
70020 --- linux-2.6.39.4/sound/soc/soc-core.c 2011-05-19 00:06:34.000000000 -0400
70021 +++ linux-2.6.39.4/sound/soc/soc-core.c 2011-08-05 20:34:06.000000000 -0400
70022 @@ -1027,7 +1027,7 @@ static snd_pcm_uframes_t soc_pcm_pointer
70023 }
70024
70025 /* ASoC PCM operations */
70026 -static struct snd_pcm_ops soc_pcm_ops = {
70027 +static snd_pcm_ops_no_const soc_pcm_ops = {
70028 .open = soc_pcm_open,
70029 .close = soc_codec_close,
70030 .hw_params = soc_pcm_hw_params,
70031 @@ -2105,6 +2105,7 @@ static int soc_new_pcm(struct snd_soc_pc
70032
70033 rtd->pcm = pcm;
70034 pcm->private_data = rtd;
70035 + /* this whole logic is broken... */
70036 soc_pcm_ops.mmap = platform->driver->ops->mmap;
70037 soc_pcm_ops.pointer = platform->driver->ops->pointer;
70038 soc_pcm_ops.ioctl = platform->driver->ops->ioctl;
70039 diff -urNp linux-2.6.39.4/sound/usb/card.h linux-2.6.39.4/sound/usb/card.h
70040 --- linux-2.6.39.4/sound/usb/card.h 2011-05-19 00:06:34.000000000 -0400
70041 +++ linux-2.6.39.4/sound/usb/card.h 2011-08-05 20:34:06.000000000 -0400
70042 @@ -44,6 +44,7 @@ struct snd_urb_ops {
70043 int (*prepare_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
70044 int (*retire_sync)(struct snd_usb_substream *subs, struct snd_pcm_runtime *runtime, struct urb *u);
70045 };
70046 +typedef struct snd_urb_ops __no_const snd_urb_ops_no_const;
70047
70048 struct snd_usb_substream {
70049 struct snd_usb_stream *stream;
70050 @@ -93,7 +94,7 @@ struct snd_usb_substream {
70051 struct snd_pcm_hw_constraint_list rate_list; /* limited rates */
70052 spinlock_t lock;
70053
70054 - struct snd_urb_ops ops; /* callbacks (must be filled at init) */
70055 + snd_urb_ops_no_const ops; /* callbacks (must be filled at init) */
70056 };
70057
70058 struct snd_usb_stream {
70059 diff -urNp linux-2.6.39.4/tools/gcc/constify_plugin.c linux-2.6.39.4/tools/gcc/constify_plugin.c
70060 --- linux-2.6.39.4/tools/gcc/constify_plugin.c 1969-12-31 19:00:00.000000000 -0500
70061 +++ linux-2.6.39.4/tools/gcc/constify_plugin.c 2011-08-05 20:34:06.000000000 -0400
70062 @@ -0,0 +1,189 @@
70063 +/*
70064 + * Copyright 2011 by Emese Revfy <re.emese@gmail.com>
70065 + * Licensed under the GPL v2, or (at your option) v3
70066 + *
70067 + * This gcc plugin constifies all structures which contain only function pointers and const fields.
70068 + *
70069 + * Usage:
70070 + * $ gcc -I`gcc -print-file-name=plugin`/include -fPIC -shared -O2 -o constify_plugin.so constify_plugin.c
70071 + * $ gcc -fplugin=constify_plugin.so test.c -O2
70072 + */
70073 +
70074 +#include "gcc-plugin.h"
70075 +#include "config.h"
70076 +#include "system.h"
70077 +#include "coretypes.h"
70078 +#include "tree.h"
70079 +#include "tree-pass.h"
70080 +#include "intl.h"
70081 +#include "plugin-version.h"
70082 +#include "tm.h"
70083 +#include "toplev.h"
70084 +#include "function.h"
70085 +#include "tree-flow.h"
70086 +#include "plugin.h"
70087 +
70088 +int plugin_is_GPL_compatible;
70089 +
70090 +static struct plugin_info const_plugin_info = {
70091 + .version = "20110721",
70092 + .help = "no-constify\tturn off constification\n",
70093 +};
70094 +
70095 +static bool walk_struct(tree node);
70096 +
70097 +static void deconstify_node(tree node)
70098 +{
70099 + tree field;
70100 +
70101 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
70102 + enum tree_code code = TREE_CODE(TREE_TYPE(field));
70103 + if (code == RECORD_TYPE || code == UNION_TYPE)
70104 + deconstify_node(TREE_TYPE(field));
70105 + TREE_READONLY(field) = 0;
70106 + TREE_READONLY(TREE_TYPE(field)) = 0;
70107 + }
70108 +}
70109 +
70110 +static tree handle_no_const_attribute(tree *node, tree name, tree args, int flags, bool *no_add_attrs)
70111 +{
70112 + if (TREE_CODE(*node) == FUNCTION_DECL) {
70113 + error("%qE attribute does not apply to functions", name);
70114 + *no_add_attrs = true;
70115 + return NULL_TREE;
70116 + }
70117 +
70118 + if (DECL_P(*node) && lookup_attribute("no_const", TYPE_ATTRIBUTES(TREE_TYPE(*node)))) {
70119 + error("%qE attribute is already applied to the type" , name);
70120 + *no_add_attrs = true;
70121 + return NULL_TREE;
70122 + }
70123 +
70124 + if (TREE_CODE(*node) == TYPE_DECL && !TREE_READONLY(TREE_TYPE(*node))) {
70125 + error("%qE attribute used on type that is not constified" , name);
70126 + *no_add_attrs = true;
70127 + return NULL_TREE;
70128 + }
70129 +
70130 + if (TREE_CODE(*node) == TYPE_DECL) {
70131 + tree chain = TREE_CHAIN(TREE_TYPE(*node));
70132 + TREE_TYPE(*node) = copy_node(TREE_TYPE(*node));
70133 + TREE_CHAIN(TREE_TYPE(*node)) = copy_list(chain);
70134 + TREE_READONLY(TREE_TYPE(*node)) = 0;
70135 + deconstify_node(TREE_TYPE(*node));
70136 + return NULL_TREE;
70137 + }
70138 +
70139 + return NULL_TREE;
70140 +}
70141 +
70142 +static struct attribute_spec no_const_attr = {
70143 + .name = "no_const",
70144 + .min_length = 0,
70145 + .max_length = 0,
70146 + .decl_required = false,
70147 + .type_required = false,
70148 + .function_type_required = false,
70149 + .handler = handle_no_const_attribute
70150 +};
70151 +
70152 +static void register_attributes(void *event_data, void *data)
70153 +{
70154 + register_attribute(&no_const_attr);
70155 +}
70156 +
70157 +/*
70158 +static void printnode(char *prefix, tree node)
70159 +{
70160 + enum tree_code code;
70161 + enum tree_code_class tclass;
70162 +
70163 + tclass = TREE_CODE_CLASS(TREE_CODE (node));
70164 +
70165 + code = TREE_CODE(node);
70166 + fprintf(stderr, "\n%s node: %p, code: %d type: %s\n", prefix, node, code, tree_code_name[(int)code]);
70167 + if (DECL_CONTEXT(node) != NULL_TREE && TYPE_NAME(DECL_CONTEXT(node)) != NULL_TREE)
70168 + fprintf(stderr, "struct name: %s\n", IDENTIFIER_POINTER(TYPE_NAME(DECL_CONTEXT(node))));
70169 + if (tclass == tcc_declaration && DECL_NAME(node) != NULL_TREE)
70170 + fprintf(stderr, "field name: %s\n", IDENTIFIER_POINTER(DECL_NAME(node)));
70171 +}
70172 +*/
70173 +
70174 +static void constify_node(tree node)
70175 +{
70176 + TREE_READONLY(node) = 1;
70177 +}
70178 +
70179 +static bool is_fptr(tree field)
70180 +{
70181 + tree ptr = TREE_TYPE(field);
70182 +
70183 + if (TREE_CODE(ptr) != POINTER_TYPE)
70184 + return false;
70185 +
70186 + return TREE_CODE(TREE_TYPE(ptr)) == FUNCTION_TYPE;
70187 +}
70188 +
70189 +static bool walk_struct(tree node)
70190 +{
70191 + tree field;
70192 +
70193 + for (field = TYPE_FIELDS(node); field; field = TREE_CHAIN(field)) {
70194 + enum tree_code code = TREE_CODE(TREE_TYPE(field));
70195 + if (code == RECORD_TYPE || code == UNION_TYPE) {
70196 + if (!(walk_struct(TREE_TYPE(field))))
70197 + return false;
70198 + } else if (is_fptr(field) == false && !TREE_READONLY(field))
70199 + return false;
70200 + }
70201 + return true;
70202 +}
70203 +
70204 +static void finish_type(void *event_data, void *data)
70205 +{
70206 + tree node = (tree)event_data;
70207 +
70208 + if (node == NULL_TREE)
70209 + return;
70210 +
70211 + if (lookup_attribute("no_const", TYPE_ATTRIBUTES(node)))
70212 + return;
70213 +
70214 + if (TREE_READONLY(node))
70215 + return;
70216 +
70217 + if (TYPE_FIELDS(node) == NULL_TREE)
70218 + return;
70219 +
70220 + if (walk_struct(node))
70221 + constify_node(node);
70222 +}
70223 +
70224 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
70225 +{
70226 + const char * const plugin_name = plugin_info->base_name;
70227 + const int argc = plugin_info->argc;
70228 + const struct plugin_argument * const argv = plugin_info->argv;
70229 + int i;
70230 + bool constify = true;
70231 +
70232 + if (!plugin_default_version_check(version, &gcc_version)) {
70233 + error(G_("incompatible gcc/plugin versions"));
70234 + return 1;
70235 + }
70236 +
70237 + for (i = 0; i < argc; ++i) {
70238 + if (!(strcmp(argv[i].key, "no-constify"))) {
70239 + constify = false;
70240 + continue;
70241 + }
70242 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
70243 + }
70244 +
70245 + register_callback(plugin_name, PLUGIN_INFO, NULL, &const_plugin_info);
70246 + if (constify)
70247 + register_callback(plugin_name, PLUGIN_FINISH_TYPE, finish_type, NULL);
70248 + register_callback(plugin_name, PLUGIN_ATTRIBUTES, register_attributes, NULL);
70249 +
70250 + return 0;
70251 +}
70252 diff -urNp linux-2.6.39.4/tools/gcc/Makefile linux-2.6.39.4/tools/gcc/Makefile
70253 --- linux-2.6.39.4/tools/gcc/Makefile 1969-12-31 19:00:00.000000000 -0500
70254 +++ linux-2.6.39.4/tools/gcc/Makefile 2011-08-05 20:34:06.000000000 -0400
70255 @@ -0,0 +1,12 @@
70256 +#CC := gcc
70257 +#PLUGIN_SOURCE_FILES := pax_plugin.c
70258 +#PLUGIN_OBJECT_FILES := $(patsubst %.c,%.o,$(PLUGIN_SOURCE_FILES))
70259 +GCCPLUGINS_DIR := $(shell $(HOSTCC) -print-file-name=plugin)
70260 +#CFLAGS += -I$(GCCPLUGINS_DIR)/include -fPIC -O2 -Wall -W
70261 +
70262 +HOST_EXTRACFLAGS += -I$(GCCPLUGINS_DIR)/include
70263 +
70264 +hostlibs-y := stackleak_plugin.so constify_plugin.so
70265 +always := $(hostlibs-y)
70266 +stackleak_plugin-objs := stackleak_plugin.o
70267 +constify_plugin-objs := constify_plugin.o
70268 diff -urNp linux-2.6.39.4/tools/gcc/stackleak_plugin.c linux-2.6.39.4/tools/gcc/stackleak_plugin.c
70269 --- linux-2.6.39.4/tools/gcc/stackleak_plugin.c 1969-12-31 19:00:00.000000000 -0500
70270 +++ linux-2.6.39.4/tools/gcc/stackleak_plugin.c 2011-08-05 20:34:06.000000000 -0400
70271 @@ -0,0 +1,243 @@
70272 +/*
70273 + * Copyright 2011 by the PaX Team <pageexec@freemail.hu>
70274 + * Licensed under the GPL v2
70275 + *
70276 + * Note: the choice of the license means that the compilation process is
70277 + * NOT 'eligible' as defined by gcc's library exception to the GPL v3,
70278 + * but for the kernel it doesn't matter since it doesn't link against
70279 + * any of the gcc libraries
70280 + *
70281 + * gcc plugin to help implement various PaX features
70282 + *
70283 + * - track lowest stack pointer
70284 + *
70285 + * TODO:
70286 + * - initialize all local variables
70287 + *
70288 + * BUGS:
70289 + * - cloned functions are instrumented twice
70290 + */
70291 +#include "gcc-plugin.h"
70292 +#include "plugin-version.h"
70293 +#include "config.h"
70294 +#include "system.h"
70295 +#include "coretypes.h"
70296 +#include "tm.h"
70297 +#include "toplev.h"
70298 +#include "basic-block.h"
70299 +#include "gimple.h"
70300 +//#include "expr.h" where are you...
70301 +#include "diagnostic.h"
70302 +#include "rtl.h"
70303 +#include "emit-rtl.h"
70304 +#include "function.h"
70305 +#include "tree.h"
70306 +#include "tree-pass.h"
70307 +#include "intl.h"
70308 +
70309 +int plugin_is_GPL_compatible;
70310 +
70311 +static int track_frame_size = -1;
70312 +static const char track_function[] = "pax_track_stack";
70313 +static bool init_locals;
70314 +
70315 +static struct plugin_info stackleak_plugin_info = {
70316 + .version = "201106030000",
70317 + .help = "track-lowest-sp=nn\ttrack sp in functions whose frame size is at least nn bytes\n"
70318 +// "initialize-locals\t\tforcibly initialize all stack frames\n"
70319 +};
70320 +
70321 +static bool gate_stackleak_track_stack(void);
70322 +static unsigned int execute_stackleak_tree_instrument(void);
70323 +static unsigned int execute_stackleak_final(void);
70324 +
70325 +static struct gimple_opt_pass stackleak_tree_instrument_pass = {
70326 + .pass = {
70327 + .type = GIMPLE_PASS,
70328 + .name = "stackleak_tree_instrument",
70329 + .gate = gate_stackleak_track_stack,
70330 + .execute = execute_stackleak_tree_instrument,
70331 + .sub = NULL,
70332 + .next = NULL,
70333 + .static_pass_number = 0,
70334 + .tv_id = TV_NONE,
70335 + .properties_required = PROP_gimple_leh | PROP_cfg,
70336 + .properties_provided = 0,
70337 + .properties_destroyed = 0,
70338 + .todo_flags_start = 0, //TODO_verify_ssa | TODO_verify_flow | TODO_verify_stmts,
70339 + .todo_flags_finish = TODO_verify_stmts // | TODO_dump_func
70340 + }
70341 +};
70342 +
70343 +static struct rtl_opt_pass stackleak_final_rtl_opt_pass = {
70344 + .pass = {
70345 + .type = RTL_PASS,
70346 + .name = "stackleak_final",
70347 + .gate = gate_stackleak_track_stack,
70348 + .execute = execute_stackleak_final,
70349 + .sub = NULL,
70350 + .next = NULL,
70351 + .static_pass_number = 0,
70352 + .tv_id = TV_NONE,
70353 + .properties_required = 0,
70354 + .properties_provided = 0,
70355 + .properties_destroyed = 0,
70356 + .todo_flags_start = 0,
70357 + .todo_flags_finish = 0
70358 + }
70359 +};
70360 +
70361 +static bool gate_stackleak_track_stack(void)
70362 +{
70363 + return track_frame_size >= 0;
70364 +}
70365 +
70366 +static void stackleak_add_instrumentation(gimple_stmt_iterator *gsi, bool before)
70367 +{
70368 + gimple call;
70369 + tree decl, type;
70370 +
70371 + // insert call to void pax_track_stack(void)
70372 + type = build_function_type_list(void_type_node, NULL_TREE);
70373 + decl = build_fn_decl(track_function, type);
70374 + DECL_ASSEMBLER_NAME(decl); // for LTO
70375 + call = gimple_build_call(decl, 0);
70376 + if (before)
70377 + gsi_insert_before(gsi, call, GSI_CONTINUE_LINKING);
70378 + else
70379 + gsi_insert_after(gsi, call, GSI_CONTINUE_LINKING);
70380 +}
70381 +
70382 +static unsigned int execute_stackleak_tree_instrument(void)
70383 +{
70384 + basic_block bb;
70385 + gimple_stmt_iterator gsi;
70386 +
70387 + // 1. loop through BBs and GIMPLE statements
70388 + FOR_EACH_BB(bb) {
70389 + for (gsi = gsi_start_bb(bb); !gsi_end_p(gsi); gsi_next(&gsi)) {
70390 + // gimple match: align 8 built-in BUILT_IN_NORMAL:BUILT_IN_ALLOCA attributes <tree_list 0xb7576450>
70391 + tree decl;
70392 + gimple stmt = gsi_stmt(gsi);
70393 +
70394 + if (!is_gimple_call(stmt))
70395 + continue;
70396 + decl = gimple_call_fndecl(stmt);
70397 + if (!decl)
70398 + continue;
70399 + if (TREE_CODE(decl) != FUNCTION_DECL)
70400 + continue;
70401 + if (!DECL_BUILT_IN(decl))
70402 + continue;
70403 + if (DECL_BUILT_IN_CLASS(decl) != BUILT_IN_NORMAL)
70404 + continue;
70405 + if (DECL_FUNCTION_CODE(decl) != BUILT_IN_ALLOCA)
70406 + continue;
70407 +
70408 + // 2. insert track call after each __builtin_alloca call
70409 + stackleak_add_instrumentation(&gsi, false);
70410 +// print_node(stderr, "pax", decl, 4);
70411 + }
70412 + }
70413 +
70414 + // 3. insert track call at the beginning
70415 + bb = ENTRY_BLOCK_PTR_FOR_FUNCTION(cfun)->next_bb;
70416 + gsi = gsi_start_bb(bb);
70417 + stackleak_add_instrumentation(&gsi, true);
70418 +
70419 + return 0;
70420 +}
70421 +
70422 +static unsigned int execute_stackleak_final(void)
70423 +{
70424 + rtx insn;
70425 +
70426 + if (cfun->calls_alloca)
70427 + return 0;
70428 +
70429 + // 1. find pax_track_stack calls
70430 + for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) {
70431 + // rtl match: (call_insn 8 7 9 3 (call (mem (symbol_ref ("pax_track_stack") [flags 0x41] <function_decl 0xb7470e80 pax_track_stack>) [0 S1 A8]) (4)) -1 (nil) (nil))
70432 + rtx body;
70433 +
70434 + if (!CALL_P(insn))
70435 + continue;
70436 + body = PATTERN(insn);
70437 + if (GET_CODE(body) != CALL)
70438 + continue;
70439 + body = XEXP(body, 0);
70440 + if (GET_CODE(body) != MEM)
70441 + continue;
70442 + body = XEXP(body, 0);
70443 + if (GET_CODE(body) != SYMBOL_REF)
70444 + continue;
70445 + if (strcmp(XSTR(body, 0), track_function))
70446 + continue;
70447 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
70448 + // 2. delete call if function frame is not big enough
70449 + if (get_frame_size() >= track_frame_size)
70450 + continue;
70451 + delete_insn_and_edges(insn);
70452 + }
70453 +
70454 +// print_simple_rtl(stderr, get_insns());
70455 +// print_rtl(stderr, get_insns());
70456 +// warning(0, "track_frame_size: %d %ld %d", cfun->calls_alloca, get_frame_size(), track_frame_size);
70457 +
70458 + return 0;
70459 +}
70460 +
70461 +int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version)
70462 +{
70463 + const char * const plugin_name = plugin_info->base_name;
70464 + const int argc = plugin_info->argc;
70465 + const struct plugin_argument * const argv = plugin_info->argv;
70466 + int i;
70467 + struct register_pass_info stackleak_tree_instrument_pass_info = {
70468 + .pass = &stackleak_tree_instrument_pass.pass,
70469 +// .reference_pass_name = "tree_profile",
70470 + .reference_pass_name = "optimized",
70471 + .ref_pass_instance_number = 0,
70472 + .pos_op = PASS_POS_INSERT_AFTER
70473 + };
70474 + struct register_pass_info stackleak_final_pass_info = {
70475 + .pass = &stackleak_final_rtl_opt_pass.pass,
70476 + .reference_pass_name = "final",
70477 + .ref_pass_instance_number = 0,
70478 + .pos_op = PASS_POS_INSERT_BEFORE
70479 + };
70480 +
70481 + if (!plugin_default_version_check(version, &gcc_version)) {
70482 + error(G_("incompatible gcc/plugin versions"));
70483 + return 1;
70484 + }
70485 +
70486 + register_callback(plugin_name, PLUGIN_INFO, NULL, &stackleak_plugin_info);
70487 +
70488 + for (i = 0; i < argc; ++i) {
70489 + if (!strcmp(argv[i].key, "track-lowest-sp")) {
70490 + if (!argv[i].value) {
70491 + error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
70492 + continue;
70493 + }
70494 + track_frame_size = atoi(argv[i].value);
70495 + if (argv[i].value[0] < '0' || argv[i].value[0] > '9' || track_frame_size < 0)
70496 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
70497 + continue;
70498 + }
70499 + if (!strcmp(argv[i].key, "initialize-locals")) {
70500 + if (argv[i].value) {
70501 + error(G_("invalid option argument '-fplugin-arg-%s-%s=%s'"), plugin_name, argv[i].key, argv[i].value);
70502 + continue;
70503 + }
70504 + init_locals = true;
70505 + continue;
70506 + }
70507 + error(G_("unkown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key);
70508 + }
70509 +
70510 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_tree_instrument_pass_info);
70511 + register_callback(plugin_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &stackleak_final_pass_info);
70512 +
70513 + return 0;
70514 +}
70515 diff -urNp linux-2.6.39.4/usr/gen_init_cpio.c linux-2.6.39.4/usr/gen_init_cpio.c
70516 --- linux-2.6.39.4/usr/gen_init_cpio.c 2011-05-19 00:06:34.000000000 -0400
70517 +++ linux-2.6.39.4/usr/gen_init_cpio.c 2011-08-05 19:44:38.000000000 -0400
70518 @@ -305,7 +305,7 @@ static int cpio_mkfile(const char *name,
70519 int retval;
70520 int rc = -1;
70521 int namesize;
70522 - int i;
70523 + unsigned int i;
70524
70525 mode |= S_IFREG;
70526
70527 @@ -394,9 +394,10 @@ static char *cpio_replace_env(char *new_
70528 *env_var = *expanded = '\0';
70529 strncat(env_var, start + 2, end - start - 2);
70530 strncat(expanded, new_location, start - new_location);
70531 - strncat(expanded, getenv(env_var), PATH_MAX);
70532 - strncat(expanded, end + 1, PATH_MAX);
70533 + strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded));
70534 + strncat(expanded, end + 1, PATH_MAX - strlen(expanded));
70535 strncpy(new_location, expanded, PATH_MAX);
70536 + new_location[PATH_MAX] = 0;
70537 } else
70538 break;
70539 }
70540 diff -urNp linux-2.6.39.4/virt/kvm/kvm_main.c linux-2.6.39.4/virt/kvm/kvm_main.c
70541 --- linux-2.6.39.4/virt/kvm/kvm_main.c 2011-05-19 00:06:34.000000000 -0400
70542 +++ linux-2.6.39.4/virt/kvm/kvm_main.c 2011-08-05 20:34:06.000000000 -0400
70543 @@ -73,7 +73,7 @@ LIST_HEAD(vm_list);
70544
70545 static cpumask_var_t cpus_hardware_enabled;
70546 static int kvm_usage_count = 0;
70547 -static atomic_t hardware_enable_failed;
70548 +static atomic_unchecked_t hardware_enable_failed;
70549
70550 struct kmem_cache *kvm_vcpu_cache;
70551 EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
70552 @@ -2187,7 +2187,7 @@ static void hardware_enable_nolock(void
70553
70554 if (r) {
70555 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
70556 - atomic_inc(&hardware_enable_failed);
70557 + atomic_inc_unchecked(&hardware_enable_failed);
70558 printk(KERN_INFO "kvm: enabling virtualization on "
70559 "CPU%d failed\n", cpu);
70560 }
70561 @@ -2241,10 +2241,10 @@ static int hardware_enable_all(void)
70562
70563 kvm_usage_count++;
70564 if (kvm_usage_count == 1) {
70565 - atomic_set(&hardware_enable_failed, 0);
70566 + atomic_set_unchecked(&hardware_enable_failed, 0);
70567 on_each_cpu(hardware_enable_nolock, NULL, 1);
70568
70569 - if (atomic_read(&hardware_enable_failed)) {
70570 + if (atomic_read_unchecked(&hardware_enable_failed)) {
70571 hardware_disable_all_nolock();
70572 r = -EBUSY;
70573 }
70574 @@ -2509,7 +2509,7 @@ static void kvm_sched_out(struct preempt
70575 kvm_arch_vcpu_put(vcpu);
70576 }
70577
70578 -int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
70579 +int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
70580 struct module *module)
70581 {
70582 int r;
70583 @@ -2572,7 +2572,7 @@ int kvm_init(void *opaque, unsigned vcpu
70584 if (!vcpu_align)
70585 vcpu_align = __alignof__(struct kvm_vcpu);
70586 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align,
70587 - 0, NULL);
70588 + SLAB_USERCOPY, NULL);
70589 if (!kvm_vcpu_cache) {
70590 r = -ENOMEM;
70591 goto out_free_3;
70592 @@ -2582,9 +2582,11 @@ int kvm_init(void *opaque, unsigned vcpu
70593 if (r)
70594 goto out_free;
70595
70596 - kvm_chardev_ops.owner = module;
70597 - kvm_vm_fops.owner = module;
70598 - kvm_vcpu_fops.owner = module;
70599 + pax_open_kernel();
70600 + *(void **)&kvm_chardev_ops.owner = module;
70601 + *(void **)&kvm_vm_fops.owner = module;
70602 + *(void **)&kvm_vcpu_fops.owner = module;
70603 + pax_close_kernel();
70604
70605 r = misc_register(&kvm_dev);
70606 if (r) {